On 8/22/22 06:31, Cheng Jiang wrote:
When vhost receive packets from the front-end using packed virtqueue, it
receives*
might use multiple descriptors for one packet, so we need calculate and
so we need to*
record the descriptor number for each packet to update available
descriptor counter and used descriptor counter, and rollback when DMA
ring is full.
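
To illustrate the bookkeeping described above, here is a minimal, self-contained sketch (not the lib/vhost code; pkt_info, vq_state and vq_inc_last_avail are simplified stand-ins for async_inflight_info, vhost_virtqueue and vq_inc_last_avail_packed()) of advancing the packed-ring available index by the per-packet descriptor count, with wrap handling:

#include <stdint.h>
#include <stdio.h>

struct pkt_info {
	uint16_t descs;              /* descriptors consumed by this packet */
};

struct vq_state {
	uint16_t size;               /* ring size */
	uint16_t last_avail_idx;
	uint8_t  avail_wrap_counter;
};

/* Advance last_avail_idx by 'count' descriptors, flipping the wrap
 * counter when the index passes the end of the ring. */
static void
vq_inc_last_avail(struct vq_state *vq, uint16_t count)
{
	vq->last_avail_idx += count;
	if (vq->last_avail_idx >= vq->size) {
		vq->last_avail_idx -= vq->size;
		vq->avail_wrap_counter ^= 1;
	}
}

int
main(void)
{
	struct vq_state vq = { .size = 256, .last_avail_idx = 250,
			       .avail_wrap_counter = 1 };
	/* Three packets using 4, 2 and 3 descriptors respectively. */
	struct pkt_info pkts[3] = { { 4 }, { 2 }, { 3 } };
	int i;

	for (i = 0; i < 3; i++)
		vq_inc_last_avail(&vq, pkts[i].descs);

	/* 250 + 9 wraps to 3; the wrap counter flips to 0. */
	printf("last_avail_idx=%u wrap=%u\n",
	       (unsigned)vq.last_avail_idx, (unsigned)vq.avail_wrap_counter);
	return 0;
}

Advancing by one per packet, as the pre-patch code did, would desynchronize last_avail_idx from the descriptors actually consumed whenever a packet spans more than one descriptor.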
This is a fix, so the Fixes tag should be present, and sta...@dpdk.org
cc'ed.
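
For reference, the requested tags would typically be added to the commit message as shown below; the hash and title here are placeholders, not the actual offending commit, and the list address is spelled out on the assumption that the truncated address above refers to the stable backport list:

Fixes: 0123456789ab ("placeholder: commit that introduced the code being fixed")
Cc: stable@dpdk.org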
Signed-off-by: Cheng Jiang <cheng1.ji...@intel.com>
---
lib/vhost/virtio_net.c | 24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 35fa4670fd..bfc6d65b7c 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3553,14 +3553,15 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
}
static __rte_always_inline void
-vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq, uint16_t buf_id)
+vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
+ uint16_t buf_id, uint16_t count)
{
struct vhost_async *async = vq->async;
uint16_t idx = async->buffer_idx_packed;
async->buffers_packed[idx].id = buf_id;
async->buffers_packed[idx].len = 0;
- async->buffers_packed[idx].count = 1;
+ async->buffers_packed[idx].count = count;
async->buffer_idx_packed++;
if (async->buffer_idx_packed >= vq->size)
@@ -3581,6 +3582,8 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
uint16_t nr_vec = 0;
uint32_t buf_len;
struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info = async->pkts_info;
static bool allocerr_warned;
if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx, &desc_count,
@@ -3609,8 +3612,12 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
return -1;
}
+ pkts_info[slot_idx].descs = desc_count;
+
/* update async shadow packed ring */
- vhost_async_shadow_dequeue_single_packed(vq, buf_id);
+ vhost_async_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+
+ vq_inc_last_avail_packed(vq, desc_count);
return err;
}
@@ -3649,9 +3656,6 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
pkts_info[slot_idx].mbuf = pkt;
-
- vq_inc_last_avail_packed(vq, 1);
-
}
n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
@@ -3662,6 +3666,8 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
pkt_err = pkt_idx - n_xfer;
if (unlikely(pkt_err)) {
+ uint16_t descs_err = 0;
+
pkt_idx -= pkt_err;
/**
@@ -3678,10 +3684,10 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
/* recover available ring */
- if (vq->last_avail_idx >= pkt_err) {
- vq->last_avail_idx -= pkt_err;
+ if (vq->last_avail_idx >= descs_err) {
+ vq->last_avail_idx -= descs_err;
} else {
- vq->last_avail_idx += vq->size - pkt_err;
+ vq->last_avail_idx += vq->size - descs_err;
vq->avail_wrap_counter ^= 1;
}
}
I'm not sure I understand: isn't descs_err always 0 here?
Maxime
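
On the descs_err question: in the hunks as posted, descs_err is declared and initialized to 0 but does not appear to be incremented anywhere before the recovery branch, so the subtraction rolls nothing back. A self-contained toy of what the rollback presumably intends (an assumption about the fix, not the actual patch; the recorded per-packet counts stand in for the pkts_info[slot_idx].descs values the patch stores earlier) could look like this:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u

int
main(void)
{
	/* Per-packet descriptor counts recorded at dequeue time. */
	uint16_t descs[5] = { 4, 2, 3, 1, 5 };
	uint16_t pkt_idx = 5, pkt_err = 2;   /* the last two DMA transfers failed */
	uint16_t last_avail_idx = 3;         /* already advanced past all 5 packets */
	uint8_t  avail_wrap_counter = 0;
	uint16_t descs_err = 0;
	uint16_t i;

	/* Accumulate the descriptors of the failed packets; without this
	 * step descs_err stays 0 and nothing is rolled back. */
	for (i = pkt_idx - pkt_err; i < pkt_idx; i++)
		descs_err += descs[i];       /* 1 + 5 = 6 */

	/* Recover the available ring, mirroring the patch's branch. */
	if (last_avail_idx >= descs_err) {
		last_avail_idx -= descs_err;
	} else {
		last_avail_idx += RING_SIZE - descs_err;   /* wrap backwards */
		avail_wrap_counter ^= 1;
	}

	printf("last_avail_idx=%u wrap=%u\n",
	       (unsigned)last_avail_idx, (unsigned)avail_wrap_counter);
	return 0;
}

With the accumulation in place the index winds back across the ring boundary (3 -> 253 here) and the wrap counter is flipped, matching the recovery branch in the patch; with descs_err left at 0 the branch is a no-op.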