In async_enqueue_pkts(), packets that fail to be enqueued are freed before the function returns. However, the caller may retry those failed packets later, which leads to a use-after-free. Fix it by freeing the failed packets in the callers (drain_vhost() and drain_eth_rx()) only after the retries are done.
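For illustration, below is a small standalone sketch of the failure pattern
(hypothetical names such as enqueue_and_free_failed(), not the actual
examples/vhost code): the callee frees the mbufs it failed to enqueue, the
caller then retries with the same pointer array, so the retry touches memory
that has already been freed.

  #include <stdlib.h>

  #define BURST 4

  struct pkt { int id; };          /* stand-in for struct rte_mbuf */

  /* Pretend only half of the burst fits; free the rest (the buggy part). */
  static int enqueue_and_free_failed(struct pkt **pkts, int count)
  {
          int done = count / 2;
          int i;

          for (i = done; i < count; i++)
                  free(pkts[i]);   /* failed pkts freed by the callee ... */
          return done;
  }

  int main(void)
  {
          struct pkt *pkts[BURST];
          int i, done;

          for (i = 0; i < BURST; i++)
                  pkts[i] = malloc(sizeof(struct pkt));

          done = enqueue_and_free_failed(pkts, BURST);

          /*
           * ... but the caller retries the leftovers with the same array,
           * so the second call frees (uses) already-freed memory.
           */
          if (done < BURST)
                  enqueue_and_free_failed(&pkts[done], BURST - done);

          for (i = 0; i < done; i++)
                  free(pkts[i]);
          return 0;
  }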
Fixes: 1907ce4baec3 ("examples/vhost: fix retry logic on Rx path")
Cc: sta...@dpdk.org

Signed-off-by: Wenwu Ma <wenwux...@intel.com>
---
 examples/vhost/main.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 7e1666f42a..7956dc4f13 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1073,8 +1073,13 @@ drain_vhost(struct vhost_dev *vdev)
 				__ATOMIC_SEQ_CST);
 	}
 
-	if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled)
+	if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
 		free_pkts(m, nr_xmit);
+	} else {
+		uint16_t enqueue_fail = nr_xmit - ret;
+		if (enqueue_fail > 0)
+			free_pkts(&m[ret], enqueue_fail);
+	}
 }
 
 static __rte_always_inline void
@@ -1350,17 +1355,12 @@ async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint32_t rx_count)
 {
 	uint16_t enqueue_count;
-	uint16_t enqueue_fail = 0;
 	uint16_t dma_id = dma_bind[vid2socketid[dev->vid]].dmas[VIRTIO_RXQ].dev_id;
 
 	complete_async_pkts(dev);
 	enqueue_count = rte_vhost_submit_enqueue_burst(dev->vid, queue_id,
 					pkts, rx_count, dma_id, 0);
 
-	enqueue_fail = rx_count - enqueue_count;
-	if (enqueue_fail)
-		free_pkts(&pkts[enqueue_count], enqueue_fail);
-
 	return enqueue_count;
 }
 
@@ -1405,8 +1405,13 @@ drain_eth_rx(struct vhost_dev *vdev)
 				__ATOMIC_SEQ_CST);
 	}
 
-	if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled)
+	if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
 		free_pkts(pkts, rx_count);
+	} else {
+		uint16_t enqueue_fail = rx_count - enqueue_count;
+		if (enqueue_fail > 0)
+			free_pkts(&pkts[enqueue_count], enqueue_fail);
+	}
 }
 
 uint16_t
 async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
-- 
2.25.1