On Thu, Mar 12, 2026 at 04:31:33PM +0530, Shaiq Wani wrote:
> RS completion only frees the EOP entry's mbuf, leaking preceding
> segments of multi-segment packets. Add a first_id field to the Tx
> entry struct, record the first sw_id at the EOP entry during submit,
> and walk forward from first to EOP to free all segments on completion.
>
> Fixes: 8c6098afa075 ("common/idpf: add Rx/Tx data path")
> Cc: [email protected]
>
> Signed-off-by: Shaiq Wani <[email protected]>
> ---
> drivers/net/intel/common/tx.h | 1 +
> drivers/net/intel/idpf/idpf_common_rxtx.c | 26 ++++++++++++++++++++---
> 2 files changed, 24 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/intel/common/tx.h b/drivers/net/intel/common/tx.h
> index 283bd58d5d..53e61c0b62 100644
> --- a/drivers/net/intel/common/tx.h
> +++ b/drivers/net/intel/common/tx.h
> @@ -121,6 +121,7 @@ struct ci_tx_queue;
> struct ci_tx_entry {
> struct rte_mbuf *mbuf; /* mbuf associated with TX desc, if any. */
> uint16_t next_id; /* Index of next descriptor in ring. */
> + uint16_t first_id; /* Split-queue: first sw_id of packet at EOP entry. */
> };
>
> /**
> diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.c b/drivers/net/intel/idpf/idpf_common_rxtx.c
> index f73716e57c..bc0677f2ee 100644
> --- a/drivers/net/intel/idpf/idpf_common_rxtx.c
> +++ b/drivers/net/intel/idpf/idpf_common_rxtx.c
> @@ -825,11 +825,23 @@ idpf_split_tx_free(struct ci_tx_queue *cq)
> txq->last_desc_cleaned = q_head;
> break;
> case IDPF_TXD_COMPLT_RS:
> - /* q_head indicates sw_id when ctype is 2 */
> + /* Walk from first segment to EOP, freeing each segment. */
> txe = &txq->sw_ring[q_head];
> if (txe->mbuf != NULL) {
> - rte_pktmbuf_free_seg(txe->mbuf);
> - txe->mbuf = NULL;
> + uint16_t first = txe->first_id;
> + uint16_t idx = first;
> + uint16_t end = (q_head + 1 == txq->sw_nb_desc) ?
> + 0 : q_head + 1;
> +
> + do {
> + txe = &txq->sw_ring[idx];
> + if (txe->mbuf != NULL) {
> + rte_pktmbuf_free_seg(txe->mbuf);
> + txe->mbuf = NULL;
> + }
> + idx = (idx + 1 == txq->sw_nb_desc) ?
> + 0 : idx + 1;
> + } while (idx != end);
> }
> break;
> default:
> @@ -979,9 +991,14 @@ idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> tx_id = 0;
> }
>
> + uint16_t first_sw_id = sw_id;
> +
> do {
> txd = &txr[tx_id];
> txn = &sw_ring[txe->next_id];
> +
> + if (txe->mbuf != NULL)
> + rte_pktmbuf_free_seg(txe->mbuf);
Does hitting this point not indicate a problem condition? If
idpf_split_tx_free has not cleaned this slot, surely it's not correct for
us to free a potentially unsent mbuf?
Also, does this code path use out-of-order completions? If it does, then I
think we need to always check each slot's mbuf for NULL before we use it, and
stop as soon as we hit a slot with a non-NULL mbuf (potentially undoing any
descriptor writes we have already done for the current packet).
> txe->mbuf = tx_pkt;
>
> /* Setup TX descriptor */
> @@ -1002,6 +1019,9 @@ idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> /* fill the last descriptor with End of Packet (EOP) bit */
> txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;
>
> + /* Record first sw_id at EOP so completion can walk forward. */
> + sw_ring[txd->qw1.compl_tag].first_id = first_sw_id;
> +
> txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
> txq->rs_compl_count += nb_used;
>
> --
> 2.34.1
>