Record operations on mbufs when they are allocated and released inside the mlx5 PMD.
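For reference, a minimal sketch of the marking pattern this patch applies
around the PMD's raw mbuf allocate/free calls. The pmd_alloc_marked() and
pmd_free_marked() helper names are hypothetical, for illustration only;
rte_mbuf_history_mark() and the RTE_MBUF_PMD_ALLOC/RTE_MBUF_PMD_FREE events
are assumed to come from the mbuf history support added earlier in this
series, compiled in under RTE_MBUF_HISTORY_DEBUG:

  #include <rte_mbuf.h>

  /* Hypothetical helpers, not part of this patch. */
  static inline struct rte_mbuf *
  pmd_alloc_marked(struct rte_mempool *mp)
  {
          struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

  #if RTE_MBUF_HISTORY_DEBUG
          /* Record the PMD-level allocation in the mbuf's history. */
          if (m != NULL)
                  rte_mbuf_history_mark(m, RTE_MBUF_PMD_ALLOC);
  #endif
          return m;
  }

  static inline void
  pmd_free_marked(struct rte_mbuf *m)
  {
  #if RTE_MBUF_HISTORY_DEBUG
          /* Record the PMD-level free before returning the segment. */
          rte_mbuf_history_mark(m, RTE_MBUF_PMD_FREE);
  #endif
          rte_pktmbuf_free_seg(m);
  }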
Signed-off-by: Shani Peretz <shper...@nvidia.com>
---
 drivers/net/mlx5/mlx5_rx.c       | 25 +++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rx.h       |  6 ++++++
 drivers/net/mlx5/mlx5_rxq.c      | 15 +++++++++++++--
 drivers/net/mlx5/mlx5_rxtx_vec.c | 16 ++++++++++++++++
 drivers/net/mlx5/mlx5_tx.h       | 21 +++++++++++++++++++++
 drivers/net/mlx5/mlx5_txq.c      |  3 +++
 6 files changed, 84 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 420a03068d..4e44892d93 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -640,12 +640,19 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
 			elt_idx = (elts_ci + i) & e_mask;
 			elt = &(*rxq->elts)[elt_idx];
 			*elt = rte_mbuf_raw_alloc(rxq->mp);
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(*elt, RTE_MBUF_PMD_ALLOC);
+#endif
 			if (!*elt) {
 				for (i--; i >= 0; --i) {
 					elt_idx = (elts_ci + i) & elts_n;
 					elt = &(*rxq->elts)
 						[elt_idx];
+#if RTE_MBUF_HISTORY_DEBUG
+					rte_mbuf_history_mark(*elt,
+							RTE_MBUF_PMD_FREE);
+#endif
 					rte_pktmbuf_free_seg
 						(*elt);
 				}
@@ -1048,6 +1055,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rte_prefetch0(wqe);
 		/* Allocate the buf from the same pool. */
 		rep = rte_mbuf_raw_alloc(seg->pool);
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(rep, RTE_MBUF_PMD_ALLOC);
+#endif
 		if (unlikely(rep == NULL)) {
 			++rxq->stats.rx_nombuf;
 			if (!pkt) {
@@ -1062,6 +1072,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			rep = NEXT(pkt);
 			NEXT(pkt) = NULL;
 			NB_SEGS(pkt) = 1;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(pkt, RTE_MBUF_PMD_FREE);
+#endif
 			rte_mbuf_raw_free(pkt);
 			pkt = rep;
 		}
@@ -1076,6 +1089,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 					       &mcqe, &skip_cnt, false, NULL);
 			if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
 				/* We drop packets with non-critical errors */
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(rep, RTE_MBUF_PMD_FREE);
+#endif
 				rte_mbuf_raw_free(rep);
 				if (len == MLX5_CRITICAL_ERROR_CQE_RET) {
 					rq_ci = rxq->rq_ci << sges_n;
@@ -1089,6 +1105,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				continue;
 			}
 			if (len == 0) {
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(rep, RTE_MBUF_PMD_FREE);
+#endif
 				rte_mbuf_raw_free(rep);
 				break;
 			}
@@ -1540,6 +1559,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			++rxq->stats.rx_nombuf;
 			break;
 		}
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(pkt, RTE_MBUF_PMD_ALLOC);
+#endif
 		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
 		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
 		if (rxq->crc_present)
@@ -1547,6 +1569,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
 					   strd_idx, strd_cnt);
 		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(pkt, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(pkt);
 			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
 				++rxq->stats.idropped;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 7be31066a5..075b4bfc4b 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -525,6 +525,9 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
 			if (unlikely(next == NULL))
 				return MLX5_RXQ_CODE_NOMBUF;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(next, RTE_MBUF_PMD_ALLOC);
+#endif
 			NEXT(prev) = next;
 			SET_DATA_OFF(next, 0);
 			addr = RTE_PTR_ADD(addr, seg_len);
@@ -588,6 +591,9 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
 		if (unlikely(seg == NULL))
 			return MLX5_RXQ_CODE_NOMBUF;
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(seg, RTE_MBUF_PMD_ALLOC);
+#endif
 		SET_DATA_OFF(seg, 0);
 		rte_memcpy(rte_pktmbuf_mtod(seg, void *),
 			   RTE_PTR_ADD(addr, len - hdrm_overlap),
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index aeefece8c1..434a57ca32 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -164,6 +164,9 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 			rte_errno = ENOMEM;
 			goto error;
 		}
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(buf, RTE_MBUF_PMD_ALLOC);
+#endif
 		/* Only vectored Rx routines rely on headroom size. */
 		MLX5_ASSERT(!has_vec_support ||
 			    DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
@@ -221,8 +224,12 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	elts_n = i;
 	for (i = 0; (i != elts_n); ++i) {
-		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
+		if ((*rxq_ctrl->rxq.elts)[i] != NULL) {
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark((*rxq_ctrl->rxq.elts)[i], RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
+		}
 		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
 	if (rxq_ctrl->share_group == 0)
@@ -324,8 +331,12 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 		rxq->rq_pi = elts_ci;
 	}
 	for (i = 0; i != q_n; ++i) {
-		if ((*rxq->elts)[i] != NULL)
+		if ((*rxq->elts)[i] != NULL) {
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark((*rxq->elts)[i], RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg((*rxq->elts)[i]);
+		}
 		(*rxq->elts)[i] = NULL;
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 1b701801c5..c7ca808f43 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -63,6 +63,9 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			err_bytes += PKT_LEN(pkt);
+#endif
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(pkt, RTE_MBUF_PMD_FREE);
 #endif
 			rte_pktmbuf_free_seg(pkt);
 		} else {
@@ -107,6 +110,9 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 		rxq->stats.rx_nombuf += n;
 		return;
 	}
+#if RTE_MBUF_HISTORY_DEBUG
+	rte_mbuf_history_bulk(elts, n, RTE_MBUF_PMD_ALLOC);
+#endif
 	if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) {
 		for (i = 0; i < n; ++i) {
 			/*
@@ -171,6 +177,9 @@ mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 		rxq->stats.rx_nombuf += n;
 		return;
 	}
+#if RTE_MBUF_HISTORY_DEBUG
+	rte_mbuf_history_bulk(elts, n, RTE_MBUF_PMD_ALLOC);
+#endif
 	rxq->elts_ci += n;
 	/* Prevent overflowing into consumed mbufs. */
 	elts_idx = rxq->elts_ci & wqe_mask;
@@ -224,6 +233,9 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
 		if (!elts[i]->pkt_len) {
 			rxq->consumed_strd = strd_n;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(elts[i], RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(elts[i]);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			rxq->stats.ipackets -= 1;
@@ -236,6 +248,9 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
 					   buf, rxq->consumed_strd, strd_cnt);
 		rxq->consumed_strd += strd_cnt;
 		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(elts[i], RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(elts[i]);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			rxq->stats.ipackets -= 1;
@@ -586,6 +601,7 @@ mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rte_io_wmb();
 		*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	} while (tn != pkts_n);
+
 	return tn;
 }
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 16307206e2..c3d69942a8 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -555,6 +555,9 @@ mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
 	if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
 		mbuf = *pkts;
 		pool = mbuf->pool;
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_bulk(pkts, pkts_n, RTE_MBUF_PMD_FREE);
+#endif
 		rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
 		return;
 	}
@@ -610,6 +613,9 @@ mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
 			 * Free the array of pre-freed mbufs
 			 * belonging to the same memory pool.
 			 */
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_bulk(p_free, n_free, RTE_MBUF_PMD_FREE);
+#endif
 			rte_mempool_put_bulk(pool, (void *)p_free, n_free);
 			if (unlikely(mbuf != NULL)) {
 				/* There is the request to start new scan. */
@@ -1225,6 +1231,9 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
 			/* Exhausted packet, just free. */
 			mbuf = loc->mbuf;
 			loc->mbuf = mbuf->next;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(mbuf, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(mbuf);
 			loc->mbuf_off = 0;
 			MLX5_ASSERT(loc->mbuf_nseg > 1);
@@ -1267,6 +1276,9 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
 			/* Exhausted packet, just free. */
 			mbuf = loc->mbuf;
 			loc->mbuf = mbuf->next;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(mbuf, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(mbuf);
 			loc->mbuf_off = 0;
 			MLX5_ASSERT(loc->mbuf_nseg >= 1);
@@ -1717,6 +1729,9 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
 			/* Zero length segment found, just skip. */
 			mbuf = loc->mbuf;
 			loc->mbuf = loc->mbuf->next;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(mbuf, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(mbuf);
 			if (--loc->mbuf_nseg == 0)
 				break;
@@ -2020,6 +2035,9 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
 			wqe->cseg.sq_ds -= RTE_BE32(1);
 			mbuf = loc->mbuf;
 			loc->mbuf = mbuf->next;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(mbuf, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(mbuf);
 			if (--nseg == 0)
 				break;
@@ -3319,6 +3337,9 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
 				 * Packet data are completely inlined,
 				 * free the packet immediately.
 				 */
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(loc->mbuf, RTE_MBUF_PMD_FREE);
+#endif
 				rte_pktmbuf_free_seg(loc->mbuf);
 			} else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
 				    MLX5_TXOFF_CONFIG(MPW)) &&
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 2aa2475a8a..445d1d62c4 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -79,6 +79,9 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

 		MLX5_ASSERT(elt != NULL);
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(elt, RTE_MBUF_PMD_FREE);
+#endif
 		rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
 		/* Poisoning. */
--
2.34.1