Since the simple scalar path now uses the vector Tx entry struct, we can
leverage the vector mbuf cleanup function from that path and avoid
having a separate cleanup function for it.

Signed-off-by: Bruce Richardson <[email protected]>
---
 drivers/net/intel/common/tx_scalar_fns.h | 71 +++++-------------------
 drivers/net/intel/i40e/i40e_rxtx.c       |  2 +-
 drivers/net/intel/ice/ice_rxtx.c         |  2 +-
 3 files changed, 17 insertions(+), 58 deletions(-)

diff --git a/drivers/net/intel/common/tx_scalar_fns.h b/drivers/net/intel/common/tx_scalar_fns.h
index b284b80cbe..ce3837a201 100644
--- a/drivers/net/intel/common/tx_scalar_fns.h
+++ b/drivers/net/intel/common/tx_scalar_fns.h
@@ -21,6 +21,20 @@ write_txd(volatile void *txd, uint64_t qw0, uint64_t qw1)
        txd_qw[1] = rte_cpu_to_le_64(qw1);
 }
 
+static __rte_always_inline int
+ci_tx_desc_done_simple(struct ci_tx_queue *txq, uint16_t idx)
+{
+       return (txq->ci_tx_ring[idx].cmd_type_offset_bsz & rte_cpu_to_le_64(CI_TXD_QW1_DTYPE_M)) ==
+                       rte_cpu_to_le_64(CI_TX_DESC_DTYPE_DESC_DONE);
+}
+
+/* Free transmitted mbufs using vector-style cleanup */
+static __rte_always_inline int
+ci_tx_free_bufs_simple(struct ci_tx_queue *txq)
+{
+       return ci_tx_free_bufs_vec(txq, ci_tx_desc_done_simple, false);
+}
+
 /* Fill hardware descriptor ring with mbuf data (simple path) */
 static inline void
 ci_tx_fill_hw_ring_simple(volatile struct ci_tx_desc *txdp, struct rte_mbuf **pkts,
@@ -52,61 +66,6 @@ ci_tx_fill_hw_ring_simple(volatile struct ci_tx_desc *txdp, struct rte_mbuf **pk
        }
 }
 
-/* Free transmitted mbufs from descriptor ring with bulk freeing for Tx simple path */
-static __rte_always_inline int
-ci_tx_free_bufs(struct ci_tx_queue *txq)
-{
-       struct ci_tx_entry_vec *txep;
-       uint16_t tx_rs_thresh = txq->tx_rs_thresh;
-       uint16_t i = 0, j = 0;
-       struct rte_mbuf *free[CI_TX_MAX_FREE_BUF_SZ];
-       const uint16_t k = RTE_ALIGN_FLOOR(tx_rs_thresh, CI_TX_MAX_FREE_BUF_SZ);
-       const uint16_t m = tx_rs_thresh % CI_TX_MAX_FREE_BUF_SZ;
-
-       if ((txq->ci_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
-                       rte_cpu_to_le_64(CI_TXD_QW1_DTYPE_M)) !=
-                       rte_cpu_to_le_64(CI_TX_DESC_DTYPE_DESC_DONE))
-               return 0;
-
-       txep = &txq->sw_ring_vec[txq->tx_next_dd - (tx_rs_thresh - 1)];
-
-       for (i = 0; i < tx_rs_thresh; i++)
-               rte_prefetch0((txep + i)->mbuf);
-
-       if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
-               if (k) {
-                       for (j = 0; j != k; j += CI_TX_MAX_FREE_BUF_SZ) {
-                               for (i = 0; i < CI_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
-                                       free[i] = txep->mbuf;
-                                       txep->mbuf = NULL;
-                               }
-                               rte_mbuf_raw_free_bulk(free[0]->pool, free,
-                                               CI_TX_MAX_FREE_BUF_SZ);
-                       }
-               }
-
-               if (m) {
-                       for (i = 0; i < m; ++i, ++txep) {
-                               free[i] = txep->mbuf;
-                               txep->mbuf = NULL;
-                       }
-                       rte_mbuf_raw_free_bulk(free[0]->pool, free, m);
-               }
-       } else {
-               for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
-                       rte_pktmbuf_free_seg(txep->mbuf);
-                       txep->mbuf = NULL;
-               }
-       }
-
-       txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
-       txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
-       if (txq->tx_next_dd >= txq->nb_tx_desc)
-               txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
-       return txq->tx_rs_thresh;
-}
-
 /* Simple burst transmit for descriptor-based simple Tx path
  *
  * Transmits a burst of packets by filling hardware descriptors with mbuf
@@ -132,7 +91,7 @@ ci_xmit_burst_simple(struct ci_tx_queue *txq,
         * descriptor, free the associated buffer.
         */
        if (txq->nb_tx_free < txq->tx_free_thresh)
-               ci_tx_free_bufs(txq);
+               ci_tx_free_bufs_simple(txq);
 
        /* Use available descriptor only */
        nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index 185e45fb9a..820a955158 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -2367,7 +2367,7 @@ i40e_tx_done_cleanup_simple(struct ci_tx_queue *txq,
                if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
                        break;
 
-               n = ci_tx_free_bufs(txq);
+               n = ci_tx_free_bufs_simple(txq);
 
                if (n == 0)
                        break;
diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index 06f7e85c12..be9d88dda6 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -3208,7 +3208,7 @@ ice_tx_done_cleanup_simple(struct ci_tx_queue *txq,
                if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
                        break;
 
-               n = ci_tx_free_bufs(txq);
+               n = ci_tx_free_bufs_simple(txq);
 
                if (n == 0)
                        break;
-- 
2.51.0

Reply via email to