Bulk allocation and freeing of multiple mbufs increases throughput by
more than ~2% on a single core.

Signed-off-by: Mallesh Koujalagi <malleshx.koujal...@intel.com>
---
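Note (illustration, not part of the patch): a minimal sketch of the bulk
alloc/free pattern adopted below. The pool, the BURST size, and the error
handling here are assumptions for the sketch only.

    /* rte_pktmbuf_alloc_bulk() is all-or-nothing: it returns 0 and
     * fills every slot on success, or a negative value and fills
     * nothing, so there is no partially allocated burst to unwind.
     */
    struct rte_mbuf *mbufs[BURST];

    if (rte_pktmbuf_alloc_bulk(pool, mbufs, BURST) != 0)
            return 0;

    /* ... fill the mbufs and hand them to the caller ... */

    /* rte_mempool_put_bulk() returns raw objects straight to one pool.
     * Unlike rte_pktmbuf_free(), it does not walk segment chains or
     * check refcounts, so it is only safe when every mbuf is direct,
     * unreferenced, and from the same pool, as in this null PMD.
     */
    rte_mempool_put_bulk(mbufs[0]->pool, (void **)mbufs, BURST);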
 drivers/net/null/rte_eth_null.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 9385ffd..247ede0 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -130,10 +130,11 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
                return 0;
 
        packet_size = h->internals->packet_size;
+
+       if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
+               return 0;
+
        for (i = 0; i < nb_bufs; i++) {
-               bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
-               if (!bufs[i])
-                       break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
@@ -149,18 +150,15 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 static uint16_t
 eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
-       int i;
        struct null_queue *h = q;
 
        if ((q == NULL) || (bufs == NULL))
                return 0;
 
-       for (i = 0; i < nb_bufs; i++)
-               rte_pktmbuf_free(bufs[i]);
+       rte_mempool_put_bulk(bufs[0]->pool, (void **)bufs, nb_bufs);
+       rte_atomic64_add(&h->tx_pkts, nb_bufs);
 
-       rte_atomic64_add(&(h->tx_pkts), i);
-
-       return i;
+       return nb_bufs;
 }
 
 static uint16_t
-- 
2.7.4
