This commit introduces the netdev_dpdk_eth_tx_queue() function, which implements an intermediate queue with packet buffering. Packets are buffered until the threshold 'INTERIM_QUEUE_BURST_THRESHOLD' (32) is reached, at which point they are transmitted in a burst.
Signed-off-by: Bhanuprakash Bodireddy <bhanuprakash.bodire...@intel.com> Signed-off-by: Antonio Fischetti <antonio.fische...@intel.com> Co-authored-by: Antonio Fischetti <antonio.fische...@intel.com> Signed-off-by: Markus Magnusson <markus.magnus...@ericsson.com> Co-authored-by: Markus Magnusson <markus.magnus...@ericsson.com> Acked-by: Eelco Chaudron <echau...@redhat.com> --- lib/netdev-dpdk.c | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index d3892fe..b4dc36a 100644 --- a/lib/netdev-dpdk.c +++ b/lib/netdev-dpdk.c @@ -1483,6 +1483,7 @@ static inline int netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid, struct rte_mbuf **pkts, int cnt) { + struct dpdk_tx_queue *txq = &dev->tx_q[qid]; uint32_t nb_tx = 0; while (nb_tx != cnt) { @@ -1506,6 +1507,7 @@ netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid, } } + txq->dpdk_pkt_cnt = 0; return cnt - nb_tx; } @@ -1905,6 +1907,37 @@ out: } } +/* Enqueue packets in an intermediate queue and call the flush + * function when the queue is full. This way we can amortize the + * cost of MMIO writes. */ +static inline int +netdev_dpdk_eth_tx_queue(struct netdev_dpdk *dev, int qid, + struct rte_mbuf **pkts, int cnt) +{ + struct dpdk_tx_queue *txq = &dev->tx_q[qid]; + + int i = 0; + int dropped = 0; + + while (i < cnt) { + int freeslots = INTERIM_QUEUE_BURST_THRESHOLD - txq->dpdk_pkt_cnt; + int tocopy = MIN(freeslots, cnt-i); + + memcpy(&txq->dpdk_burst_pkts[txq->dpdk_pkt_cnt], &pkts[i], + tocopy * sizeof (struct rte_mbuf *)); + + txq->dpdk_pkt_cnt += tocopy; + i += tocopy; + + /* Queue full, burst the packets. */ + if (txq->dpdk_pkt_cnt >= INTERIM_QUEUE_BURST_THRESHOLD) { + dropped += netdev_dpdk_eth_tx_burst(dev, qid, txq->dpdk_burst_pkts, + txq->dpdk_pkt_cnt); + } + } + return dropped; +} + /* Tx function. 
Transmit packets indefinitely */ static void dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch) @@ -1962,7 +1995,7 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch) newcnt = netdev_dpdk_qos_run(dev, pkts, newcnt); dropped += qos_pkts - newcnt; - dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, newcnt); + dropped += netdev_dpdk_eth_tx_queue(dev, qid, pkts, newcnt); } if (OVS_UNLIKELY(dropped)) { @@ -2020,7 +2053,7 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid, cnt = netdev_dpdk_qos_run(dev, pkts, cnt); dropped = batch->count - cnt; - dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt); + dropped += netdev_dpdk_eth_tx_queue(dev, qid, pkts, cnt); if (OVS_UNLIKELY(dropped)) { rte_spinlock_lock(&dev->stats_lock); -- 2.4.11 _______________________________________________ dev mailing list d...@openvswitch.org https://mail.openvswitch.org/mailman/listinfo/ovs-dev