On 1/31/2019 2:47 AM, Li RongQing wrote:
netdev_dpdk_filter_packet_len() does not need to be protected by
tx_q[].tx_lock, and tx_q[].tx_lock does not actually protect it either;
the same applies to netdev_dpdk_qos_run().
Move them out of this lock to improve scalability.
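For reference, the reordering in __netdev_dpdk_vhost_send() amounts to the
following (a simplified before/after sketch of the locking, not the literal
code):

    /* Before: length filtering and QoS ran inside the critical section. */
    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
    /* ... rte_vhost_enqueue_burst() loop ... */
    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    /* After: only the enqueue loop holds the per-queue lock, so threads
     * contending for the same tx queue hold the lock for less time. */
    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    /* ... rte_vhost_enqueue_burst() loop ... */
    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);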
Thanks for the patch, Li. Can you give more detail on what you mean in
terms of scalability? The changes below are small, so I'm curious as to
the use case where you're seeing an improvement.
Signed-off-by: Li RongQing <[email protected]>
---
lib/netdev-dpdk.c | 33 ++++++++++++++++++++++-----------
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 4bf0ca9e8..bf4918e2c 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -2333,15 +2333,15 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
goto out;
}
- rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
-
cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
/* Check if QoS has been configured for the netdev. */
cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
dropped = total_pkts - cnt;
+ rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+
+ int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
do {
- int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
unsigned int tx_pkts;
tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt);
@@ -2462,15 +2462,20 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
return;
}
- if (OVS_UNLIKELY(concurrent_txq)) {
- qid = qid % dev->up.n_txq;
- rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
- }
-
if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
struct netdev *netdev = &dev->up;
The change below introduces code duplication in both the if and else
statements, specifically for the unlikely case. I'm reluctant to introduce
this change, as the key benefit seems to be in the case where concurrent
txqs are used, which to date has not been the common use case for the
wider community. I take it that case is the beneficiary here?
Ian
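Purely as an illustration, the duplication could be factored into small
helpers; the names below are hypothetical and not part of the patch:

    /* Hypothetical helpers, named here only for illustration. */
    static inline void
    netdev_dpdk_txq_lock(struct netdev_dpdk *dev, int *qid,
                         bool concurrent_txq)
    {
        if (OVS_UNLIKELY(concurrent_txq)) {
            *qid = *qid % dev->up.n_txq;
            rte_spinlock_lock(&dev->tx_q[*qid].tx_lock);
        }
    }

    static inline void
    netdev_dpdk_txq_unlock(struct netdev_dpdk *dev, int qid,
                           bool concurrent_txq)
    {
        if (OVS_UNLIKELY(concurrent_txq)) {
            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
        }
    }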
+ if (OVS_UNLIKELY(concurrent_txq)) {
+ qid = qid % dev->up.n_txq;
+ rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+ }
+
dpdk_do_tx_copy(netdev, qid, batch);
+
+ if (OVS_UNLIKELY(concurrent_txq)) {
+ rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+ }
+
dp_packet_delete_batch(batch, true);
} else {
int tx_cnt, dropped;
@@ -2481,8 +2486,17 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
dropped = batch_cnt - tx_cnt;
+ if (OVS_UNLIKELY(concurrent_txq)) {
+ qid = qid % dev->up.n_txq;
+ rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+ }
+
dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
+ if (OVS_UNLIKELY(concurrent_txq)) {
+ rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+ }
+
if (OVS_UNLIKELY(dropped)) {
rte_spinlock_lock(&dev->stats_lock);
dev->stats.tx_dropped += dropped;
@@ -2490,9 +2504,6 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
}
}
- if (OVS_UNLIKELY(concurrent_txq)) {
- rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
- }
}
static int