Rather than polling all disabled queues and wasting some memory for VMs
that have been shut down, we can reconfigure when receiving a destroy
connection notification from the vhost library.
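As a rough illustration (the function names and socket-path handling below
are made up for the example; the actual OVS wiring on virtio_net_device_ops
is in the diff further down), this is how an application registers such a
callback with the vhost library:

    /* Illustrative sketch only: getting notified of a vhost-user
     * connection teardown through the DPDK vhost library. */
    #include <rte_vhost.h>
    #include <stdio.h>

    static void
    example_destroy_connection(int vid)
    {
        char ifname[128];

        /* Map the vhost device id back to its unix socket path. */
        rte_vhost_get_ifname(vid, ifname, sizeof ifname);
        printf("connection on '%s' destroyed\n", ifname);
        /* This is where queue reconfiguration would be triggered. */
    }

    static const struct vhost_device_ops example_ops = {
        .destroy_connection = example_destroy_connection,
    };

    /* Callbacks must be registered after rte_vhost_driver_register()
     * and before rte_vhost_driver_start() for the given socket path. */
    int
    example_register(const char *sock_path)
    {
        return rte_vhost_driver_callback_register(sock_path, &example_ops);
    }

The effect of the reconfiguration can be observed by counting polled and
enabled rxqs once per second: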
$ while true; do
      ovs-appctl dpif-netdev/pmd-rxq-show | awk '
          /port: / {
              tot++;
              if ($5 == "(enabled)") {
                  en++;
              }
          }
          END {
              print "total: " tot ", enabled: " en
          }'
      sleep 1
  done
(while the guest is connected)
total: 66, enabled: 66
(after the guest has been shut down)
total: 6, enabled: 2
Co-authored-by: Ilya Maximets <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
Signed-off-by: David Marchand <[email protected]>
---
This patch requires a dpdk fix for the vhost library:
https://git.dpdk.org/dpdk/commit/?id=2344395df2e9
It would be best to apply this patch once that dpdk fix has been
backported to the 18.11 stable branch, most likely in 18.11.2.
I can resubmit it later if needed.
Changes since v2:
- added authorship tags for Ilya
- added logs in destroy_connection
---
lib/netdev-dpdk.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 3372373..79f3983 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -186,12 +186,15 @@ static const struct rte_eth_conf port_conf = {
static int new_device(int vid);
static void destroy_device(int vid);
static int vring_state_changed(int vid, uint16_t queue_id, int enable);
+static void destroy_connection(int vid);
static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
.vring_state_changed = vring_state_changed,
- .features_changed = NULL
+ .features_changed = NULL,
+ .new_connection = NULL,
+ .destroy_connection = destroy_connection,
};
enum { DPDK_RING_SIZE = 256 };
@@ -3661,6 +3664,48 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
return 0;
}
+static void
+destroy_connection(int vid)
+{
+ struct netdev_dpdk *dev;
+ char ifname[IF_NAME_SZ];
+ bool exists = false;
+
+ rte_vhost_get_ifname(vid, ifname, sizeof ifname);
+
+ ovs_mutex_lock(&dpdk_mutex);
+ LIST_FOR_EACH (dev, list_node, &dpdk_list) {
+ ovs_mutex_lock(&dev->mutex);
+ if (nullable_string_is_equal(ifname, dev->vhost_id)) {
+ uint32_t qp_num = NR_QUEUE;
+
+ if (netdev_dpdk_get_vid(dev) >= 0) {
+ VLOG_ERR("Connection on socket '%s' destroyed while vhost "
+ "device still attached.", dev->vhost_id);
+ }
+
+ /* Restore the number of queue pairs to default. */
+ if (dev->requested_n_txq != qp_num
+ || dev->requested_n_rxq != qp_num) {
+ dev->requested_n_rxq = qp_num;
+ dev->requested_n_txq = qp_num;
+ netdev_request_reconfigure(&dev->up);
+ }
+ ovs_mutex_unlock(&dev->mutex);
+ exists = true;
+ break;
+ }
+ ovs_mutex_unlock(&dev->mutex);
+ }
+ ovs_mutex_unlock(&dpdk_mutex);
+
+ if (exists) {
+ VLOG_INFO("vHost Device '%s' connection has been destroyed", ifname);
+ } else {
+ VLOG_INFO("vHost Device '%s' not found", ifname);
+ }
+}
+
/*
* Retrieve the DPDK virtio device ID (vid) associated with a vhostuser
* or vhostuserclient netdev.
--
1.8.3.1