Rather than poll all disabled queues and waste memory on VMs that have
been shut down, we can reconfigure when receiving a destroy connection
notification from the vhost library.

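For reference, the notification arrives via the 'destroy_connection'
callback of DPDK's struct vhost_device_ops (see the hunks below). As a
minimal standalone sketch, registering such a callback with the vhost
library looks roughly like this (the 'example_' names are hypothetical,
not OVS code):

    #include <stdio.h>
    #include <rte_vhost.h>

    /* Called by the vhost library when a guest connection on the
     * socket goes away. */
    static void
    example_destroy_connection(int vid)
    {
        char ifname[128];

        rte_vhost_get_ifname(vid, ifname, sizeof ifname);
        printf("connection on '%s' destroyed\n", ifname);
    }

    static const struct vhost_device_ops example_ops = {
        .destroy_connection = example_destroy_connection,
    };

    /* Callbacks must be registered before rte_vhost_driver_start(). */
    int
    example_register(const char *path)
    {
        return rte_vhost_driver_callback_register(path, &example_ops);
    }
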
$ while true; do
      ovs-appctl dpif-netdev/pmd-rxq-show | awk '
          /port: / {
              tot++;
              if ($5 == "(enabled)") {
                  en++;
              }
          }
          END {
              print "total: " tot ", enabled: " en
          }'
      sleep 1
  done
Sample output while a guest is attached and all of its queues are
polled:

total: 66, enabled: 66

After the guest has been shut down and its connection destroyed, the
ports fall back to the default number of queue pairs:

total: 6, enabled: 2

This change requires a fix in the DPDK vhost library, so bump the
minimum required DPDK version to 18.11.2.

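As an illustration only (not part of this patch), a build-time guard
against an older DPDK could use the version macros from rte_version.h:

    #include <rte_version.h>

    /* Hypothetical guard: refuse to build with DPDK older than 18.11.2. */
    #if RTE_VERSION < RTE_VERSION_NUM(18, 11, 2, 0)
    #error "DPDK 18.11.2 or newer is required."
    #endif
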
Co-authored-by: Ilya Maximets <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
Signed-off-by: David Marchand <[email protected]>
---
Changes since v3:
- rebased on master following 18.11.2 support
- added a note in NEWS about minimal dpdk version
Changes since v2:
- added authorship tags for Ilya
- added logs in destroy_connection
---
 NEWS              |  4 +++-
 lib/netdev-dpdk.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/NEWS b/NEWS
index c03e287..2f8171f 100644
--- a/NEWS
+++ b/NEWS
@@ -4,7 +4,9 @@ Post-v2.11.0
      * New option 'other_config:dpdk-socket-limit' to limit amount of
        hugepage memory that can be used by DPDK.
      * Add support for vHost Post-copy Live Migration (experimental).
-     * OVS validated with DPDK 18.11.2 which is recommended to be used.
+     * OVS validated with DPDK 18.11.2, which is now the minimal supported
+       version.
+     * DPDK 18.11.1 and earlier are no longer supported.
    - OpenFlow:
      * All features required by OpenFlow 1.5 are now implemented, so
        ovs-vswitchd now enables OpenFlow 1.5 by default (in addition to
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index b7f4543..0b9bea4 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -185,12 +185,15 @@ static const struct rte_eth_conf port_conf = {
 static int new_device(int vid);
 static void destroy_device(int vid);
 static int vring_state_changed(int vid, uint16_t queue_id, int enable);
+static void destroy_connection(int vid);
 static const struct vhost_device_ops virtio_net_device_ops =
 {
     .new_device = new_device,
     .destroy_device = destroy_device,
     .vring_state_changed = vring_state_changed,
-    .features_changed = NULL
+    .features_changed = NULL,
+    .new_connection = NULL,
+    .destroy_connection = destroy_connection,
 };

 enum { DPDK_RING_SIZE = 256 };
@@ -3663,6 +3666,48 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
     return 0;
 }

+static void
+destroy_connection(int vid)
+{
+    struct netdev_dpdk *dev;
+    char ifname[IF_NAME_SZ];
+    bool exists = false;
+
+    rte_vhost_get_ifname(vid, ifname, sizeof ifname);
+
+    ovs_mutex_lock(&dpdk_mutex);
+    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
+        ovs_mutex_lock(&dev->mutex);
+        if (nullable_string_is_equal(ifname, dev->vhost_id)) {
+            uint32_t qp_num = NR_QUEUE;
+
+            if (netdev_dpdk_get_vid(dev) >= 0) {
+                VLOG_ERR("Connection on socket '%s' destroyed while vhost "
+                         "device still attached.", dev->vhost_id);
+            }
+
+            /* Restore the number of queue pairs to default. */
+            if (dev->requested_n_txq != qp_num
+                || dev->requested_n_rxq != qp_num) {
+                dev->requested_n_rxq = qp_num;
+                dev->requested_n_txq = qp_num;
+                netdev_request_reconfigure(&dev->up);
+            }
+            ovs_mutex_unlock(&dev->mutex);
+            exists = true;
+            break;
+        }
+        ovs_mutex_unlock(&dev->mutex);
+    }
+    ovs_mutex_unlock(&dpdk_mutex);
+
+    if (exists) {
+        VLOG_INFO("vHost Device '%s' connection has been destroyed", ifname);
+    } else {
+        VLOG_INFO("vHost Device '%s' not found", ifname);
+    }
+}
+
 /*
  * Retrieve the DPDK virtio device ID (vid) associated with a vhostuser
  * or vhostuserclient netdev.
--
1.8.3.1