Some dynamic state of a virtio-net vDPA device is restored from CVQ in
the event of a live migration.  However, the dataplane must be disabled
first so the NIC does not receive buffers in rings that are not valid
yet.

As the default method to achieve this, offer the device a shadow vring
whose avail idx stays at 0, so it never sees buffers to use.  As a
fallback, the dataplane vqs will be enabled later, as proposed
previously.
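
The following is a minimal standalone sketch (not QEMU code; names and
layout are illustrative only) of why a vring whose avail idx stays at 0
keeps the device dataplane quiescent:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified split-vring avail ring, as in the virtio spec */
    struct vring_avail {
        uint16_t flags;
        uint16_t idx;          /* written by the driver */
        uint16_t ring[256];
    };

    /* Device-side poll: consume buffers only if the driver published any */
    static int device_poll(const struct vring_avail *avail,
                           uint16_t *last_avail_idx)
    {
        int taken = 0;
        while (*last_avail_idx != avail->idx) {
            /* a real device would DMA into the buffer here */
            (*last_avail_idx)++;
            taken++;
        }
        return taken;
    }

    int main(void)
    {
        struct vring_avail shadow = { .idx = 0 };  /* avail idx kept at 0 */
        uint16_t last_avail_idx = 0;

        /* While the state is restored through CVQ, no buffers are taken */
        printf("during restore: %d\n", device_poll(&shadow, &last_avail_idx));

        /* After ring reset + restart, the real avail idx is visible */
        shadow.idx = 3;
        printf("after restart: %d\n", device_poll(&shadow, &last_avail_idx));
        return 0;
    }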

Signed-off-by: Eugenio Pérez <epere...@redhat.com>
---
 net/vhost-vdpa.c | 49 +++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 44 insertions(+), 5 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index af83de92f8..e14ae48f23 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -338,10 +338,25 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
     struct vhost_vdpa *v = &s->vhost_vdpa;
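+    /* Data vqs come in pairs, so an odd vq_index_end implies a CVQ */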
+    bool has_cvq = v->dev->vq_index_end % 2;
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
-    if (s->always_svq ||
+    if (has_cvq &&
+        virtio_has_feature(v->dev->features, VIRTIO_F_RING_RESET)) {
+        /*
+         * Offer a fake vring to the device while the state is restored
+         * through CVQ.  That way, the guest will not see packets in unexpected
+         * queues.
+         *
+         * This will be undone after loading all state through CVQ, at
+         * vhost_vdpa_net_load.
+         *
+         * TODO: Future optimizations may skip some SVQ setup and teardown,
+         * like setting the right kick and call fds or doorbell maps
+         * directly, and reusing the iova tree.
+         */
+        v->shadow_vqs_enabled = true;
+    } else if (s->always_svq ||
         migration_is_setup_or_active(migrate_get_current()->state)) {
         v->shadow_vqs_enabled = true;
         v->shadow_data = true;
@@ -738,10 +753,34 @@ static int vhost_vdpa_net_load(NetClientState *nc)
         return r;
     }
 
-    for (int i = 0; i < v->dev->vq_index; ++i) {
-        r = vhost_vdpa_set_vring_ready(v, i);
-        if (unlikely(r)) {
-            return r;
+    if (virtio_has_feature(v->dev->features, VIRTIO_F_RING_RESET) &&
+        !s->always_svq &&
+        !migration_is_setup_or_active(migrate_get_current()->state)) {
+        NICState *nic = qemu_get_nic(s->nc.peer);
+        int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
+
+        for (int i = 0; i < queue_pairs; ++i) {
+            NetClientState *ncs = qemu_get_peer(nic->ncs, i);
+            VhostVDPAState *s_i = DO_UPCAST(VhostVDPAState, nc, ncs);
+
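+            /* Reset both vqs of the pair before leaving SVQ mode */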
+            for (int j = 0; j < 2; ++j) {
+                vhost_net_virtqueue_reset(v->dev->vdev, ncs->peer, j);
+            }
+
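+            /* From now on, expose the guest's vrings to the device */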
+            s_i->vhost_vdpa.shadow_vqs_enabled = false;
+
+            for (int j = 0; j < 2; ++j) {
+                r = vhost_net_virtqueue_restart(v->dev->vdev, ncs->peer, j);
+                if (unlikely(r < 0)) {
+                    return r;
+                }
+            }
+        }
+    } else {
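+        /* Fallback: enable the dataplane vqs now that the state is loaded */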
+        for (int i = 0; i < v->dev->vq_index; ++i) {
+            r = vhost_vdpa_set_vring_ready(v, i);
+            if (unlikely(r)) {
+                return r;
+            }
         }
     }
 
-- 
2.39.3

