Re: [Qemu-devel] [PATCH v4 10/11] virtio: migration support for packed ring

2019-02-19 Thread Jason Wang



On 2019/2/19 7:00 PM, Wei Xu wrote:

On Tue, Feb 19, 2019 at 03:30:41PM +0800, Jason Wang wrote:

On 2019/2/14 12:26 PM, w...@redhat.com wrote:

From: Wei Xu

Both userspace and vhost-net/user are supported with this patch.

A new subsection is introduced for the packed ring; only 'last_avail_idx'
and 'last_avail_wrap_counter' are saved/loaded, on the assumption that
all the other relevant data (inuse, used/avail index, and wrap count)
should be the same.

This is probably only true for the net device; see the comment in virtio_load():

    /*
     * Some devices migrate VirtQueueElements that have been popped
     * from the avail ring but not yet returned to the used ring.
     * Since max ring size < UINT16_MAX it's safe to use modulo
     * UINT16_MAX + 1 subtraction.
     */
    vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                   vdev->vq[i].used_idx);


So you need to migrate used_idx and used_wrap_counter, since we don't have
a used idx.
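
(Editorial aside: a minimal standalone sketch, with invented values, of
why the modulo-UINT16_MAX + 1 subtraction above stays correct across a
16-bit wraparound:)

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* last_avail_idx has wrapped past UINT16_MAX; used_idx has not. */
        uint16_t last_avail_idx = 3;      /* 65539 mod 65536 */
        uint16_t used_idx = 65533;        /* six elements behind */

        /* Unsigned 16-bit subtraction recovers the true in-flight count. */
        uint16_t inuse = (uint16_t)(last_avail_idx - used_idx);
        assert(inuse == 6);
        return 0;
    }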

This is trying to align with vhost-net/user as we discussed; since all we
have done is support the virtio-net device for the packed ring,



Well, what you did probably works for virtio-net. But again, virtio-net
is not the only device we want to support.




maybe we can
consider supporting other devices after we have it verified.



Please don't; fix the issue. The packed virtqueue is a generic layout
change that should work for all kinds of virtio devices. It may be
specially optimized for net, but that doesn't mean it was designed
only for net.


Thanks




Re: [Qemu-devel] [PATCH v4 10/11] virtio: migration support for packed ring

2019-02-19 Thread Wei Xu
On Tue, Feb 19, 2019 at 03:30:41PM +0800, Jason Wang wrote:
> 
> On 2019/2/14 12:26 PM, w...@redhat.com wrote:
> >From: Wei Xu 
> >
> >Both userspace and vhost-net/user are supported with this patch.
> >
> >A new subsection is introduced for the packed ring; only 'last_avail_idx'
> >and 'last_avail_wrap_counter' are saved/loaded, on the assumption that
> >all the other relevant data (inuse, used/avail index, and wrap count)
> >should be the same.
> 
> 
> This is probably only true for the net device; see the comment in virtio_load():
> 
>     /*
>      * Some devices migrate VirtQueueElements that have been popped
>      * from the avail ring but not yet returned to the used ring.
>      * Since max ring size < UINT16_MAX it's safe to use modulo
>      * UINT16_MAX + 1 subtraction.
>      */
>     vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
>                                    vdev->vq[i].used_idx);
> 
> 
> So you need to migrate used_idx and used_wrap_counter, since we don't have
> a used idx.

This is trying to align with vhost-net/user as we discussed; since all we
have done is support the virtio-net device for the packed ring, maybe we can
consider supporting other devices after we have it verified.

> 
> 
> >
> >Signed-off-by: Wei Xu 
> >---
> >  hw/virtio/virtio.c | 69 +++---
> >  1 file changed, 66 insertions(+), 3 deletions(-)
> >
> >diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> >index 8cfc7b6..7c5de07 100644
> >--- a/hw/virtio/virtio.c
> >+++ b/hw/virtio/virtio.c
> >@@ -2349,6 +2349,13 @@ static bool virtio_virtqueue_needed(void *opaque)
> >  return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
> >  }
> >+static bool virtio_packed_virtqueue_needed(void *opaque)
> >+{
> >+VirtIODevice *vdev = opaque;
> >+
> >+return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
> >+}
> >+
> >  static bool virtio_ringsize_needed(void *opaque)
> >  {
> >  VirtIODevice *vdev = opaque;
> >@@ -2390,6 +2397,17 @@ static const VMStateDescription vmstate_virtqueue = {
> >  }
> >  };
> >+static const VMStateDescription vmstate_packed_virtqueue = {
> >+.name = "packed_virtqueue_state",
> >+.version_id = 1,
> >+.minimum_version_id = 1,
> >+.fields = (VMStateField[]) {
> >+VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
> >+VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
> >+VMSTATE_END_OF_LIST()
> >+}
> >+};
> >+
> >  static const VMStateDescription vmstate_virtio_virtqueues = {
> >  .name = "virtio/virtqueues",
> >  .version_id = 1,
> >@@ -2402,6 +2420,18 @@ static const VMStateDescription vmstate_virtio_virtqueues = {
> >  }
> >  };
> >+static const VMStateDescription vmstate_virtio_packed_virtqueues = {
> >+.name = "virtio/packed_virtqueues",
> >+.version_id = 1,
> >+.minimum_version_id = 1,
> >+.needed = &virtio_packed_virtqueue_needed,
> >+.fields = (VMStateField[]) {
> >+VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
> >+  VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
> >+VMSTATE_END_OF_LIST()
> >+}
> >+};
> >+
> >  static const VMStateDescription vmstate_ringsize = {
> >  .name = "ringsize_state",
> >  .version_id = 1,
> >@@ -2522,6 +2552,7 @@ static const VMStateDescription vmstate_virtio = {
> >  &vmstate_virtio_ringsize,
> >  &vmstate_virtio_broken,
> >  &vmstate_virtio_extra_state,
> >+&vmstate_virtio_packed_virtqueues,
> >  NULL
> >  }
> >  };
> >@@ -2794,6 +2825,17 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
> >  virtio_queue_update_rings(vdev, i);
> >  }
> >+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
> >+vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
> >+vdev->vq[i].avail_wrap_counter =
> >+vdev->vq[i].last_avail_wrap_counter;
> >+
> >+vdev->vq[i].used_idx = vdev->vq[i].last_avail_idx;
> >+vdev->vq[i].used_wrap_counter =
> >+vdev->vq[i].last_avail_wrap_counter;
> >+continue;
> >+}
> >+
> >  nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
> >  /* Check it isn't doing strange things with descriptor numbers. */
> >  if (nheads > vdev->vq[i].vring.num) {
> >@@ -2955,17 +2997,34 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
> >  uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
> >  {
> >-return vdev->vq[n].last_avail_idx;
> >+uint16_t idx;
> >+
> >+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
> >+idx = vdev->vq[n].last_avail_idx;
> >+idx |= ((int)vdev->vq[n].avail_wrap_counter) << 15;
> >+} else {

Re: [Qemu-devel] [PATCH v4 10/11] virtio: migration support for packed ring

2019-02-18 Thread Jason Wang



On 2019/2/14 12:26 PM, w...@redhat.com wrote:

From: Wei Xu 

Both userspace and vhost-net/user are supported with this patch.

A new subsection is introduced for the packed ring; only 'last_avail_idx'
and 'last_avail_wrap_counter' are saved/loaded, on the assumption that
all the other relevant data (inuse, used/avail index, and wrap count)
should be the same.



This is probably only true for the net device; see the comment in virtio_load():

    /*
     * Some devices migrate VirtQueueElements that have been popped
     * from the avail ring but not yet returned to the used ring.
     * Since max ring size < UINT16_MAX it's safe to use modulo
     * UINT16_MAX + 1 subtraction.
     */
    vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                   vdev->vq[i].used_idx);


So you need to migrate used_idx and used_wrap_counter, since we don't
have a used idx.
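
(Editorial aside: a sketch of the subsection with this suggestion
applied — not code from the thread; it assumes the used_idx and
used_wrap_counter fields on struct VirtQueue that the rest of the
patch already relies on:)

    static const VMStateDescription vmstate_packed_virtqueue = {
        .name = "packed_virtqueue_state",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
            VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
            /* Also migrate the used side, so devices that keep elements
             * in flight (popped but not yet returned) restore correctly. */
            VMSTATE_UINT16(used_idx, struct VirtQueue),
            VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
            VMSTATE_END_OF_LIST()
        }
    };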





Signed-off-by: Wei Xu 
---
  hw/virtio/virtio.c | 69 +++---
  1 file changed, 66 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 8cfc7b6..7c5de07 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -2349,6 +2349,13 @@ static bool virtio_virtqueue_needed(void *opaque)
  return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
  }
  
+static bool virtio_packed_virtqueue_needed(void *opaque)
+{
+VirtIODevice *vdev = opaque;
+
+return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
+}
+
  static bool virtio_ringsize_needed(void *opaque)
  {
  VirtIODevice *vdev = opaque;
@@ -2390,6 +2397,17 @@ static const VMStateDescription vmstate_virtqueue = {
  }
  };
  
+static const VMStateDescription vmstate_packed_virtqueue = {
+.name = "packed_virtqueue_state",
+.version_id = 1,
+.minimum_version_id = 1,
+.fields = (VMStateField[]) {
+VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
+VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
+VMSTATE_END_OF_LIST()
+}
+};
+
  static const VMStateDescription vmstate_virtio_virtqueues = {
  .name = "virtio/virtqueues",
  .version_id = 1,
@@ -2402,6 +2420,18 @@ static const VMStateDescription vmstate_virtio_virtqueues = {
  }
  };
  
+static const VMStateDescription vmstate_virtio_packed_virtqueues = {
+.name = "virtio/packed_virtqueues",
+.version_id = 1,
+.minimum_version_id = 1,
+.needed = &virtio_packed_virtqueue_needed,
+.fields = (VMStateField[]) {
+VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
+  VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
+VMSTATE_END_OF_LIST()
+}
+};
+
  static const VMStateDescription vmstate_ringsize = {
  .name = "ringsize_state",
  .version_id = 1,
@@ -2522,6 +2552,7 @@ static const VMStateDescription vmstate_virtio = {
  &vmstate_virtio_ringsize,
  &vmstate_virtio_broken,
  &vmstate_virtio_extra_state,
+&vmstate_virtio_packed_virtqueues,
  NULL
  }
  };
@@ -2794,6 +2825,17 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
  virtio_queue_update_rings(vdev, i);
  }
  
+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
+vdev->vq[i].avail_wrap_counter =
+vdev->vq[i].last_avail_wrap_counter;
+
+vdev->vq[i].used_idx = vdev->vq[i].last_avail_idx;
+vdev->vq[i].used_wrap_counter =
+vdev->vq[i].last_avail_wrap_counter;
+continue;
+}
+
  nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
  /* Check it isn't doing strange things with descriptor numbers. */
  if (nheads > vdev->vq[i].vring.num) {
@@ -2955,17 +2997,34 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
  
  uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
  {
-return vdev->vq[n].last_avail_idx;
+uint16_t idx;
+
+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+idx = vdev->vq[n].last_avail_idx;
+idx |= ((int)vdev->vq[n].avail_wrap_counter) << 15;
+} else {
+idx = (int)vdev->vq[n].last_avail_idx;
+}
+return idx;
  }
  
  void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
  {
-vdev->vq[n].last_avail_idx = idx;
-vdev->vq[n].shadow_avail_idx = idx;
+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+vdev->vq[n].last_avail_idx = idx & 0x7fff;
+vdev->vq[n].avail_wrap_counter = !!(idx & 0x8000);



Let's define some macros for those magic numbers.



+} else {
+vdev->vq[n].last_avail_idx = idx;
+vdev->vq[n].shadow_avail_idx = idx;
+}
  }
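
(Editorial aside: one possible shape for the macros Jason asks for; the
names below are invented for illustration and are not from the thread:)

    /* Hypothetical names: bit 15 of the migrated index carries the avail
     * wrap counter; the low 15 bits carry the ring index. */
    #define VRING_PACKED_WRAP_COUNTER_BIT  0x8000
    #define VRING_PACKED_IDX_MASK          0x7fff

        vdev->vq[n].last_avail_idx = idx & VRING_PACKED_IDX_MASK;
        vdev->vq[n].avail_wrap_counter =
            !!(idx & VRING_PACKED_WRAP_COUNTER_BIT);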
  
  void 

[Qemu-devel] [PATCH v4 10/11] virtio: migration support for packed ring

2019-02-13 Thread wexu
From: Wei Xu 

Both userspace and vhost-net/user are supported with this patch.

A new subsection is introduced for the packed ring; only 'last_avail_idx'
and 'last_avail_wrap_counter' are saved/loaded, on the assumption that
all the other relevant data (inuse, used/avail index, and wrap count)
should be the same.

Signed-off-by: Wei Xu 
---
 hw/virtio/virtio.c | 69 +++---
 1 file changed, 66 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 8cfc7b6..7c5de07 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -2349,6 +2349,13 @@ static bool virtio_virtqueue_needed(void *opaque)
 return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
 }
 
+static bool virtio_packed_virtqueue_needed(void *opaque)
+{
+VirtIODevice *vdev = opaque;
+
+return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
+}
+
 static bool virtio_ringsize_needed(void *opaque)
 {
 VirtIODevice *vdev = opaque;
@@ -2390,6 +2397,17 @@ static const VMStateDescription vmstate_virtqueue = {
 }
 };
 
+static const VMStateDescription vmstate_packed_virtqueue = {
+.name = "packed_virtqueue_state",
+.version_id = 1,
+.minimum_version_id = 1,
+.fields = (VMStateField[]) {
+VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
+VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
+VMSTATE_END_OF_LIST()
+}
+};
+
 static const VMStateDescription vmstate_virtio_virtqueues = {
 .name = "virtio/virtqueues",
 .version_id = 1,
@@ -2402,6 +2420,18 @@ static const VMStateDescription vmstate_virtio_virtqueues = {
 }
 };
 
+static const VMStateDescription vmstate_virtio_packed_virtqueues = {
+.name = "virtio/packed_virtqueues",
+.version_id = 1,
+.minimum_version_id = 1,
+.needed = &virtio_packed_virtqueue_needed,
+.fields = (VMStateField[]) {
+VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
+  VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
+VMSTATE_END_OF_LIST()
+}
+};
+
 static const VMStateDescription vmstate_ringsize = {
 .name = "ringsize_state",
 .version_id = 1,
@@ -2522,6 +2552,7 @@ static const VMStateDescription vmstate_virtio = {
 &vmstate_virtio_ringsize,
 &vmstate_virtio_broken,
 &vmstate_virtio_extra_state,
+&vmstate_virtio_packed_virtqueues,
 NULL
 }
 };
@@ -2794,6 +2825,17 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
 virtio_queue_update_rings(vdev, i);
 }
 
+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
+vdev->vq[i].avail_wrap_counter =
+vdev->vq[i].last_avail_wrap_counter;
+
+vdev->vq[i].used_idx = vdev->vq[i].last_avail_idx;
+vdev->vq[i].used_wrap_counter =
+vdev->vq[i].last_avail_wrap_counter;
+continue;
+}
+
 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
 /* Check it isn't doing strange things with descriptor numbers. */
 if (nheads > vdev->vq[i].vring.num) {
@@ -2955,17 +2997,34 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
 
 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
 {
-return vdev->vq[n].last_avail_idx;
+uint16_t idx;
+
+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+idx = vdev->vq[n].last_avail_idx;
+idx |= ((int)vdev->vq[n].avail_wrap_counter) << 15;
+} else {
+idx = (int)vdev->vq[n].last_avail_idx;
+}
+return idx;
 }
 
 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
 {
-vdev->vq[n].last_avail_idx = idx;
-vdev->vq[n].shadow_avail_idx = idx;
+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+vdev->vq[n].last_avail_idx = idx & 0x7fff;
+vdev->vq[n].avail_wrap_counter = !!(idx & 0x8000);
+} else {
+vdev->vq[n].last_avail_idx = idx;
+vdev->vq[n].shadow_avail_idx = idx;
+}
 }
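
(Editorial aside: a standalone sketch, for illustration only, of the
bit-15 round trip that the get/set pair above performs:)

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t last_avail_idx = 0x1234;   /* any index below 0x8000 */
        bool wrap = true;

        /* Pack: wrap counter into bit 15, index into bits 0..14. */
        uint16_t packed = last_avail_idx | ((uint16_t)wrap << 15);

        /* Unpack and verify the round trip. */
        assert((packed & 0x7fff) == last_avail_idx);
        assert(!!(packed & 0x8000) == wrap);
        return 0;
    }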
 
 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
 {
+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+return;
+}
+
 rcu_read_lock();
 if (vdev->vq[n].vring.desc) {
 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
@@ -2976,6 +3035,10 @@ void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
 
 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
 {
+if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+return;
+}
+
 rcu_read_lock();
 if (vdev->vq[n].vring.desc) {
 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
-- 
1.8.3.1