On Mon, Mar 24, 2025 at 3:00 PM Sahil Siddiq <icegambi...@gmail.com> wrote:
>
> Allocate memory for the packed vq format and map it to the vdpa device.
>
> Since "struct vring" and "struct vring_packed's vring" both have the same
> memory layout, the implementation in SVQ start and SVQ stop should not
> differ based on the vq's format.
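>
> For reference, a rough sketch of the two layouts this relies on. The split
> ring's "struct vring" comes from the standard virtio ring headers; the
> packed variant is the one introduced earlier in this series, so exact field
> names may differ slightly. Both are an element count followed by three ring
> pointers, which is why code that only touches svq->vring.num, .desc, .avail
> and .used works for either format:
>
>     /* Split ring (standard virtio ring headers) */
>     struct vring {
>         unsigned int num;
>         vring_desc_t *desc;
>         vring_avail_t *avail;
>         vring_used_t *used;
>     };
>
>     /* Packed ring: nested "vring" member of struct vring_packed */
>     struct vring_packed {
>         struct {
>             unsigned int num;
>             struct vring_packed_desc *desc;
>             struct vring_packed_desc_event *driver;
>             struct vring_packed_desc_event *device;
>         } vring;
>         /* ... wrap counter, avail/used flags, next_avail_idx ... */
>     };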
>
> Also initialize the flags, counters and indices of packed vqs before they
> are used.
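>
> (Background on those initial values, roughly following the virtio 1.1
> packed ring layout and the convention used by the Linux virtio driver:
> the driver's wrap counter starts at 1 and descriptors are made available
> with AVAIL=1/USED=0 on the first ring pass, while the used-side wrap
> counter is kept packed into bit VRING_PACKED_EVENT_F_WRAP_CTR of
> last_used_idx, e.g.:
>
>     uint16_t used_idx = svq->last_used_idx &
>                         ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
>     bool used_wrap_counter = !!(svq->last_used_idx &
>                                 (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
>
> The snippet is only a sketch of how that packed value is expected to be
> unpacked by later patches; it is not part of this change.)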
>
> Signed-off-by: Sahil Siddiq <sahil...@proton.me>

Acked-by: Eugenio Pérez <epere...@redhat.com>

> ---
> Changes from v4 -> v5:
> - vhost-shadow-virtqueue.c:
>   (vhost_svq_start): Initialize variables used by packed vring.
>
>  hw/virtio/vhost-shadow-virtqueue.c | 52 +++++++++++++++++++++---------
>  hw/virtio/vhost-shadow-virtqueue.h |  1 +
>  hw/virtio/vhost-vdpa.c             | 37 +++++++++++++++++----
>  3 files changed, 69 insertions(+), 21 deletions(-)
>
> diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
> index 6e16cd4bdf..126957231d 100644
> --- a/hw/virtio/vhost-shadow-virtqueue.c
> +++ b/hw/virtio/vhost-shadow-virtqueue.c
> @@ -707,19 +707,33 @@ void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
>      addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
>  }
>
> -size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
> +size_t vhost_svq_descriptor_area_size(const VhostShadowVirtqueue *svq)
>  {
>      size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
> -    size_t avail_size = offsetof(vring_avail_t, ring[svq->vring.num]) +
> -                                                              sizeof(uint16_t);
> +    return ROUND_UP(desc_size, qemu_real_host_page_size());
> +}
>
> -    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
> +size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
> +{
> +    size_t avail_size;
> +    if (svq->is_packed) {
> +        avail_size = sizeof(uint32_t);
> +    } else {
> +        avail_size = offsetof(vring_avail_t, ring[svq->vring.num]) +
> +                                                             sizeof(uint16_t);
> +    }
> +    return ROUND_UP(avail_size, qemu_real_host_page_size());
>  }
>
>  size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
>  {
> -    size_t used_size = offsetof(vring_used_t, ring[svq->vring.num]) +
> -                                                              sizeof(uint16_t);
> +    size_t used_size;
> +    if (svq->is_packed) {
> +        used_size = sizeof(uint32_t);
> +    } else {
> +        used_size = offsetof(vring_used_t, ring[svq->vring.num]) +
> +                                                           sizeof(uint16_t);
> +    }
>      return ROUND_UP(used_size, qemu_real_host_page_size());
>  }
>
> @@ -764,8 +778,6 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
>  void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
>                       VirtQueue *vq, VhostIOVATree *iova_tree)
>  {
> -    size_t desc_size;
> -
>      event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
>      svq->next_guest_avail_elem = NULL;
>      svq->shadow_avail_idx = 0;
> @@ -774,20 +786,29 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
>      svq->vdev = vdev;
>      svq->vq = vq;
>      svq->iova_tree = iova_tree;
> +    svq->is_packed = virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED);
> +
> +    if (svq->is_packed) {
> +        svq->vring_packed.avail_wrap_counter = 1;
> +        svq->vring_packed.next_avail_idx = 0;
> +        svq->vring_packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
> +        svq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
> +    }
>
>      svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
>      svq->num_free = svq->vring.num;
> -    svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
> +    svq->vring.desc = mmap(NULL, vhost_svq_descriptor_area_size(svq),
>                             PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
>                             -1, 0);
> -    desc_size = sizeof(vring_desc_t) * svq->vring.num;
> -    svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
> +    svq->vring.avail = mmap(NULL, vhost_svq_driver_area_size(svq),
> +                            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
> +                            -1, 0);
>      svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
>                             PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
>                             -1, 0);
> -    svq->desc_state = g_new0(SVQDescState, svq->vring.num);
> -    svq->desc_next = g_new0(uint16_t, svq->vring.num);
> -    for (unsigned i = 0; i < svq->vring.num - 1; i++) {
> +    svq->desc_state = g_new0(SVQDescState, svq->num_free);
> +    svq->desc_next = g_new0(uint16_t, svq->num_free);
> +    for (unsigned i = 0; i < svq->num_free - 1; i++) {
>          svq->desc_next[i] = i + 1;
>      }
>  }
> @@ -827,7 +848,8 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
>      svq->vq = NULL;
>      g_free(svq->desc_next);
>      g_free(svq->desc_state);
> -    munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
> +    munmap(svq->vring.desc, vhost_svq_descriptor_area_size(svq));
> +    munmap(svq->vring.avail, vhost_svq_driver_area_size(svq));
>      munmap(svq->vring.used, vhost_svq_device_area_size(svq));
>      event_notifier_set_handler(&svq->hdev_call, NULL);
>  }
> diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
> index 5f7699da9d..12c6ea8be2 100644
> --- a/hw/virtio/vhost-shadow-virtqueue.h
> +++ b/hw/virtio/vhost-shadow-virtqueue.h
> @@ -152,6 +152,7 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
>  void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
>  void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
>                                struct vhost_vring_addr *addr);
> +size_t vhost_svq_descriptor_area_size(const VhostShadowVirtqueue *svq);
>  size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
>  size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 7efbde3d4c..58c8931d89 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -1137,6 +1137,8 @@ static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
>
>      vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
>
> +    vhost_vdpa_svq_unmap_ring(v, svq_addr.avail_user_addr);
> +
>      vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
>  }
>
> @@ -1191,38 +1193,61 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
>                                       Error **errp)
>  {
>      ERRP_GUARD();
> -    DMAMap device_region, driver_region;
> +    DMAMap descriptor_region, device_region, driver_region;
>      struct vhost_vring_addr svq_addr;
>      struct vhost_vdpa *v = dev->opaque;
> +    size_t descriptor_size = vhost_svq_descriptor_area_size(svq);
>      size_t device_size = vhost_svq_device_area_size(svq);
>      size_t driver_size = vhost_svq_driver_area_size(svq);
> -    size_t avail_offset;
>      bool ok;
>
>      vhost_svq_get_vring_addr(svq, &svq_addr);
>
> +    descriptor_region = (DMAMap) {
> +        .translated_addr = svq_addr.desc_user_addr,
> +        .size = descriptor_size - 1,
> +        .perm = IOMMU_RO,
> +    };
> +    if (svq->is_packed) {
> +        descriptor_region.perm = IOMMU_RW;
> +    }
> +
> +    ok = vhost_vdpa_svq_map_ring(v, &descriptor_region, svq_addr.desc_user_addr,
> +                                 errp);
> +    if (unlikely(!ok)) {
> +        error_prepend(errp, "Cannot create vq descriptor region: ");
> +        return false;
> +    }
> +    addr->desc_user_addr = descriptor_region.iova;
> +
>      driver_region = (DMAMap) {
> +        .translated_addr = svq_addr.avail_user_addr,
>          .size = driver_size - 1,
>          .perm = IOMMU_RO,
>      };
> -    ok = vhost_vdpa_svq_map_ring(v, &driver_region, svq_addr.desc_user_addr,
> +    ok = vhost_vdpa_svq_map_ring(v, &driver_region, svq_addr.avail_user_addr,
>                                   errp);
>      if (unlikely(!ok)) {
>          error_prepend(errp, "Cannot create vq driver region: ");
> +        vhost_vdpa_svq_unmap_ring(v, descriptor_region.translated_addr);
>          return false;
>      }
> -    addr->desc_user_addr = driver_region.iova;
> -    avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
> -    addr->avail_user_addr = driver_region.iova + avail_offset;
> +    addr->avail_user_addr = driver_region.iova;
>
>      device_region = (DMAMap) {
> +        .translated_addr = svq_addr.used_user_addr,
>          .size = device_size - 1,
>          .perm = IOMMU_RW,
>      };
> +    if (svq->is_packed) {
> +        device_region.perm = IOMMU_WO;
> +    }
> +
>      ok = vhost_vdpa_svq_map_ring(v, &device_region, svq_addr.used_user_addr,
>                                   errp);
>      if (unlikely(!ok)) {
>          error_prepend(errp, "Cannot create vq device region: ");
> +        vhost_vdpa_svq_unmap_ring(v, descriptor_region.translated_addr);
>          vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
>      }
>      addr->used_user_addr = device_region.iova;
> --
> 2.48.1
>

