On Fri, Jul 25, 2025 at 5:15 PM Xuan Zhuo <xuanz...@linux.alibaba.com> wrote:
>
> On Fri, 18 Jul 2025 17:16:13 +0800, Jason Wang <jasow...@redhat.com> wrote:
> > This patch introduces map operations for virtio devices. Virtio used
> > to rely on the DMA API, which is not necessarily appropriate since
> > some devices don't do DMA. Instead of using tricks and abusing the
> > DMA API, let's simply abstract the current mapping logic into
> > virtio-specific mapping operations. Devices or transports that don't
> > do DMA can then implement their own mapping logic without needing to
> > trick the DMA core. In this case the map_token is opaque to the
> > virtio core and is passed back to the transport- or device-specific
> > map operations. For other devices the DMA API is still used, so the
> > map token remains the DMA device, minimizing the changeset and the
> > performance impact.
> >
> > The mapping operations are abstracted as an independent structure
> > instead of being folded into virtio_config_ops. This allows a
> > transport to simply reuse the structure for its lower layers.
> >
> > A set of new mapping helpers is introduced for devices that want to
> > do the mapping themselves.
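> >
> > For reference, the per-device ops look roughly like the following
> > sketch (signatures inferred from the callers below; the actual
> > definition lives in include/linux/virtio.h):
> >
> >     struct virtio_map_ops {
> >             /* coherent allocation/free for the vring itself */
> >             void *(*alloc)(void *map_token, size_t size,
> >                            dma_addr_t *map_handle, gfp_t gfp);
> >             void (*free)(void *map_token, size_t size, void *vaddr,
> >                          dma_addr_t map_handle, unsigned long attrs);
> >             /* streaming mappings for buffers */
> >             dma_addr_t (*map_page)(void *map_token, struct page *page,
> >                                    unsigned long offset, size_t size,
> >                                    enum dma_data_direction dir,
> >                                    unsigned long attrs);
> >             void (*unmap_page)(void *map_token, dma_addr_t map_handle,
> >                                size_t size, enum dma_data_direction dir,
> >                                unsigned long attrs);
> >             size_t (*max_mapping_size)(void *map_token);
> >     };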
> >
> > Reviewed-by: Christoph Hellwig <h...@lst.de>
> > Signed-off-by: Jason Wang <jasow...@redhat.com>
> > ---
> >  drivers/virtio/virtio_ring.c  | 174 +++++++++++++++++++++++++++++-----
> >  include/linux/virtio.h        |  22 +++++
> >  include/linux/virtio_config.h |  68 +++++++++++++
> >  3 files changed, 238 insertions(+), 26 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > index 26ce8a3d6927..3f86e74dd79f 100644
> > --- a/drivers/virtio/virtio_ring.c
> > +++ b/drivers/virtio/virtio_ring.c
> > @@ -298,8 +298,14 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev)
> >  {
> >       size_t max_segment_size = SIZE_MAX;
> >
> > -     if (vring_use_map_api(vdev))
> > -             max_segment_size = dma_max_mapping_size(vdev->dev.parent);
> > +     if (vring_use_map_api(vdev)) {
> > +             if (vdev->map)
> > +                     max_segment_size =
> > +                             vdev->map->max_mapping_size(vdev->dev.parent);
> > +             else
> > +                     max_segment_size =
> > +                             dma_max_mapping_size(vdev->dev.parent);
> > +     }
> >
> >       return max_segment_size;
> >  }
> > @@ -310,8 +316,8 @@ static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
> >                              void *map_token)
> >  {
> >       if (vring_use_map_api(vdev)) {
> > -             return dma_alloc_coherent(map_token, size,
> > -                                       map_handle, flag);
> > +             return virtqueue_map_alloc_coherent(vdev, map_token, size,
> > +                                                 map_handle, flag);
> >       } else {
> >               void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
> >
> > @@ -344,7 +350,8 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
> >                            void *map_token)
> >  {
> >       if (vring_use_map_api(vdev))
> > -             dma_free_coherent(map_token, size, queue, map_handle);
> > +             virtqueue_map_free_coherent(vdev, map_token, size,
> > +                                         queue, map_handle);
> >       else
> >               free_pages_exact(queue, PAGE_ALIGN(size));
> >  }
> > @@ -388,9 +395,9 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
> >        * the way it expects (we don't guarantee that the scatterlist
> >        * will exist for the lifetime of the mapping).
> >        */
> > -     *addr = dma_map_page(vring_map_token(vq),
> > -                         sg_page(sg), sg->offset, sg->length,
> > -                         direction);
> > +     *addr = virtqueue_map_page_attrs(&vq->vq, sg_page(sg),
> > +                                      sg->offset, sg->length,
> > +                                      direction, 0);
> >
> >       if (dma_mapping_error(vring_map_token(vq), *addr))
> >               return -ENOMEM;
> > @@ -454,11 +461,12 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> >       } else if (!vring_need_unmap_buffer(vq, extra))
> >               goto out;
> >
> > -     dma_unmap_page(vring_map_token(vq),
> > -                    extra->addr,
> > -                    extra->len,
> > -                    (flags & VRING_DESC_F_WRITE) ?
> > -                    DMA_FROM_DEVICE : DMA_TO_DEVICE);
> > +     virtqueue_unmap_page_attrs(&vq->vq,
> > +                                extra->addr,
> > +                                extra->len,
> > +                                (flags & VRING_DESC_F_WRITE) ?
> > +                                DMA_FROM_DEVICE : DMA_TO_DEVICE,
> > +                                0);
> >
> >  out:
> >       return extra->next;
> > @@ -1271,10 +1279,11 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
> >       } else if (!vring_need_unmap_buffer(vq, extra))
> >               return;
> >
> > -     dma_unmap_page(vring_map_token(vq),
> > -                    extra->addr, extra->len,
> > -                    (flags & VRING_DESC_F_WRITE) ?
> > -                    DMA_FROM_DEVICE : DMA_TO_DEVICE);
> > +     virtqueue_unmap_page_attrs(&vq->vq,
> > +                                extra->addr, extra->len,
> > +                                (flags & VRING_DESC_F_WRITE) ?
> > +                                DMA_FROM_DEVICE : DMA_TO_DEVICE,
> > +                                0);
> >  }
> >
> >  static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
> > @@ -3121,6 +3130,105 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
> >  }
> >  EXPORT_SYMBOL_GPL(virtqueue_get_vring);
> >
> > +/**
> > + * virtqueue_map_alloc_coherent - alloc coherent mapping
> > + * @vdev: the virtio device we are talking to
> > + * @map_token: device specific mapping token
> > + * @size: the size of the buffer
> > + * @map_handle: the pointer to the mapped address
> > + * @gfp: allocation flag (GFP_XXX)
> > + *
> > + * Returns the virtual address, or NULL on error
> > + */
> > +void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
> > +                                void *map_token, size_t size,
> > +                                dma_addr_t *map_handle, gfp_t gfp)
> > +{
> > +     if (vdev->map)
> > +             return vdev->map->alloc(map_token, size, map_handle, gfp);
> > +     else
> > +             return dma_alloc_coherent(map_token, size,
> > +                                       map_handle, gfp);
> > +}
> > +EXPORT_SYMBOL_GPL(virtqueue_map_alloc_coherent);
> > +
> > +/**
> > + * virtqueue_map_free_coherent - free coherent mapping
> > + * @vdev: the virtio device we are talking to
> > + * @map_token: device specific mapping token
> > + * @size: the size of the buffer
> > + * @vaddr: the virtual address to be freed
> > + * @map_handle: the mapped address that needs to be freed
> > + */
> > +void virtqueue_map_free_coherent(struct virtio_device *vdev,
> > +                              void *map_token, size_t size, void *vaddr,
> > +                              dma_addr_t map_handle)
> > +{
> > +     if (vdev->map)
> > +             vdev->map->free(map_token, size, vaddr, map_handle, 0);
> > +     else
> > +             dma_free_coherent(map_token, size, vaddr, map_handle);
> > +}
> > +EXPORT_SYMBOL_GPL(virtqueue_map_free_coherent);
> > +
> > +/**
> > + * virtqueue_map_page_attrs - map a page to the device
> > + * @_vq: the virtqueue we are talking to
> > + * @page: the page that will be mapped by the device
> > + * @offset: the offset in the page for a buffer
> > + * @size: the buffer size
> > + * @dir: mapping direction
> > + * @attrs: mapping attributes
> > + *
> > + * Returns the mapped address. The caller should check it with virtqueue_mapping_error().
> > + */
> > +dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
> > +                                 struct page *page,
> > +                                 unsigned long offset,
> > +                                 size_t size,
> > +                                 enum dma_data_direction dir,
> > +                                 unsigned long attrs)
> > +{
> > +     const struct vring_virtqueue *vq = to_vvq(_vq);
> > +     struct virtio_device *vdev = _vq->vdev;
> > +     void *map_token = vring_map_token(vq);
> > +
> > +     if (vdev->map)
> > +             return vdev->map->map_page(map_token,
> > +                                        page, offset, size,
> > +                                        dir, attrs);
> > +
> > +     return dma_map_page_attrs(map_token,
> > +                               page, offset, size,
> > +                               dir, attrs);
> > +}
> > +EXPORT_SYMBOL_GPL(virtqueue_map_page_attrs);
> > +
> > +/**
> > + * virtqueue_unmap_page_attrs - unmap a page from the device
> > + * @_vq: the virtqueue we are talking to
> > + * @map_handle: the mapped address
> > + * @size: the buffer size
> > + * @dir: mapping direction
> > + * @attrs: unmapping attributes
> > + */
> > +void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
> > +                             dma_addr_t map_handle,
> > +                             size_t size, enum dma_data_direction dir,
> > +                             unsigned long attrs)
> > +{
> > +     const struct vring_virtqueue *vq = to_vvq(_vq);
> > +     struct virtio_device *vdev = _vq->vdev;
> > +     void *map_token = vring_map_token(vq);
> > +
> > +     if (vdev->map)
> > +             vdev->map->unmap_page(map_token, map_handle,
> > +                                   size, dir, attrs);
> > +     else
> > +             dma_unmap_page_attrs(map_token, map_handle, size, dir, attrs);
>
>
> How about setting dma_unmap_page_attrs as the default value of vdev->map->unmap_page?
>
> Then we can call vdev->map->unmap_page directly.
>
> Thanks.
>

This looks cleaner, but the problem is that the indirect call might
be slow when mitigations are enabled. So I tend to keep the
hard-coded condition here.
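
In other words, the trade-off is roughly (just a sketch, not the
exact code):

    /* suggested: always populate ->unmap_page (defaulting it to
     * dma_unmap_page_attrs) and call it unconditionally */
    vdev->map->unmap_page(map_token, map_handle, size, dir, attrs);

    /* kept: a direct branch, so the common DMA path avoids an
     * indirect call (e.g. a retpoline) when mitigations are on */
    if (vdev->map)
            vdev->map->unmap_page(map_token, map_handle,
                                  size, dir, attrs);
    else
            dma_unmap_page_attrs(map_token, map_handle,
                                 size, dir, attrs);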

Thanks

