virtqueue_add_split() currently supports only virtual addresses; the DMA
mapping is done inside virtqueue_add_split() itself.

In some scenarios (such as AF_XDP), the memory is allocated and DMA-mapped
in advance, so virtqueue_add_split() must also support being passed the
DMA address directly.

Record whether a buffer was premapped in desc_state so that the unmap can
be skipped for such buffers when the descriptors are detached.

Signed-off-by: Xuan Zhuo <[email protected]>
---
 drivers/virtio/virtio_ring.c | 55 +++++++++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 13 deletions(-)
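
For context (not part of the patch itself): a minimal sketch of how a
driver that owns its DMA mappings could feed this path. All names except
the dma_*()/sg_*() helpers and virtqueue_get_buf() are hypothetical, and
the plumbing that carries "premapped" from the public virtqueue API down
to virtqueue_add_split() is outside the scope of this diff.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/virtio.h>

    struct my_premapped_buf {
            void *vaddr;
            dma_addr_t dma;
            size_t len;
    };

    /* Producer side: the driver maps the memory once, up front. */
    static int my_premap(struct device *dev, struct my_premapped_buf *buf,
                         struct scatterlist *sg)
    {
            buf->dma = dma_map_single(dev, buf->vaddr, buf->len,
                                      DMA_TO_DEVICE);
            if (dma_mapping_error(dev, buf->dma))
                    return -ENOMEM;

            /*
             * Only the DMA address/length are filled in; with
             * premapped == true, virtqueue_add_split() reads
             * sg_dma_address(sg) instead of mapping the page itself.
             */
            sg_init_table(sg, 1);
            sg_dma_address(sg) = buf->dma;
            sg_dma_len(sg) = buf->len;
            return 0;
    }

    /*
     * Consumer side: the ring skipped the unmap, so the driver undoes
     * its own mapping after reclaiming the buffer.
     */
    static void my_reclaim(struct device *dev, struct virtqueue *vq)
    {
            struct my_premapped_buf *buf;
            unsigned int len;

            /* The token returned is the 'data' passed at add time. */
            while ((buf = virtqueue_get_buf(vq, &len)))
                    dma_unmap_single(dev, buf->dma, buf->len,
                                     DMA_TO_DEVICE);
    }

This mirrors the AF_XDP case from the commit message: the mapping lives
for as long as the buffer pool does, and the ring never touches it.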

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c9f194c86aec..ec622403cbd5 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -70,6 +70,7 @@
 struct vring_desc_state_split {
        void *data;                     /* Data for callback. */
        struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
+       bool premapped;                 /* DMA mapping is done by the driver. */
 };
 
 struct vring_desc_state_packed {
@@ -434,7 +435,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 }
 
 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
-                                         unsigned int i)
+                                         unsigned int i, bool premapped)
 {
        struct vring_desc_extra *extra = vq->split.desc_extra;
        u16 flags;
@@ -451,6 +452,9 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
+               if (premapped)
+                       goto out;
+
                dma_unmap_page(vring_dma_dev(vq),
                               extra[i].addr,
                               extra[i].len,
@@ -521,6 +525,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
                                      unsigned int in_sgs,
                                      void *data,
                                      void *ctx,
+                                     bool premapped,
                                      gfp_t gfp)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
@@ -582,9 +587,16 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                       dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
-                       if (vring_mapping_error(vq, addr))
-                               goto unmap_release;
+                       dma_addr_t addr;
+
+                       if (premapped) {
+                               addr = sg_dma_address(sg);
+
+                       } else {
+                               addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+                               if (vring_mapping_error(vq, addr))
+                                       goto unmap_release;
+                       }
 
                        prev = i;
                        /* Note that we trust indirect descriptor
@@ -597,9 +609,16 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                       dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
-                       if (vring_mapping_error(vq, addr))
-                               goto unmap_release;
+                       dma_addr_t addr;
+
+                       if (premapped) {
+                               addr = sg_dma_address(sg);
+
+                       } else {
+                               addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+                               if (vring_mapping_error(vq, addr))
+                                       goto unmap_release;
+                       }
 
                        prev = i;
                        /* Note that we trust indirect descriptor
@@ -644,6 +663,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
        /* Store token and indirect buffer state. */
        vq->split.desc_state[head].data = data;
+       vq->split.desc_state[head].premapped = premapped;
        if (indirect)
                vq->split.desc_state[head].indir_desc = desc;
        else
@@ -673,6 +693,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        return 0;
 
 unmap_release:
+       if (premapped)
+               goto unmap_free;
+
        err_idx = i;
 
        if (indirect)
@@ -687,9 +710,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
                        vring_unmap_one_split_indirect(vq, &desc[i]);
                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
                } else
-                       i = vring_unmap_one_split(vq, i);
+                       i = vring_unmap_one_split(vq, i, false);
        }
 
+unmap_free:
        if (indirect)
                kfree(desc);
 
@@ -733,20 +757,23 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 {
        unsigned int i, j;
        __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
+       bool premapped;
 
        /* Clear data ptr. */
        vq->split.desc_state[head].data = NULL;
 
+       premapped = vq->split.desc_state[head].premapped;
+
        /* Put back on free list: unmap first-level descriptors and find end */
        i = head;
 
        while (vq->split.vring.desc[i].flags & nextflag) {
-               vring_unmap_one_split(vq, i);
+               vring_unmap_one_split(vq, i, premapped);
                i = vq->split.desc_extra[i].next;
                vq->vq.num_free++;
        }
 
-       vring_unmap_one_split(vq, i);
+       vring_unmap_one_split(vq, i, premapped);
        vq->split.desc_extra[i].next = vq->free_head;
        vq->free_head = head;
 
@@ -768,8 +795,10 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
                                VRING_DESC_F_INDIRECT));
                BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
-               for (j = 0; j < len / sizeof(struct vring_desc); j++)
-                       vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+               if (!premapped) {
+                       for (j = 0; j < len / sizeof(struct vring_desc); j++)
+                               vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+               }
 
                kfree(indir_desc);
                vq->split.desc_state[head].indir_desc = NULL;
@@ -2095,7 +2124,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
        return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
                                        out_sgs, in_sgs, data, ctx, gfp) :
                                 virtqueue_add_split(_vq, sgs, total_sg,
-                                       out_sgs, in_sgs, data, ctx, gfp);
+                                       out_sgs, in_sgs, data, ctx, premapped, gfp);
 }
 
 /**
-- 
2.32.0.3.g01195cf9f
