Let's determine the last descriptor by counting the number of sg
entries rather than by tracking the previous descriptor and clearing
its VRING_DESC_F_NEXT flag after the loops. This is consistent with
the packed virtqueue implementation and will ease the future in-order
implementation.
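
To see the counting trick in isolation, here is a minimal standalone
sketch (an illustration only, not the vring code: VRING_DESC_F_NEXT is
redefined locally and the loop body is reduced to flag selection):

  #include <stdio.h>

  #define VRING_DESC_F_NEXT 1

  int main(void)
  {
          unsigned int total_sg = 4, c = 0, n;

          for (n = 0; n < total_sg; n++) {
                  /* The final descriptor gets 0, the rest chain via
                   * NEXT, so no post-loop fixup of a "prev" index is
                   * needed.
                   */
                  unsigned int flags = (++c == total_sg) ?
                                       0 : VRING_DESC_F_NEXT;

                  printf("desc %u flags 0x%x\n", n, flags);
          }

          return 0;
  }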

Acked-by: Eugenio Pérez <epere...@redhat.com>
Reviewed-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
Signed-off-by: Jason Wang <jasow...@redhat.com>
---
 drivers/virtio/virtio_ring.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 1045c553ee65..0949675a3d12 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -574,7 +574,7 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
        struct vring_desc_extra *extra;
        struct scatterlist *sg;
        struct vring_desc *desc;
-       unsigned int i, n, avail, descs_used, prev, err_idx;
+       unsigned int i, n, avail, descs_used, err_idx, c = 0;
        int head;
        bool indirect;
 
@@ -631,6 +631,7 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
        }
 
        for (n = 0; n < out_sgs; n++) {
+               sg = sgs[n];
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        dma_addr_t addr;
                        u32 len;
@@ -638,12 +639,12 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
                        if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
                                goto unmap_release;
 
-                       prev = i;
                        /* Note that we trust indirect descriptor
                         * table since it use stream DMA mapping.
                         */
                        i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
-                                                    VRING_DESC_F_NEXT,
+                                                    ++c == total_sg ?
+                                                    0 : VRING_DESC_F_NEXT,
                                                     premapped);
                }
        }
@@ -655,21 +656,15 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
                        if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
                                goto unmap_release;
 
-                       prev = i;
                        /* Note that we trust indirect descriptor
                         * table since it use stream DMA mapping.
                         */
-                       i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
-                                                    VRING_DESC_F_NEXT |
-                                                    VRING_DESC_F_WRITE,
-                                                    premapped);
+                       i = virtqueue_add_desc_split(vq, desc, extra,
+                               i, addr, len,
+                               (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
+                               VRING_DESC_F_WRITE, premapped);
                }
        }
-       /* Last one doesn't continue. */
-       desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT);
-       if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
-               vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
-                       ~VRING_DESC_F_NEXT;
 
        if (indirect) {
                /* Now that the indirect table is filled in, map it. */
-- 
2.31.1

