Let's determine the last descriptor by counting the number of sgs
instead of remembering the previous descriptor index and clearing its
NEXT flag afterwards. This is consistent with the packed virtqueue
implementation and will ease the future in-order implementation.
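
For illustration only, here is a minimal userspace sketch of the
counting pattern (F_NEXT, F_WRITE and the fixed sizes below are
hypothetical stand-ins, not kernel code):

#include <stdio.h>

#define F_NEXT  0x1 /* stand-in for VRING_DESC_F_NEXT */
#define F_WRITE 0x2 /* stand-in for VRING_DESC_F_WRITE */

int main(void)
{
	unsigned int out_sgs = 2, in_sgs = 2;
	unsigned int total_sg = out_sgs + in_sgs;
	unsigned int flags[4] = { 0 };
	unsigned int c = 0;

	for (unsigned int n = 0; n < total_sg; n++) {
		unsigned int f = (n < out_sgs) ? 0 : F_WRITE;

		/* Only the final descriptor lacks the NEXT flag, so
		 * no post-loop fixup of the previous descriptor is
		 * needed.
		 */
		if (++c != total_sg)
			f |= F_NEXT;
		flags[n] = f;
	}

	for (unsigned int n = 0; n < total_sg; n++)
		printf("desc %u: flags 0x%x\n", n, flags[n]);

	return 0;
}

This is why the "Last one doesn't continue" fixup below can be
dropped: by the time the last sg is filled, c == total_sg and F_NEXT
was never set on it.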

Reviewed-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
Signed-off-by: Jason Wang <jasow...@redhat.com>
---
 drivers/virtio/virtio_ring.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index af32d1a1a1db..d5e4d4cd2487 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -570,7 +570,7 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
        struct vring_desc_extra *extra;
        struct scatterlist *sg;
        struct vring_desc *desc;
-       unsigned int i, n, avail, descs_used, prev, err_idx;
+       unsigned int i, n, c, avail, descs_used, err_idx;
        int head;
        bool indirect;
 
@@ -626,46 +626,46 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
                return -ENOSPC;
        }
 
+       c = 0;
        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        dma_addr_t addr;
                        u32 len;
+                       u16 flags = 0;
 
                       if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
                                goto unmap_release;
 
-                       prev = i;
+                       if (++c != total_sg)
+                               flags = VRING_DESC_F_NEXT;
+
                        /* Note that we trust indirect descriptor
                         * table since it use stream DMA mapping.
                         */
                       i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
-                                                    VRING_DESC_F_NEXT,
+                                                    flags,
                                                     premapped);
                }
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+                       u16 flags = VRING_DESC_F_WRITE;
                        dma_addr_t addr;
                        u32 len;
 
                       if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
                                goto unmap_release;
 
-                       prev = i;
+                       if (++c != total_sg)
+                               flags |= VRING_DESC_F_NEXT;
+
                        /* Note that we trust indirect descriptor
                         * table since it use stream DMA mapping.
                         */
                       i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
-                                                    VRING_DESC_F_NEXT |
-                                                    VRING_DESC_F_WRITE,
-                                                    premapped);
+                                                    flags, premapped);
                }
        }
-       /* Last one doesn't continue. */
-       desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT);
-       if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
-               vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
-                       ~VRING_DESC_F_NEXT;
 
        if (indirect) {
                /* Now that the indirect table is filled in, map it. */
-- 
2.31.1

