Re: [Qemu-devel] [RFC v3 07/12] virtio: fill/flush/pop for packed ring

2018-10-15 Thread Jason Wang




On 2018年10月11日 22:08, w...@redhat.com wrote:

From: Wei Xu 

Signed-off-by: Wei Xu 
---
  hw/virtio/virtio.c | 258 ++---
  1 file changed, 244 insertions(+), 14 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 13c6c98..d12a7e3 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -386,6 +386,21 @@ static void vring_packed_desc_read(VirtIODevice *vdev, 
VRingPackedDesc *desc,
  virtio_tswap16s(vdev, &desc->id);
  }
  
+static void vring_packed_desc_write(VirtIODevice *vdev, VRingPackedDesc *desc,

+MemoryRegionCache *cache, int i)
+{
+virtio_tswap64s(vdev, &desc->addr);
+virtio_tswap32s(vdev, &desc->len);
+virtio_tswap16s(vdev, &desc->id);
+virtio_tswap16s(vdev, &desc->flags);
+address_space_write_cached(cache,
+   sizeof(VRingPackedDesc) * i, desc,
+   sizeof(VRingPackedDesc));
+address_space_cache_invalidate(cache,
+   sizeof(VRingPackedDesc) * i,
+   sizeof(VRingPackedDesc));
+}
+
  static void vring_packed_desc_read_flags(VirtIODevice *vdev,
  VRingPackedDesc *desc, MemoryRegionCache *cache, int i)
  {
@@ -559,19 +574,11 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
  }
  
  /* Called within rcu_read_lock().  */

-void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
  unsigned int len, unsigned int idx)
  {
  VRingUsedElem uelem;
  
-trace_virtqueue_fill(vq, elem, len, idx);

-
-virtqueue_unmap_sg(vq, elem, len);
-
-if (unlikely(vq->vdev->broken)) {
-return;
-}
-
  if (unlikely(!vq->vring.used)) {
  return;
  }
@@ -583,16 +590,64 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement 
*elem,
  vring_used_write(vq, &uelem, idx);
  }
  
-/* Called within rcu_read_lock().  */

-void virtqueue_flush(VirtQueue *vq, unsigned int count)
+static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
+unsigned int len, unsigned int idx)
  {
-uint16_t old, new;
+uint16_t w, head;
+VRingMemoryRegionCaches *caches;
+VRingPackedDesc desc = {
+.addr = 0,
+.flags = 0,
+};
+
+if (unlikely(!vq->vring.desc)) {
+return;
+}
+
+caches = vring_get_region_caches(vq);
+head = vq->used_idx + idx;
+head = head >= vq->vring.num ? (head - vq->vring.num) : head;
+vring_packed_desc_read(vq->vdev, &desc, &caches->desc, head);


Is it really necessary to read the descriptor again here? Shouldn't the 
device maintain its own used_wrap_counter instead?



+
+w = (desc.flags & AVAIL_DESC_PACKED(1)) >> 7;
+desc.flags &= ~(AVAIL_DESC_PACKED(1) | USED_DESC_PACKED(1));
+desc.flags |= AVAIL_DESC_PACKED(w) | USED_DESC_PACKED(w);
+if (!(desc.flags & VRING_DESC_F_INDIRECT)) {
+if (!(desc.flags & VRING_DESC_F_WRITE)) {
+desc.len = 0;
+} else {
+desc.len = len;
+}
+}
+vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head);
+
+/* Make sure flags has been updated */
+smp_mb();


This is wrong in two places:

- What you want is to make sure the flags field is updated after all the 
other fields; you can do that inside vring_packed_desc_write().

- A write barrier instead of a full barrier is needed.


+}
+
+void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+unsigned int len, unsigned int idx)
+{
+trace_virtqueue_fill(vq, elem, len, idx);
+
+virtqueue_unmap_sg(vq, elem, len);
  
  if (unlikely(vq->vdev->broken)) {

-vq->inuse -= count;
  return;
  }
  
+if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {

+virtqueue_packed_fill(vq, elem, len, idx);
+} else {
+virtqueue_split_fill(vq, elem, len, idx);
+}
+}
+
+/* Called within rcu_read_lock().  */
+static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
+{
+uint16_t old, new;
+
  if (unlikely(!vq->vring.used)) {
  return;
  }
@@ -608,6 +663,33 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
  vq->signalled_used_valid = false;
  }
  
+static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)

+{
+if (unlikely(!vq->vring.desc)) {
+return;
+}
+
+vq->inuse -= count;
+vq->used_idx += count;
+if (vq->used_idx >= vq->vring.num) {
+vq->used_idx -= vq->vring.num;
+}
+}
+
+void virtqueue_flush(VirtQueue *vq, unsigned int count)
+{
+if (unlikely(vq->vdev->broken)) {
+vq->inuse -= count;
+return;
+}
+
+if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+virtqueue_packed_flush(vq, count);
+} else {
+virtqueue_split_flush(vq, count);
+}
+}
+
  void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
   

[Qemu-devel] [RFC v3 07/12] virtio: fill/flush/pop for packed ring

2018-10-11 Thread wexu
From: Wei Xu 

Signed-off-by: Wei Xu 
---
 hw/virtio/virtio.c | 258 ++---
 1 file changed, 244 insertions(+), 14 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 13c6c98..d12a7e3 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -386,6 +386,21 @@ static void vring_packed_desc_read(VirtIODevice *vdev, 
VRingPackedDesc *desc,
 virtio_tswap16s(vdev, &desc->id);
 }
 
+static void vring_packed_desc_write(VirtIODevice *vdev, VRingPackedDesc *desc,
+MemoryRegionCache *cache, int i)
+{
+virtio_tswap64s(vdev, &desc->addr);
+virtio_tswap32s(vdev, &desc->len);
+virtio_tswap16s(vdev, &desc->id);
+virtio_tswap16s(vdev, &desc->flags);
+address_space_write_cached(cache,
+   sizeof(VRingPackedDesc) * i, desc,
+   sizeof(VRingPackedDesc));
+address_space_cache_invalidate(cache,
+   sizeof(VRingPackedDesc) * i,
+   sizeof(VRingPackedDesc));
+}
+
 static void vring_packed_desc_read_flags(VirtIODevice *vdev,
 VRingPackedDesc *desc, MemoryRegionCache *cache, int i)
 {
@@ -559,19 +574,11 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
 }
 
 /* Called within rcu_read_lock().  */
-void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
 unsigned int len, unsigned int idx)
 {
 VRingUsedElem uelem;
 
-trace_virtqueue_fill(vq, elem, len, idx);
-
-virtqueue_unmap_sg(vq, elem, len);
-
-if (unlikely(vq->vdev->broken)) {
-return;
-}
-
 if (unlikely(!vq->vring.used)) {
 return;
 }
@@ -583,16 +590,64 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement 
*elem,
 vring_used_write(vq, &uelem, idx);
 }
 
-/* Called within rcu_read_lock().  */
-void virtqueue_flush(VirtQueue *vq, unsigned int count)
+static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
+unsigned int len, unsigned int idx)
 {
-uint16_t old, new;
+uint16_t w, head;
+VRingMemoryRegionCaches *caches;
+VRingPackedDesc desc = {
+.addr = 0,
+.flags = 0,
+};
+
+if (unlikely(!vq->vring.desc)) {
+return;
+}
+
+caches = vring_get_region_caches(vq);
+head = vq->used_idx + idx;
+head = head >= vq->vring.num ? (head - vq->vring.num) : head;
+vring_packed_desc_read(vq->vdev, &desc, &caches->desc, head);
+
+w = (desc.flags & AVAIL_DESC_PACKED(1)) >> 7;
+desc.flags &= ~(AVAIL_DESC_PACKED(1) | USED_DESC_PACKED(1));
+desc.flags |= AVAIL_DESC_PACKED(w) | USED_DESC_PACKED(w);
+if (!(desc.flags & VRING_DESC_F_INDIRECT)) {
+if (!(desc.flags & VRING_DESC_F_WRITE)) {
+desc.len = 0;
+} else {
+desc.len = len;
+}
+}
+vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head);
+
+/* Make sure flags has been updated */
+smp_mb();
+}
+
+void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+unsigned int len, unsigned int idx)
+{
+trace_virtqueue_fill(vq, elem, len, idx);
+
+virtqueue_unmap_sg(vq, elem, len);
 
 if (unlikely(vq->vdev->broken)) {
-vq->inuse -= count;
 return;
 }
 
+if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+virtqueue_packed_fill(vq, elem, len, idx);
+} else {
+virtqueue_split_fill(vq, elem, len, idx);
+}
+}
+
+/* Called within rcu_read_lock().  */
+static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
+{
+uint16_t old, new;
+
 if (unlikely(!vq->vring.used)) {
 return;
 }
@@ -608,6 +663,33 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
 vq->signalled_used_valid = false;
 }
 
+static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
+{
+if (unlikely(!vq->vring.desc)) {
+return;
+}
+
+vq->inuse -= count;
+vq->used_idx += count;
+if (vq->used_idx >= vq->vring.num) {
+vq->used_idx -= vq->vring.num;
+}
+}
+
+void virtqueue_flush(VirtQueue *vq, unsigned int count)
+{
+if (unlikely(vq->vdev->broken)) {
+vq->inuse -= count;
+return;
+}
+
+if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+virtqueue_packed_flush(vq, count);
+} else {
+virtqueue_split_flush(vq, count);
+}
+}
+
 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
 unsigned int len)
 {
@@ -1091,7 +1173,7 @@ static void *virtqueue_alloc_element(size_t sz, unsigned 
out_num, unsigned in_nu
 return elem;
 }
 
-void *virtqueue_pop(VirtQueue *vq, size_t sz)
+static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
 {
 unsigned int i, head, max;
 VRingMemoryRegionCaches *caches;
@@ -1226,6 +1308,154 @@ err_undo_map:
 goto done;
 }