Re: [Qemu-block] [PATCH 2/3] xen-disk: add support for multi-page shared rings

2017-06-21 Thread Paul Durrant
> -----Original Message-----
> From: Stefano Stabellini [mailto:sstabell...@kernel.org]
> Sent: 20 June 2017 23:51
> To: Paul Durrant 
> Cc: xen-de...@lists.xenproject.org; qemu-de...@nongnu.org; qemu-
> bl...@nongnu.org; Stefano Stabellini ; Anthony
> Perard ; Kevin Wolf ;
> Max Reitz 
> Subject: Re: [PATCH 2/3] xen-disk: add support for multi-page shared rings
> 
> On Tue, 20 Jun 2017, Paul Durrant wrote:
> > The blkif protocol has had provision for negotiation of multi-page shared
> > rings for some time now, and many guest OSes have support in their
> > frontend drivers.
> >
> > This patch makes the necessary modifications to allow xen-disk to support
> > a shared ring up to order 4 (i.e. 16 pages).
> >
> > Signed-off-by: Paul Durrant 
> 
> Thanks for the patch!
> 

You're welcome.

> > ---
> > Cc: Stefano Stabellini 
> > Cc: Anthony Perard 
> > Cc: Kevin Wolf 
> > Cc: Max Reitz 
> > ---
> >  hw/block/xen_disk.c | 141
> 
> >  1 file changed, 110 insertions(+), 31 deletions(-)
> >
> > diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> > index 9b06e3aa81..a9942d32db 100644
> > --- a/hw/block/xen_disk.c
> > +++ b/hw/block/xen_disk.c
> > @@ -36,8 +36,6 @@
> >
> >  static int batch_maps   = 0;
> >
> > -static int max_requests = 32;
> > -
> >  /* - */
> >
> >  #define BLOCK_SIZE  512
> > @@ -84,6 +82,8 @@ struct ioreq {
> >  BlockAcctCookie acct;
> >  };
> >
> > +#define MAX_RING_PAGE_ORDER 4
> > +
> >  struct XenBlkDev {
> >  struct XenDevicexendev;  /* must be first */
> >  char*params;
> > @@ -94,7 +94,8 @@ struct XenBlkDev {
> >  booldirectiosafe;
> >  const char  *fileproto;
> >  const char  *filename;
> > -int ring_ref;
> > +unsigned intring_ref[1 << MAX_RING_PAGE_ORDER];
> > +unsigned intnr_ring_ref;
> >  void*sring;
> >  int64_t file_blk;
> >  int64_t file_size;
> > @@ -110,6 +111,7 @@ struct XenBlkDev {
> >  int requests_total;
> >  int requests_inflight;
> >  int requests_finished;
> > +unsigned intmax_requests;
> >
> >  /* Persistent grants extension */
> >  gbooleanfeature_discard;
> > @@ -199,7 +201,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
> >  struct ioreq *ioreq = NULL;
> >
> >  if (QLIST_EMPTY(&blkdev->freelist)) {
> > -if (blkdev->requests_total >= max_requests) {
> > +if (blkdev->requests_total >= blkdev->max_requests) {
> >  goto out;
> >  }
> >  /* allocate new struct */
> > @@ -905,7 +907,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
> >  ioreq_runio_qemu_aio(ioreq);
> >  }
> >
> > -if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
> > +if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
> >  qemu_bh_schedule(blkdev->bh);
> >  }
> >  }
> > @@ -918,15 +920,6 @@ static void blk_bh(void *opaque)
> >  blk_handle_requests(blkdev);
> >  }
> >
> > -/*
> > - * We need to account for the grant allocations requiring contiguous
> > - * chunks; the worst case number would be
> > - * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
> > - * but in order to keep things simple just use
> > - * 2 * max_req * max_seg.
> > - */
> > -#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
> > -
> >  static void blk_alloc(struct XenDevice *xendev)
> >  {
> >  struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
> > @@ -938,11 +931,6 @@ static void blk_alloc(struct XenDevice *xendev)
> >  if (xen_mode != XEN_EMULATE) {
> >  batch_maps = 1;
> >  }
> > -if (xengnttab_set_max_grants(xendev->gnttabdev,
> > -MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
> > -xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
> > -  strerror(errno));
> > -}
> >  }
> >
> >  static void blk_parse_discard(struct XenBlkDev *blkdev)
> > @@ -1037,6 +1025,9 @@ static int blk_init(struct XenDevice *xendev)
> >!blkdev->feature_grant_copy);
> >  xenstore_write_be_int(&blkdev->xendev, "info", info);
> >
> > +xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
> > +  MAX_RING_PAGE_ORDER);
> > +
> >  blk_parse_discard(blkdev);
> >
> >  g_free(directiosafe);
> > @@ -1058,12 +1049,25 @@ out_error:
> >  return -1;
> >  }
> >
> > +/*
> > + * We need to account for the grant 

Re: [Qemu-block] [PATCH 2/3] xen-disk: add support for multi-page shared rings

2017-06-20 Thread Stefano Stabellini
On Tue, 20 Jun 2017, Paul Durrant wrote:
> The blkif protocol has had provision for negotiation of multi-page shared
> rings for some time now, and many guest OSes have support in their
> frontend drivers.
> 
> This patch makes the necessary modifications to allow xen-disk to support
> a shared ring up to order 4 (i.e. 16 pages).
> 
> Signed-off-by: Paul Durrant 

Thanks for the patch!

> ---
> Cc: Stefano Stabellini 
> Cc: Anthony Perard 
> Cc: Kevin Wolf 
> Cc: Max Reitz 
> ---
>  hw/block/xen_disk.c | 141 
> 
>  1 file changed, 110 insertions(+), 31 deletions(-)
> 
> diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> index 9b06e3aa81..a9942d32db 100644
> --- a/hw/block/xen_disk.c
> +++ b/hw/block/xen_disk.c
> @@ -36,8 +36,6 @@
>  
>  static int batch_maps   = 0;
>  
> -static int max_requests = 32;
> -
>  /* - */
>  
>  #define BLOCK_SIZE  512
> @@ -84,6 +82,8 @@ struct ioreq {
>  BlockAcctCookie acct;
>  };
>  
> +#define MAX_RING_PAGE_ORDER 4
> +
>  struct XenBlkDev {
>  struct XenDevicexendev;  /* must be first */
>  char*params;
> @@ -94,7 +94,8 @@ struct XenBlkDev {
>  booldirectiosafe;
>  const char  *fileproto;
>  const char  *filename;
> -int ring_ref;
> +unsigned intring_ref[1 << MAX_RING_PAGE_ORDER];
> +unsigned intnr_ring_ref;
>  void*sring;
>  int64_t file_blk;
>  int64_t file_size;
> @@ -110,6 +111,7 @@ struct XenBlkDev {
>  int requests_total;
>  int requests_inflight;
>  int requests_finished;
> +unsigned intmax_requests;
>  
>  /* Persistent grants extension */
>  gbooleanfeature_discard;
> @@ -199,7 +201,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
>  struct ioreq *ioreq = NULL;
>  
>  if (QLIST_EMPTY(&blkdev->freelist)) {
> -if (blkdev->requests_total >= max_requests) {
> +if (blkdev->requests_total >= blkdev->max_requests) {
>  goto out;
>  }
>  /* allocate new struct */
> @@ -905,7 +907,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
>  ioreq_runio_qemu_aio(ioreq);
>  }
>  
> -if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
> +if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
>  qemu_bh_schedule(blkdev->bh);
>  }
>  }
> @@ -918,15 +920,6 @@ static void blk_bh(void *opaque)
>  blk_handle_requests(blkdev);
>  }
>  
> -/*
> - * We need to account for the grant allocations requiring contiguous
> - * chunks; the worst case number would be
> - * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
> - * but in order to keep things simple just use
> - * 2 * max_req * max_seg.
> - */
> -#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
> -
>  static void blk_alloc(struct XenDevice *xendev)
>  {
>  struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
> @@ -938,11 +931,6 @@ static void blk_alloc(struct XenDevice *xendev)
>  if (xen_mode != XEN_EMULATE) {
>  batch_maps = 1;
>  }
> -if (xengnttab_set_max_grants(xendev->gnttabdev,
> -MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
> -xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
> -  strerror(errno));
> -}
>  }
>  
>  static void blk_parse_discard(struct XenBlkDev *blkdev)
> @@ -1037,6 +1025,9 @@ static int blk_init(struct XenDevice *xendev)
>!blkdev->feature_grant_copy);
>  xenstore_write_be_int(&blkdev->xendev, "info", info);
>  
> +xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
> +  MAX_RING_PAGE_ORDER);
> +
>  blk_parse_discard(blkdev);
>  
>  g_free(directiosafe);
> @@ -1058,12 +1049,25 @@ out_error:
>  return -1;
>  }
>  
> +/*
> + * We need to account for the grant allocations requiring contiguous
> + * chunks; the worst case number would be
> + * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
> + * but in order to keep things simple just use
> + * 2 * max_req * max_seg.
> + */
> +#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
> +
>  static int blk_connect(struct XenDevice *xendev)
>  {
>  struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
>  int pers, index, qflags;
>  bool readonly = true;
>  bool writethrough = true;
> +int order, ring_ref;
> +unsigned int ring_size, max_grants;
> +unsigned int i;
> +uint32_t *domids;
>  
>  /* read-only ? */
>  if (blkdev->directiosafe) {
> 

[Qemu-block] [PATCH 2/3] xen-disk: add support for multi-page shared rings

2017-06-20 Thread Paul Durrant
The blkif protocol has had provision for negotiation of multi-page shared
rings for some time now, and many guest OSes have support in their
frontend drivers.

This patch makes the necessary modifications to allow xen-disk to support
a shared ring up to order 4 (i.e. 16 pages).

Signed-off-by: Paul Durrant 
---
Cc: Stefano Stabellini 
Cc: Anthony Perard 
Cc: Kevin Wolf 
Cc: Max Reitz 
---
 hw/block/xen_disk.c | 141 
 1 file changed, 110 insertions(+), 31 deletions(-)
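The negotiation mentioned in the description above is carried out through xenstore: the backend advertises "max-ring-page-order" (see the blk_init() hunk below) and a multi-page frontend responds with "ring-page-order" plus one "ring-ref%u" key per shared page. The archived diff is cut off before the blk_connect() changes, so the following is only a sketch of how a backend can consume those keys, not the literal missing hunk; the helper name blk_read_ring_refs() is invented for the illustration, and it assumes the xen_disk.c context (struct XenBlkDev, xenstore_read_fe_int(), MAX_RING_PAGE_ORDER) visible in the quoted code:

/* Sketch only: read the frontend's ring references, falling back to the
 * legacy single-page "ring-ref" key when no "ring-page-order" is present. */
static int blk_read_ring_refs(struct XenBlkDev *blkdev)
{
    int order, ring_ref;
    unsigned int i;

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order", &order) == -1) {
        /* Legacy frontend: a single page published as "ring-ref". */
        blkdev->nr_ring_ref = 1;
        if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &ring_ref) == -1) {
            return -1;
        }
        blkdev->ring_ref[0] = ring_ref;
        return 0;
    }

    if (order < 0 || order > MAX_RING_PAGE_ORDER) {
        return -1;   /* frontend asked for more than we advertised */
    }

    /* Multi-page frontend: 1 << order pages, keys "ring-ref0".."ring-refN". */
    blkdev->nr_ring_ref = 1 << order;
    for (i = 0; i < blkdev->nr_ring_ref; i++) {
        char *key = g_strdup_printf("ring-ref%u", i);

        if (xenstore_read_fe_int(&blkdev->xendev, key, &ring_ref) == -1) {
            g_free(key);
            return -1;
        }
        blkdev->ring_ref[i] = ring_ref;
        g_free(key);
    }
    return 0;
}

With nr_ring_ref known, the backend can size max_requests from the larger ring before mapping any grants, which is presumably why the xengnttab_set_max_grants() call is dropped from blk_alloc() in the diff below and the max_grants variable appears in blk_connect().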

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 9b06e3aa81..a9942d32db 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -36,8 +36,6 @@
 
 static int batch_maps   = 0;
 
-static int max_requests = 32;
-
 /* - */
 
 #define BLOCK_SIZE  512
@@ -84,6 +82,8 @@ struct ioreq {
 BlockAcctCookie acct;
 };
 
+#define MAX_RING_PAGE_ORDER 4
+
 struct XenBlkDev {
 struct XenDevicexendev;  /* must be first */
 char*params;
@@ -94,7 +94,8 @@ struct XenBlkDev {
 booldirectiosafe;
 const char  *fileproto;
 const char  *filename;
-int ring_ref;
+unsigned intring_ref[1 << MAX_RING_PAGE_ORDER];
+unsigned intnr_ring_ref;
 void*sring;
 int64_t file_blk;
 int64_t file_size;
@@ -110,6 +111,7 @@ struct XenBlkDev {
 int requests_total;
 int requests_inflight;
 int requests_finished;
+unsigned intmax_requests;
 
 /* Persistent grants extension */
 gbooleanfeature_discard;
@@ -199,7 +201,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
 struct ioreq *ioreq = NULL;
 
 if (QLIST_EMPTY(&blkdev->freelist)) {
-if (blkdev->requests_total >= max_requests) {
+if (blkdev->requests_total >= blkdev->max_requests) {
 goto out;
 }
 /* allocate new struct */
@@ -905,7 +907,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
 ioreq_runio_qemu_aio(ioreq);
 }
 
-if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
+if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
 qemu_bh_schedule(blkdev->bh);
 }
 }
@@ -918,15 +920,6 @@ static void blk_bh(void *opaque)
 blk_handle_requests(blkdev);
 }
 
-/*
- * We need to account for the grant allocations requiring contiguous
- * chunks; the worst case number would be
- * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
- * but in order to keep things simple just use
- * 2 * max_req * max_seg.
- */
-#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
-
 static void blk_alloc(struct XenDevice *xendev)
 {
 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
@@ -938,11 +931,6 @@ static void blk_alloc(struct XenDevice *xendev)
 if (xen_mode != XEN_EMULATE) {
 batch_maps = 1;
 }
-if (xengnttab_set_max_grants(xendev->gnttabdev,
-MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
-xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
-  strerror(errno));
-}
 }
 
 static void blk_parse_discard(struct XenBlkDev *blkdev)
@@ -1037,6 +1025,9 @@ static int blk_init(struct XenDevice *xendev)
   !blkdev->feature_grant_copy);
 xenstore_write_be_int(&blkdev->xendev, "info", info);
 
+xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
+  MAX_RING_PAGE_ORDER);
+
 blk_parse_discard(blkdev);
 
 g_free(directiosafe);
@@ -1058,12 +1049,25 @@ out_error:
 return -1;
 }
 
+/*
+ * We need to account for the grant allocations requiring contiguous
+ * chunks; the worst case number would be
+ * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
+ * but in order to keep things simple just use
+ * 2 * max_req * max_seg.
+ */
+#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
+
 static int blk_connect(struct XenDevice *xendev)
 {
 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
 int pers, index, qflags;
 bool readonly = true;
 bool writethrough = true;
+int order, ring_ref;
+unsigned int ring_size, max_grants;
+unsigned int i;
+uint32_t *domids;
 
 /* read-only ? */
 if (blkdev->directiosafe) {
@@ -1138,9 +1142,39 @@ static int blk_connect(struct XenDevice *xendev)
 xenstore_write_be_int64(&blkdev->xendev, "sectors",
 blkdev->file_size / blkdev->file_blk);
 
-if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
+if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order", &order) ==