Re: [Xen-devel] [PULL 3/3] xen-disk: add support for multi-page shared rings

2017-07-26 Thread Olaf Hering
On Tue, Jun 27, Stefano Stabellini wrote:

> From: Paul Durrant 
> The blkif protocol has had provision for negotiation of multi-page shared
> rings for some time now and many guest OSes have support in their frontend
> drivers.

> +++ b/hw/block/xen_disk.c

> +domids = g_malloc0_n(blkdev->nr_ring_ref, sizeof(uint32_t));

According to [1], g_malloc0_n requires at least glib-2.24. As a result,
compilation of qemu-2.10 fails on SLE11, which ships only glib-2.22.
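
A drop-in alternative that keeps older glib working (just a sketch, not a
tested patch; blkdev->nr_ring_ref is the field introduced by this series)
would be the long-standing g_new0() macro, which zero-allocates the array
without relying on the *_n allocators added in glib-2.24:

    /* g_new0() predates glib-2.22 and, like g_malloc0_n(), returns
     * zero-filled storage for nr_ring_ref elements of uint32_t. */
    domids = g_new0(uint32_t, blkdev->nr_ring_ref);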

Olaf

[1] https://developer.gnome.org/glib/stable/glib-Memory-Allocation.html




[Xen-devel] [PULL 3/3] xen-disk: add support for multi-page shared rings

2017-06-27 Thread Stefano Stabellini
From: Paul Durrant 

The blkif protocol has had provision for negotiation of multi-page shared
rings for some time now and many guest OSes have support in their frontend
drivers.

This patch makes the necessary modifications for xen-disk to support a shared
ring of up to order 4 (i.e. 16 pages).
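
For reference, the negotiation itself is a plain xenstore handshake: the
backend advertises "max-ring-page-order", and a frontend that understands
multi-page rings replies with "ring-page-order" plus one grant reference per
page. The sketch below illustrates the shape of that handshake using qemu's
xenstore_read_fe_int()/xenstore_write_be_int() helpers; the helper name and
structure are invented for illustration and are not the exact code in the
diff that follows.

    #define MAX_RING_PAGE_ORDER 4

    /* Return how many shared ring pages the frontend asked for, or 0 if
     * it requested more than the backend advertised. */
    static unsigned int negotiate_ring_pages(struct XenDevice *xendev)
    {
        int order;

        /* Backend side: advertise support (the patch does this in
         * blk_init, before the frontend connects). */
        xenstore_write_be_int(xendev, "max-ring-page-order",
                              MAX_RING_PAGE_ORDER);

        /* Old frontends never write "ring-page-order": fall back to a
         * single page and the traditional single "ring-ref" key. */
        if (xenstore_read_fe_int(xendev, "ring-page-order", &order) == -1) {
            return 1;
        }
        if (order < 0 || order > MAX_RING_PAGE_ORDER) {
            return 0;
        }
        /* Multi-page ring: the frontend provides one "ring-ref%u" key
         * per page. */
        return 1u << order;
    }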

Signed-off-by: Paul Durrant 
Signed-off-by: Stefano Stabellini 
Reviewed-by: Stefano Stabellini 
---
 hw/block/xen_disk.c | 144 +---
 1 file changed, 113 insertions(+), 31 deletions(-)
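
The MAX_GRANTS() bound used by the patch deliberately over-estimates: the
exact worst case for contiguous grant chunks is
max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1, and
2 * max_req * max_seg is simply easier to reason about. A compile-time
sanity check of that simplification, assuming the pre-patch default of 32
requests and BLKIF_MAX_SEGMENTS_PER_REQUEST == 11 (the SKETCH_* names are
invented for illustration):

    /* Exact worst case: 32 * 11 + 31 * 10 + 1 = 663 grants;
     * simplified bound: 2 * 32 * 11 = 704, which covers it. */
    #define SKETCH_MAX_REQ 32
    #define SKETCH_MAX_SEG 11   /* BLKIF_MAX_SEGMENTS_PER_REQUEST */
    _Static_assert(2 * SKETCH_MAX_REQ * SKETCH_MAX_SEG >=
                   SKETCH_MAX_REQ * SKETCH_MAX_SEG +
                   (SKETCH_MAX_REQ - 1) * (SKETCH_MAX_SEG - 1) + 1,
                   "simplified MAX_GRANTS bound must cover the worst case");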

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 8218741..d42ed70 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -36,8 +36,6 @@
 
 static int batch_maps   = 0;
 
-static int max_requests = 32;
-
 /* ------------------------------------------------------------- */
 
 #define BLOCK_SIZE  512
@@ -84,6 +82,8 @@ struct ioreq {
 BlockAcctCookie acct;
 };
 
+#define MAX_RING_PAGE_ORDER 4
+
 struct XenBlkDev {
 struct XenDevice    xendev;  /* must be first */
 char                *params;
@@ -94,7 +94,8 @@ struct XenBlkDev {
 bool                directiosafe;
 const char          *fileproto;
 const char          *filename;
-int                 ring_ref;
+unsigned int        ring_ref[1 << MAX_RING_PAGE_ORDER];
+unsigned int        nr_ring_ref;
 void                *sring;
 int64_t             file_blk;
 int64_t             file_size;
@@ -110,6 +111,7 @@ struct XenBlkDev {
 int                 requests_total;
 int                 requests_inflight;
 int                 requests_finished;
+unsigned int        max_requests;
 
 /* Persistent grants extension */
 gboolean            feature_discard;
@@ -199,7 +201,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
 struct ioreq *ioreq = NULL;
 
 if (QLIST_EMPTY(&blkdev->freelist)) {
-if (blkdev->requests_total >= max_requests) {
+if (blkdev->requests_total >= blkdev->max_requests) {
 goto out;
 }
 /* allocate new struct */
@@ -904,7 +906,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
 ioreq_runio_qemu_aio(ioreq);
 }
 
-if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
+if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
 qemu_bh_schedule(blkdev->bh);
 }
 }
@@ -917,15 +919,6 @@ static void blk_bh(void *opaque)
 blk_handle_requests(blkdev);
 }
 
-/*
- * We need to account for the grant allocations requiring contiguous
- * chunks; the worst case number would be
- * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
- * but in order to keep things simple just use
- * 2 * max_req * max_seg.
- */
-#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
-
 static void blk_alloc(struct XenDevice *xendev)
 {
 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
@@ -937,11 +930,6 @@ static void blk_alloc(struct XenDevice *xendev)
 if (xen_mode != XEN_EMULATE) {
 batch_maps = 1;
 }
-if (xengnttab_set_max_grants(xendev->gnttabdev,
-MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
-xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
-  strerror(errno));
-}
 }
 
 static void blk_parse_discard(struct XenBlkDev *blkdev)
@@ -1036,6 +1024,9 @@ static int blk_init(struct XenDevice *xendev)
   !blkdev->feature_grant_copy);
 xenstore_write_be_int(&blkdev->xendev, "info", info);
 
+xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
+  MAX_RING_PAGE_ORDER);
+
 blk_parse_discard(blkdev);
 
 g_free(directiosafe);
@@ -1057,12 +1048,25 @@ out_error:
 return -1;
 }
 
+/*
+ * We need to account for the grant allocations requiring contiguous
+ * chunks; the worst case number would be
+ * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
+ * but in order to keep things simple just use
+ * 2 * max_req * max_seg.
+ */
+#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
+
 static int blk_connect(struct XenDevice *xendev)
 {
 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
 int pers, index, qflags;
 bool readonly = true;
 bool writethrough = true;
+int order, ring_ref;
+unsigned int ring_size, max_grants;
+unsigned int i;
+uint32_t *domids;
 
 /* read-only ? */
 if (blkdev->directiosafe) {
@@ -1137,9 +1141,42 @@ static int blk_connect(struct XenDevice *xendev)
 xenstore_write_be_int64(&blkdev->xendev, "sectors",
 blkdev->file_size / blkdev->file_blk);
 
-if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
+if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
+ &order) == -1) {
+blkdev->nr_ring_ref = 1;
+
+