Re: [Qemu-block] [PATCH v3 11/20] mirror: Switch mirror_do_read() to byte-based

2017-06-30 Thread Jeff Cody
On Tue, Jun 27, 2017 at 02:24:49PM -0500, Eric Blake wrote:
> We are gradually converting to byte-based interfaces, as they are
> easier to reason about than sector-based.  Convert another internal
> function (no semantic change).
> 
> Signed-off-by: Eric Blake 
> Reviewed-by: John Snow 
> 

Reviewed-by: Jeff Cody 

> ---
> v2: rebase to earlier changes
> ---
>  block/mirror.c | 75 
> ++
>  1 file changed, 33 insertions(+), 42 deletions(-)
> 
> diff --git a/block/mirror.c b/block/mirror.c
> index 1a4602a..81ff784 100644
> --- a/block/mirror.c
> +++ b/block/mirror.c
> @@ -196,7 +196,7 @@ static inline int mirror_clip_sectors(MirrorBlockJob *s,
>  /* Round offset and/or bytes to target cluster if COW is needed, and
>   * return the offset of the adjusted tail against original. */
>  static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
> -unsigned int *bytes)
> +uint64_t *bytes)
>  {
>  bool need_cow;
>  int ret = 0;
> @@ -204,6 +204,7 @@ static int mirror_cow_align(MirrorBlockJob *s, int64_t 
> *offset,
>  unsigned int align_bytes = *bytes;
>  int max_bytes = s->granularity * s->max_iov;
> 
> +assert(*bytes < INT_MAX);
>  need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
>  need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
>s->cow_bitmap);
> @@ -239,59 +240,50 @@ static inline void mirror_wait_for_io(MirrorBlockJob *s)
>  }
> 
>  /* Submit async read while handling COW.
> - * Returns: The number of sectors copied after and including sector_num,
> - *  excluding any sectors copied prior to sector_num due to 
> alignment.
> - *  This will be nb_sectors if no alignment is necessary, or
> - *  (new_end - sector_num) if tail is rounded up or down due to
> + * Returns: The number of bytes copied after and including offset,
> + *  excluding any bytes copied prior to offset due to alignment.
> + *  This will be @bytes if no alignment is necessary, or
> + *  (new_end - offset) if tail is rounded up or down due to
>   *  alignment or buffer limit.
>   */
> -static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
> -  int nb_sectors)
> +static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
> +   uint64_t bytes)
>  {
>  BlockBackend *source = s->common.blk;
> -int sectors_per_chunk, nb_chunks;
> -int ret;
> +int nb_chunks;
> +uint64_t ret;
>  MirrorOp *op;
> -int max_sectors;
> +uint64_t max_bytes;
> 
> -sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
> -max_sectors = sectors_per_chunk * s->max_iov;
> +max_bytes = s->granularity * s->max_iov;
> 
>  /* We can only handle as much as buf_size at a time. */
> -nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
> -nb_sectors = MIN(max_sectors, nb_sectors);
> -assert(nb_sectors);
> -assert(nb_sectors < BDRV_REQUEST_MAX_SECTORS);
> -ret = nb_sectors;
> +bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
> +assert(bytes);
> +assert(bytes < BDRV_REQUEST_MAX_BYTES);
> +ret = bytes;
> 
>  if (s->cow_bitmap) {
> -int64_t offset = sector_num * BDRV_SECTOR_SIZE;
> -unsigned int bytes = nb_sectors * BDRV_SECTOR_SIZE;
> -int gap;
> -
> -gap = mirror_cow_align(s, &offset, &bytes);
> -sector_num = offset / BDRV_SECTOR_SIZE;
> -nb_sectors = bytes / BDRV_SECTOR_SIZE;
> -ret += gap / BDRV_SECTOR_SIZE;
> +ret += mirror_cow_align(s, &offset, &bytes);
>  }
> -assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
> -/* The sector range must meet granularity because:
> +assert(bytes <= s->buf_size);
> +/* The range will be sector-aligned because:
>   * 1) Caller passes in aligned values;
> - * 2) mirror_cow_align is used only when target cluster is larger. */
> -assert(!(sector_num % sectors_per_chunk));
> -nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);
> + * 2) mirror_cow_align is used only when target cluster is larger.
> + * But it might not be cluster-aligned at end-of-file. */
> +assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
> +nb_chunks = DIV_ROUND_UP(bytes, s->granularity);
> 
>  while (s->buf_free_count < nb_chunks) {
> -trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
> - s->in_flight);
> +trace_mirror_yield_in_flight(s, offset, s->in_flight);
>  mirror_wait_for_io(s);
>  }
> 
>  /* Allocate a MirrorOp that is used as an AIO callback.  */
>  op = g_new(MirrorOp, 1);
>  op->s = s;
> -op->offset = sector_num * BDRV_SECTOR_SIZE;
> -op->bytes = nb_sectors * BDRV_SECTOR_SIZE;
> +op->offset = offset;

[Qemu-block] [PATCH v3 11/20] mirror: Switch mirror_do_read() to byte-based

2017-06-27 Thread Eric Blake
We are gradually converting to byte-based interfaces, as they are
easier to reason about than sector-based.  Convert another internal
function (no semantic change).

Signed-off-by: Eric Blake 
Reviewed-by: John Snow 

---
v2: rebase to earlier changes
---
 block/mirror.c | 75 ++
 1 file changed, 33 insertions(+), 42 deletions(-)

diff --git a/block/mirror.c b/block/mirror.c
index 1a4602a..81ff784 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -196,7 +196,7 @@ static inline int mirror_clip_sectors(MirrorBlockJob *s,
 /* Round offset and/or bytes to target cluster if COW is needed, and
  * return the offset of the adjusted tail against original. */
 static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
-unsigned int *bytes)
+uint64_t *bytes)
 {
 bool need_cow;
 int ret = 0;
@@ -204,6 +204,7 @@ static int mirror_cow_align(MirrorBlockJob *s, int64_t 
*offset,
 unsigned int align_bytes = *bytes;
 int max_bytes = s->granularity * s->max_iov;

+assert(*bytes < INT_MAX);
 need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
 need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
   s->cow_bitmap);
@@ -239,59 +240,50 @@ static inline void mirror_wait_for_io(MirrorBlockJob *s)
 }

 /* Submit async read while handling COW.
- * Returns: The number of sectors copied after and including sector_num,
- *  excluding any sectors copied prior to sector_num due to alignment.
- *  This will be nb_sectors if no alignment is necessary, or
- *  (new_end - sector_num) if tail is rounded up or down due to
+ * Returns: The number of bytes copied after and including offset,
+ *  excluding any bytes copied prior to offset due to alignment.
+ *  This will be @bytes if no alignment is necessary, or
+ *  (new_end - offset) if tail is rounded up or down due to
  *  alignment or buffer limit.
  */
-static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
-  int nb_sectors)
+static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
+   uint64_t bytes)
 {
 BlockBackend *source = s->common.blk;
-int sectors_per_chunk, nb_chunks;
-int ret;
+int nb_chunks;
+uint64_t ret;
 MirrorOp *op;
-int max_sectors;
+uint64_t max_bytes;

-sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
-max_sectors = sectors_per_chunk * s->max_iov;
+max_bytes = s->granularity * s->max_iov;

 /* We can only handle as much as buf_size at a time. */
-nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
-nb_sectors = MIN(max_sectors, nb_sectors);
-assert(nb_sectors);
-assert(nb_sectors < BDRV_REQUEST_MAX_SECTORS);
-ret = nb_sectors;
+bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
+assert(bytes);
+assert(bytes < BDRV_REQUEST_MAX_BYTES);
+ret = bytes;

 if (s->cow_bitmap) {
-int64_t offset = sector_num * BDRV_SECTOR_SIZE;
-unsigned int bytes = nb_sectors * BDRV_SECTOR_SIZE;
-int gap;
-
-gap = mirror_cow_align(s, &offset, &bytes);
-sector_num = offset / BDRV_SECTOR_SIZE;
-nb_sectors = bytes / BDRV_SECTOR_SIZE;
-ret += gap / BDRV_SECTOR_SIZE;
+ret += mirror_cow_align(s, &offset, &bytes);
 }
-assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
-/* The sector range must meet granularity because:
+assert(bytes <= s->buf_size);
+/* The range will be sector-aligned because:
  * 1) Caller passes in aligned values;
- * 2) mirror_cow_align is used only when target cluster is larger. */
-assert(!(sector_num % sectors_per_chunk));
-nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);
+ * 2) mirror_cow_align is used only when target cluster is larger.
+ * But it might not be cluster-aligned at end-of-file. */
+assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
+nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

 while (s->buf_free_count < nb_chunks) {
-trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
- s->in_flight);
+trace_mirror_yield_in_flight(s, offset, s->in_flight);
 mirror_wait_for_io(s);
 }

 /* Allocate a MirrorOp that is used as an AIO callback.  */
 op = g_new(MirrorOp, 1);
 op->s = s;
-op->offset = sector_num * BDRV_SECTOR_SIZE;
-op->bytes = nb_sectors * BDRV_SECTOR_SIZE;
+op->offset = offset;
+op->bytes = bytes;

 /* Now make a QEMUIOVector taking enough granularity-sized chunks
  * from s->buf_free.
@@ -299,7 +291,7 @@ static int mirror_do_read(MirrorBlockJob *s, int64_t 
sector_num,
 qemu_iovec_init(&op->qiov, nb_chunks);
 while (nb_chunks-- > 0) {
 MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
-