Re: [Qemu-devel] [PATCH v4 17/21] backup: Switch backup_do_cow() to byte-based

2017-07-06 Thread Kevin Wolf
Am 05.07.2017 um 23:08 hat Eric Blake geschrieben:
> We are gradually converting to byte-based interfaces, as they are
> easier to reason about than sector-based.  Convert another internal
> function (no semantic change).
> 
> Signed-off-by: Eric Blake 
> Reviewed-by: John Snow 
> Reviewed-by: Jeff Cody 

Reviewed-by: Kevin Wolf 



[Qemu-devel] [PATCH v4 17/21] backup: Switch backup_do_cow() to byte-based

2017-07-05 Thread Eric Blake
We are gradually converting to byte-based interfaces, as they are
easier to reason about than sector-based.  Convert another internal
function (no semantic change).

Signed-off-by: Eric Blake 
Reviewed-by: John Snow 
Reviewed-by: Jeff Cody 

---
v2-v4: no change
---
 block/backup.c | 62 --
 1 file changed, 26 insertions(+), 36 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index cfbd921..c029d44 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -91,7 +91,7 @@ static void cow_request_end(CowRequest *req)
 }

 static int coroutine_fn backup_do_cow(BackupBlockJob *job,
-  int64_t sector_num, int nb_sectors,
+  int64_t offset, uint64_t bytes,
   bool *error_is_read,
   bool is_write_notifier)
 {
@@ -101,34 +101,28 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
 QEMUIOVector bounce_qiov;
 void *bounce_buffer = NULL;
 int ret = 0;
-int64_t sectors_per_cluster = cluster_size_sectors(job);
-int64_t start, end; /* clusters */
+int64_t start, end; /* bytes */
 int n; /* bytes */

 qemu_co_rwlock_rdlock(&job->flush_rwlock);

-start = sector_num / sectors_per_cluster;
-end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
+end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

-trace_backup_do_cow_enter(job, start * job->cluster_size,
-  sector_num * BDRV_SECTOR_SIZE,
-  nb_sectors * BDRV_SECTOR_SIZE);
+trace_backup_do_cow_enter(job, start, offset, bytes);

-wait_for_overlapping_requests(job, start * job->cluster_size,
-  end * job->cluster_size);
-cow_request_begin(&cow_request, job, start * job->cluster_size,
-  end * job->cluster_size);
+wait_for_overlapping_requests(job, start, end);
+cow_request_begin(&cow_request, job, start, end);

-for (; start < end; start++) {
-if (test_bit(start, job->done_bitmap)) {
-trace_backup_do_cow_skip(job, start * job->cluster_size);
+for (; start < end; start += job->cluster_size) {
+if (test_bit(start / job->cluster_size, job->done_bitmap)) {
+trace_backup_do_cow_skip(job, start);
 continue; /* already copied */
 }

-trace_backup_do_cow_process(job, start * job->cluster_size);
+trace_backup_do_cow_process(job, start);

-n = MIN(job->cluster_size,
-job->common.len - start * job->cluster_size);
+n = MIN(job->cluster_size, job->common.len - start);

 if (!bounce_buffer) {
 bounce_buffer = blk_blockalign(blk, job->cluster_size);
@@ -137,11 +131,10 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
 iov.iov_len = n;
 qemu_iovec_init_external(&bounce_qiov, &iov, 1);

-ret = blk_co_preadv(blk, start * job->cluster_size,
-bounce_qiov.size, &bounce_qiov,
+ret = blk_co_preadv(blk, start, bounce_qiov.size, &bounce_qiov,
 is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
 if (ret < 0) {
-trace_backup_do_cow_read_fail(job, start * job->cluster_size, ret);
+trace_backup_do_cow_read_fail(job, start, ret);
 if (error_is_read) {
 *error_is_read = true;
 }
@@ -149,22 +142,22 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
 }

 if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
-ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
+ret = blk_co_pwrite_zeroes(job->target, start,
bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
 } else {
-ret = blk_co_pwritev(job->target, start * job->cluster_size,
+ret = blk_co_pwritev(job->target, start,
  bounce_qiov.size, &bounce_qiov,
  job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
 }
 if (ret < 0) {
-trace_backup_do_cow_write_fail(job, start * job->cluster_size, ret);
+trace_backup_do_cow_write_fail(job, start, ret);
 if (error_is_read) {
 *error_is_read = false;
 }
 goto out;
 }

-set_bit(start, job->done_bitmap);
+set_bit(start / job->cluster_size, job->done_bitmap);

 /* Publish progress, guest I/O counts as progress too.  Note that the
  * offset field is an opaque progress value, it is not a disk offset.
@@ -180,8 +173,7 @@ out:

 cow_request_end(&cow_request);

-trace_backup_do_cow_return(job, sector_num * BDRV_SECTOR_SIZE,
-