Re: [Qemu-block] [PATCH v6 10/21] qmp: Add support of dirty-bitmap sync mode for drive-backup

2015-04-22 Thread Eric Blake
On 04/17/2015 05:49 PM, John Snow wrote:
> For the dirty-bitmap sync mode, the block job will iterate through the
> given dirty bitmap to decide whether a sector needs backup (backing up
> all dirty clusters and skipping clean ones), just as the 'top' sync
> mode consults allocation status.
>
> Signed-off-by: Fam Zheng f...@redhat.com
> Signed-off-by: John Snow js...@redhat.com
> ---

Reviewed-by: Eric Blake ebl...@redhat.com

-- 
Eric Blake   eblake redhat com    +1-919-301-3266
Libvirt virtualization library http://libvirt.org





[Qemu-block] [PATCH v6 10/21] qmp: Add support of dirty-bitmap sync mode for drive-backup

2015-04-17 Thread John Snow
For the dirty-bitmap sync mode, the block job will iterate through the
given dirty bitmap to decide whether a sector needs backup (backing up
all dirty clusters and skipping clean ones), just as the 'top' sync
mode consults allocation status.
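
(Illustration only, not part of the patch: a QMP invocation of the new
mode might look roughly like the sketch below.  The device, bitmap and
target names are placeholders; the exact argument names are those
defined by the qapi/block-core.json change in this patch.)

  -> { "execute": "drive-backup",
       "arguments": { "device": "drive0",
                      "sync": "dirty-bitmap",
                      "bitmap": "bitmap0",
                      "target": "backup.qcow2",
                      "format": "qcow2" } }
  <- { "return": {} }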

Signed-off-by: Fam Zheng f...@redhat.com
Signed-off-by: John Snow js...@redhat.com
---
 block.c                   |   9 +++
 block/backup.c            | 155 +++---
 block/mirror.c            |   4 ++
 blockdev.c                |  18 +-
 hmp.c                     |   3 +-
 include/block/block.h     |   1 +
 include/block/block_int.h |   2 +
 qapi/block-core.json      |  14 +++--
 qmp-commands.hx           |   8 ++-
 9 files changed, 181 insertions(+), 33 deletions(-)

diff --git a/block.c b/block.c
index 8f08b6e..185cd7f 100644
--- a/block.c
+++ b/block.c
@@ -5717,6 +5717,15 @@ static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
     }
 }
 
+/**
+ * Advance an HBitmapIter to an arbitrary offset.
+ */
+void bdrv_set_dirty_iter(HBitmapIter *hbi, int64_t offset)
+{
+    assert(hbi->hb);
+    hbitmap_iter_init(hbi, hbi->hb, offset);
+}
+
 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
 {
     return hbitmap_count(bitmap->bitmap);
diff --git a/block/backup.c b/block/backup.c
index 1c535b1..e77f7e8 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -37,6 +37,8 @@ typedef struct CowRequest {
 typedef struct BackupBlockJob {
     BlockJob common;
     BlockDriverState *target;
+    /* bitmap for sync=dirty-bitmap */
+    BdrvDirtyBitmap *sync_bitmap;
     MirrorSyncMode sync_mode;
     RateLimit limit;
     BlockdevOnError on_source_error;
@@ -242,6 +244,91 @@ static void backup_complete(BlockJob *job, void *opaque)
     g_free(data);
 }
 
+static bool coroutine_fn yield_and_check(BackupBlockJob *job)
+{
+    if (block_job_is_cancelled(&job->common)) {
+        return true;
+    }
+
+    /* we need to yield so that qemu_aio_flush() returns.
+     * (without, VM does not reboot)
+     */
+    if (job->common.speed) {
+        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
+                                                      job->sectors_read);
+        job->sectors_read = 0;
+        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
+    } else {
+        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
+    }
+
+    if (block_job_is_cancelled(&job->common)) {
+        return true;
+    }
+
+    return false;
+}
+
+static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
+{
+    bool error_is_read;
+    int ret = 0;
+    int clusters_per_iter;
+    uint32_t granularity;
+    int64_t sector;
+    int64_t cluster;
+    int64_t end;
+    int64_t last_cluster = -1;
+    BlockDriverState *bs = job->common.bs;
+    HBitmapIter hbi;
+
+    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
+    clusters_per_iter = MAX((granularity / BACKUP_CLUSTER_SIZE), 1);
+    bdrv_dirty_iter_init(bs, job->sync_bitmap, &hbi);
+
+    /* Find the next dirty sector(s) */
+    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
+        cluster = sector / BACKUP_SECTORS_PER_CLUSTER;
+
+        /* Fake progress updates for any clusters we skipped */
+        if (cluster != last_cluster + 1) {
+            job->common.offset += ((cluster - last_cluster - 1) *
+                                   BACKUP_CLUSTER_SIZE);
+        }
+
+        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
+            do {
+                if (yield_and_check(job)) {
+                    return ret;
+                }
+                ret = backup_do_cow(bs, cluster * BACKUP_SECTORS_PER_CLUSTER,
+                                    BACKUP_SECTORS_PER_CLUSTER,
+                                    &error_is_read);
+                if ((ret < 0) &&
+                    backup_error_action(job, error_is_read, -ret) ==
+                    BLOCK_ERROR_ACTION_REPORT) {
+                    return ret;
+                }
+            } while (ret < 0);
+        }
+
+        /* If the bitmap granularity is smaller than the backup granularity,
+         * we need to advance the iterator pointer to the next cluster. */
+        if (granularity < BACKUP_CLUSTER_SIZE) {
+            bdrv_set_dirty_iter(&hbi, cluster * BACKUP_SECTORS_PER_CLUSTER);
+        }
+
+        last_cluster = cluster - 1;
+    }
+
+    /* Play some final catchup with the progress meter */
+    end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE);
+    if (last_cluster + 1 < end) {
+        job->common.offset += ((end - last_cluster - 1) * BACKUP_CLUSTER_SIZE);
+    }
+
+    return ret;
+}
+
 static void coroutine_fn backup_run(void *opaque)
 {
     BackupBlockJob *job = opaque;
@@ -259,8 +346,7 @@ static void coroutine_fn backup_run(void *opaque)
     qemu_co_rwlock_init(&job->flush_rwlock);
 
     start = 0;
-    end = DIV_ROUND_UP(job->common.len / BDRV_SECTOR_SIZE,
-                       BACKUP_SECTORS_PER_CLUSTER);
+    end = DIV_ROUND_UP(job->common.len,