On 12/11/2014 09:17 AM, Vladimir Sementsov-Ogievskiy wrote:
Add blk_create and blk_free to remove code duplication. Otherwise, the
duplication would grow in the following patches as the BlkMigBlock
structure is extended.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsement...@parallels.com>
---
  block-migration.c | 56 +++++++++++++++++++++++++++++--------------------------
  1 file changed, 30 insertions(+), 26 deletions(-)

diff --git a/block-migration.c b/block-migration.c
index 5b4aa0f..d0c825f 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -113,6 +113,30 @@ static void blk_mig_unlock(void)
      qemu_mutex_unlock(&block_mig_state.lock);
  }

+/* Only allocating and initializing structure fields, not copying any data. */
+
+static BlkMigBlock *blk_create(BlkMigDevState *bmds, int64_t sector,
+                                int nr_sectors)
+{
+    BlkMigBlock *blk = g_new(BlkMigBlock, 1);
+    blk->buf = g_malloc(BLOCK_SIZE);
+    blk->bmds = bmds;
+    blk->sector = sector;
+    blk->nr_sectors = nr_sectors;
+
+    blk->iov.iov_base = blk->buf;
+    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
+    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
+
+    return blk;
+}
+
+static void blk_free(BlkMigBlock *blk)
+{
+    g_free(blk->buf);
+    g_free(blk);
+}
+
  /* Must run outside of the iothread lock during the bulk phase,
   * or the VM will stall.
   */
@@ -285,15 +309,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
          nr_sectors = total_sectors - cur_sector;
      }

-    blk = g_new(BlkMigBlock, 1);
-    blk->buf = g_malloc(BLOCK_SIZE);
-    blk->bmds = bmds;
-    blk->sector = cur_sector;
-    blk->nr_sectors = nr_sectors;
-
-    blk->iov.iov_base = blk->buf;
-    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
-    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
+    blk = blk_create(bmds, cur_sector, nr_sectors);

      blk_mig_lock();
      block_mig_state.submitted++;
@@ -467,17 +483,9 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
              } else {
                  nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
              }
-            blk = g_new(BlkMigBlock, 1);
-            blk->buf = g_malloc(BLOCK_SIZE);
-            blk->bmds = bmds;
-            blk->sector = sector;
-            blk->nr_sectors = nr_sectors;
+            blk = blk_create(bmds, sector, nr_sectors);

              if (is_async) {
-                blk->iov.iov_base = blk->buf;
-                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
-                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
-

I suppose the (!is_async) branch never references iov/qiov again, but the functional difference caught my eye: qemu_iovec_init_external() used to be called only under the "is_async" branch, but now it is executed unconditionally.

Is that fine?
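
For what it's worth, here is a minimal stand-alone sketch of the difference I mean (plain C with dummy stand-in types, not the real BlkMigBlock/QEMUIOVector): after the refactor the iovec setup runs even on the synchronous path, where the old code skipped it. As far as I can see the extra initialization is merely unused there rather than harmful, but it is a behavioural change worth confirming.

#include <stdio.h>
#include <stdlib.h>

/* Dummy stand-ins for the real QEMU structures, only to model where the
 * iovec initialization happens; field names are illustrative. */
struct iov_model { void *iov_base; size_t iov_len; };
struct blk_model { char *buf; struct iov_model iov; int qiov_inited; };

/* Models the new blk_create(): the iov/qiov setup is now unconditional,
 * like the qemu_iovec_init_external() call in the patch. */
static struct blk_model *blk_create_model(size_t len)
{
    struct blk_model *blk = calloc(1, sizeof(*blk));
    blk->buf = malloc(len);
    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = len;
    blk->qiov_inited = 1;
    return blk;
}

static void blk_free_model(struct blk_model *blk)
{
    free(blk->buf);
    free(blk);
}

int main(void)
{
    /* Synchronous path: before the patch qiov stayed uninitialized here;
     * now it is set up even though this caller never reads it. */
    struct blk_model *blk = blk_create_model(512);
    printf("sync path: qiov_inited=%d (was 0 before the refactor)\n",
           blk->qiov_inited);
    blk_free_model(blk);
    return 0;
}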

                  blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                              nr_sectors, blk_mig_read_cb, blk);

@@ -492,8 +500,7 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                  }
                  blk_send(f, blk);

-                g_free(blk->buf);
-                g_free(blk);
+                blk_free(blk);
              }

              bdrv_reset_dirty_bitmap(bmds->bs, bmds->dirty_bitmap, sector,
@@ -508,8 +515,7 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,

  error:
      DPRINTF("Error reading sector %" PRId64 "\n", sector);
-    g_free(blk->buf);
-    g_free(blk);
+    blk_free(blk);
      return ret;
  }

@@ -560,8 +566,7 @@ static int flush_blks(QEMUFile *f)
          blk_send(f, blk);
          blk_mig_lock();

-        g_free(blk->buf);
-        g_free(blk);
+        blk_free(blk);

          block_mig_state.read_done--;
          block_mig_state.transferred++;
@@ -612,8 +617,7 @@ static void blk_mig_cleanup(void)

      while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
          QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
-        g_free(blk->buf);
-        g_free(blk);
+        blk_free(blk);
      }
      blk_mig_unlock();
  }


Reviewed-by: John Snow <js...@redhat.com>
