Re: [Qemu-devel] [PATCH RFC 3/4] Curling: the sender

2013-09-11 Thread junqing . wang
hi,


 +bool create = false;
 This variable is never set.

It is set in the following 'if' block.
 +create = true;   <===

 -migration_bitmap = bitmap_new(ram_pages);
 -bitmap_set(migration_bitmap, 0, ram_pages);
 -migration_dirty_pages = ram_pages;
 +if (!ft_enabled() || !migration_bitmap)  {
 +migration_bitmap = bitmap_new(ram_pages);
 +bitmap_set(migration_bitmap, 0, ram_pages);
 +migration_dirty_pages = ram_pages;
 +create = true;   <==
 +}

Nothing in this patch sets the migration_bitmap to anything.

Let me explain all the odd 'if' blocks:
1  +if (!ft_enabled() || !migration_bitmap)  {
2  +if (!ft_enabled() || create) {
3  +if (!ft_enabled()) {

As I mentioned in the commit log: 
 We need to handle the variables related to live migration very
 carefully. So the new migration does not restart from the very
 begin of the migration, instead, it continues the previous
 migration.

Some variables should not be reset after one migration, because
the next one need these variables to continue the migration.
This explains all the if ft_enabled()

Besides, some variables need to be initialized at the first migration of 
curling.
That explains the 'if create' and 'if !migration_bitmap'.

 +if (ft_enabled()) {
 +if (old_vm_running) {
 +qemu_mutex_lock_iothread();
 +vm_start();
 +qemu_mutex_unlock_iothread();
 +
 +current_time = 
 qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 +time_spent = current_time - migration_start_time;
 +DPRINTF("this migration lasts for %" PRId64 " ms\n",
 +time_spent);
 +if (time_spent < time_window) {
 +g_usleep((time_window - time_spent)*1000);

Why are we waiting here?  If we are migrating faster than allowed, why
are we waiting?

Looping fast is not good, that means we enter iothread lock and do vm stop more 
frequently. The performance will drop and vm user will experience input stall 
if we do not sleep.

How to deal with this is a difficult issue, any suggestion is welcomed.

THIS IS ONE OF THE TWO MAIN PROBLEMS.  The other one is related to the magic 
number 0xfeedcafe.




Re: [Qemu-devel] [PATCH RFC 3/4] Curling: the sender

2013-09-10 Thread Juan Quintela
Jules Wang <junqing.wang@cs2c.com.cn> wrote:
 By leveraging live migration feature, the sender simply starts a
 new migration when the previous migration is completed.

 We need to handle the variables related to live migration very
 carefully. So the new migration does not restart from the very
 begin of the migration, instead, it continues the previous
 migration.

 Signed-off-by: Jules Wang <junqing.wang@cs2c.com.cn>
 ---
  arch_init.c | 18 +-
  migration.c | 23 ++-
  savevm.c|  4 
  3 files changed, 39 insertions(+), 6 deletions(-)

 diff --git a/arch_init.c b/arch_init.c
 index e47e139..5d006f6 100644
 --- a/arch_init.c
 +++ b/arch_init.c
 @@ -611,10 +611,14 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
  {
  RAMBlock *block;
  int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
 +bool create = false;

This variable is never set.

  
 -migration_bitmap = bitmap_new(ram_pages);
 -bitmap_set(migration_bitmap, 0, ram_pages);
 -migration_dirty_pages = ram_pages;
 +if (!ft_enabled() || !migration_bitmap)  {
 +migration_bitmap = bitmap_new(ram_pages);

Nothing in this patch sets the migration_bitmap to anything.


 +bitmap_set(migration_bitmap, 0, ram_pages);
 +migration_dirty_pages = ram_pages;
 +create = true;
 +}
  mig_throttle_on = false;
  dirty_rate_high_cnt = 0;



 @@ -634,7 +638,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
  qemu_mutex_lock_iothread();
  qemu_mutex_lock_ramlist();
  bytes_transferred = 0;
 -reset_ram_globals();
 +if (!ft_enabled() || create) {
 +reset_ram_globals();
 +}
  
  memory_global_dirty_log_start();
  migration_bitmap_sync();
 @@ -744,7 +750,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
  }
  
  ram_control_after_iterate(f, RAM_CONTROL_FINISH);
 -migration_end();
 +if (!ft_enabled()) {
 +migration_end();
 +}

What you want here?  My guess is that you want to sent device state
without sending the end of migration command,  right?


  qemu_mutex_unlock_ramlist();
  qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 diff --git a/migration.c b/migration.c
 index 59c8f32..d8a9b2d 100644
 --- a/migration.c
 +++ b/migration.c
 @@ -567,6 +567,7 @@ static void *migration_thread(void *opaque)
  int64_t max_size = 0;
  int64_t start_time = initial_time;
  bool old_vm_running = false;
 +int  time_window = 100;
  
  DPRINTF("beginning savevm\n");
  qemu_savevm_state_begin(s->file, &s->params);
 @@ -578,6 +579,8 @@ static void *migration_thread(void *opaque)
  
  while (s-state == MIG_STATE_ACTIVE) {
  int64_t current_time;
 +int64_t time_spent;
 +int64_t migration_start_time = 
 qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
  uint64_t pending_size;
  
  if (!qemu_file_rate_limit(s-file)) {
 @@ -607,10 +610,28 @@ static void *migration_thread(void *opaque)
  break;
  }
  
 -if (!qemu_file_get_error(s-file)) {
 +if (!qemu_file_get_error(s->file) && !ft_enabled()) {
  migrate_set_state(s, MIG_STATE_ACTIVE, 
 MIG_STATE_COMPLETED);
  break;
  }
 +
 +if (ft_enabled()) {
 +if (old_vm_running) {
 +qemu_mutex_lock_iothread();
 +vm_start();
 +qemu_mutex_unlock_iothread();
 +
 +current_time = 
 qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 +time_spent = current_time - migration_start_time;
 +DPRINTF("this migration lasts for %" PRId64 " ms\n",
 +time_spent);
 +if (time_spent < time_window) {
 +g_usleep((time_window - time_spent)*1000);

Why are we waiting here?  If we are migrating faster than allowed, why
are we waiting?

 +initial_time += time_window - time_spent;
 +}
 +}
 +qemu_savevm_state_begin(s->file, &s->params);
 +}
  }
  }
  
 diff --git a/savevm.c b/savevm.c
 index c536aa4..6daf690 100644
 --- a/savevm.c
 +++ b/savevm.c
 @@ -1824,6 +1824,7 @@ static void vmstate_save(QEMUFile *f, SaveStateEntry 
 *se)
  #define QEMU_VM_SECTION_END  0x03
  #define QEMU_VM_SECTION_FULL 0x04
  #define QEMU_VM_SUBSECTION   0x05
 +#define QEMU_VM_EOF_MAGIC    0xFeedCafe
  
  bool qemu_savevm_state_blocked(Error **errp)
  {
 @@ -1983,6 +1984,9 @@ void qemu_savevm_state_complete(QEMUFile *f)
  }
  
  qemu_put_byte(f, QEMU_VM_EOF);
 +if (ft_enabled()) {
 +qemu_put_be32(f, QEMU_VM_EOF_MAGIC);
 +}
  qemu_fflush(f);
  }



[Qemu-devel] [PATCH RFC 3/4] Curling: the sender

2013-09-10 Thread Jules Wang
By leveraging live migration feature, the sender simply starts a
new migration when the previous migration is completed.

We need to handle the variables related to live migration very
carefully. So the new migration does not restart from the very
begin of the migration, instead, it continues the previous
migration.

Signed-off-by: Jules Wang <junqing.wang@cs2c.com.cn>
---
 arch_init.c | 18 +-
 migration.c | 23 ++-
 savevm.c|  4 
 3 files changed, 39 insertions(+), 6 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index e47e139..5d006f6 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -611,10 +611,14 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 {
 RAMBlock *block;
 int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
+bool create = false;
 
-migration_bitmap = bitmap_new(ram_pages);
-bitmap_set(migration_bitmap, 0, ram_pages);
-migration_dirty_pages = ram_pages;
+if (!ft_enabled() || !migration_bitmap)  {
+migration_bitmap = bitmap_new(ram_pages);
+bitmap_set(migration_bitmap, 0, ram_pages);
+migration_dirty_pages = ram_pages;
+create = true;
+}
 mig_throttle_on = false;
 dirty_rate_high_cnt = 0;
 
@@ -634,7 +638,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 qemu_mutex_lock_iothread();
 qemu_mutex_lock_ramlist();
 bytes_transferred = 0;
-reset_ram_globals();
+if (!ft_enabled() || create) {
+reset_ram_globals();
+}
 
 memory_global_dirty_log_start();
 migration_bitmap_sync();
@@ -744,7 +750,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 }
 
 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
-migration_end();
+if (!ft_enabled()) {
+migration_end();
+}
 
 qemu_mutex_unlock_ramlist();
 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
diff --git a/migration.c b/migration.c
index 59c8f32..d8a9b2d 100644
--- a/migration.c
+++ b/migration.c
@@ -567,6 +567,7 @@ static void *migration_thread(void *opaque)
 int64_t max_size = 0;
 int64_t start_time = initial_time;
 bool old_vm_running = false;
+int  time_window = 100;
 
 DPRINTF("beginning savevm\n");
 qemu_savevm_state_begin(s->file, &s->params);
@@ -578,6 +579,8 @@ static void *migration_thread(void *opaque)
 
 while (s-state == MIG_STATE_ACTIVE) {
 int64_t current_time;
+int64_t time_spent;
+int64_t migration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 uint64_t pending_size;
 
 if (!qemu_file_rate_limit(s-file)) {
@@ -607,10 +610,28 @@ static void *migration_thread(void *opaque)
 break;
 }
 
-if (!qemu_file_get_error(s->file)) {
+if (!qemu_file_get_error(s->file) && !ft_enabled()) {
 migrate_set_state(s, MIG_STATE_ACTIVE, 
MIG_STATE_COMPLETED);
 break;
 }
+
+if (ft_enabled()) {
+if (old_vm_running) {
+qemu_mutex_lock_iothread();
+vm_start();
+qemu_mutex_unlock_iothread();
+
+current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+time_spent = current_time - migration_start_time;
+DPRINTF("this migration lasts for %" PRId64 " ms\n",
+time_spent);
+if (time_spent < time_window) {
+g_usleep((time_window - time_spent)*1000);
+initial_time += time_window - time_spent;
+}
+}
+qemu_savevm_state_begin(s->file, &s->params);
+}
 }
 }
 
diff --git a/savevm.c b/savevm.c
index c536aa4..6daf690 100644
--- a/savevm.c
+++ b/savevm.c
@@ -1824,6 +1824,7 @@ static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
 #define QEMU_VM_SECTION_END  0x03
 #define QEMU_VM_SECTION_FULL 0x04
 #define QEMU_VM_SUBSECTION   0x05
+#define QEMU_VM_EOF_MAGIC    0xFeedCafe
 
 bool qemu_savevm_state_blocked(Error **errp)
 {
@@ -1983,6 +1984,9 @@ void qemu_savevm_state_complete(QEMUFile *f)
 }
 
 qemu_put_byte(f, QEMU_VM_EOF);
+if (ft_enabled()) {
+qemu_put_be32(f, QEMU_VM_EOF_MAGIC);
+}
 qemu_fflush(f);
 }
 
-- 
1.8.0.1