Re: [PULL 13/38] migration: Non multifd migration don't care about multifd flushes

2023-10-19 Thread Juan Quintela
Michael Tokarev wrote:
> 17.10.2023 11:29, Juan Quintela:
>> RDMA was having trouble because
>> migrate_multifd_flush_after_each_section() can only be true or false,
>> but we don't want to send any flush when we are not in multifd
>> migration.
>> CC: Fabiano Rosas
> Fixes: 294e5a4034e81 ("multifd: Only flush once each full round of memory")
>
> Is it worth to pick it up for stable-8.1?

Yeap, please.

Later, Juan.




Re: [PULL 13/38] migration: Non multifd migration don't care about multifd flushes

2023-10-19 Thread Michael Tokarev

17.10.2023 11:29, Juan Quintela:

RDMA was having trouble because
migrate_multifd_flush_after_each_section() can only be true or false,
but we don't want to send any flush when we are not in multifd
migration.

CC: Fabiano Rosas 

Is it worth to pick it up for stable-8.1?

/mjt



[PULL 13/38] migration: Non multifd migration don't care about multifd flushes

2023-10-17 Thread Juan Quintela
RDMA was having trouble because
migrate_multifd_flush_after_each_section() can only be true or false,
but we don't want to send any flush when we are not in multifd
migration.
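
To make the intent concrete, here is a minimal sketch of the guard this patch applies at every per-round flush point; the helper name is illustrative only and assumes the existing migrate_multifd() and migrate_multifd_flush_after_each_section() option helpers:

    /* Illustrative sketch, not part of this patch: the per-round
     * RAM_SAVE_FLAG_MULTIFD_FLUSH sync only makes sense when multifd
     * is actually enabled, so both conditions are checked together. */
    static bool multifd_ram_flush_per_round(void)
    {
        return migrate_multifd() &&
               !migrate_multifd_flush_after_each_section();
    }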

CC: Fabiano Rosas 
Reviewed-by: Li Zhijian 
Reviewed-by: Peter Xu 
Signed-off-by: Juan Quintela 
Message-ID: <20231011205548.10571-2-quint...@redhat.com>
---
 migration/ram.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index d3d9c8b65b..acb8f95f00 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1395,7 +1395,8 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
 pss->page = 0;
 pss->block = QLIST_NEXT_RCU(pss->block, next);
 if (!pss->block) {
-if (!migrate_multifd_flush_after_each_section()) {
+if (migrate_multifd() &&
+!migrate_multifd_flush_after_each_section()) {
 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
 int ret = multifd_send_sync_main(f);
 if (ret < 0) {
@@ -3072,7 +3073,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 return ret;
 }
 
-if (!migrate_multifd_flush_after_each_section()) {
+if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
 }
 
@@ -3184,7 +3185,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 out:
 if (ret >= 0
 && migration_is_setup_or_active(migrate_get_current()->state)) {
-if (migrate_multifd_flush_after_each_section()) {
+if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
 ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
 if (ret < 0) {
 return ret;
@@ -3261,7 +3262,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 return ret;
 }
 
-if (!migrate_multifd_flush_after_each_section()) {
+if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
 }
 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
@@ -3768,7 +3769,8 @@ int ram_load_postcopy(QEMUFile *f, int channel)
 break;
 case RAM_SAVE_FLAG_EOS:
 /* normal exit */
-if (migrate_multifd_flush_after_each_section()) {
+if (migrate_multifd() &&
+migrate_multifd_flush_after_each_section()) {
 multifd_recv_sync_main();
 }
 break;
@@ -4046,7 +4048,8 @@ static int ram_load_precopy(QEMUFile *f)
 break;
 case RAM_SAVE_FLAG_EOS:
 /* normal exit */
-if (migrate_multifd_flush_after_each_section()) {
+if (migrate_multifd() &&
+migrate_multifd_flush_after_each_section()) {
 multifd_recv_sync_main();
 }
 break;
-- 
2.41.0



