Re: [PATCH v4 08/23] multifd: Move iov from pages to params

2022-01-27 Thread Dr. David Alan Gilbert
* Juan Quintela (quint...@redhat.com) wrote:
> "Dr. David Alan Gilbert"  wrote:
> > * Juan Quintela (quint...@redhat.com) wrote:
> >> This will allow us to reduce the number of system calls on the next patch.
> >> 
> >> Signed-off-by: Juan Quintela 
> >> ---
> >>  migration/multifd.h |  8 ++--
> >>  migration/multifd.c | 34 --
> >>  2 files changed, 30 insertions(+), 12 deletions(-)
> >> 
> >> diff --git a/migration/multifd.h b/migration/multifd.h
> >> index e57adc783b..c3f18af364 100644
> >> --- a/migration/multifd.h
> >> +++ b/migration/multifd.h
> >> @@ -62,8 +62,6 @@ typedef struct {
> >>  uint64_t packet_num;
> >>  /* offset of each page */
> >>  ram_addr_t *offset;
> >> -/* pointer to each page */
> >> -struct iovec *iov;
> >>  RAMBlock *block;
> >>  } MultiFDPages_t;
> >>  
> >> @@ -110,6 +108,10 @@ typedef struct {
> >>  uint64_t num_pages;
> >>  /* syncs main thread and channels */
> >>  QemuSemaphore sem_sync;
> >> +/* buffers to send */
> >> +struct iovec *iov;
> >> +/* number of iovs used */
> >> +uint32_t iovs_num;
> >>  /* used for compression methods */
> >>  void *data;
> >>  }  MultiFDSendParams;
> >> @@ -149,6 +151,8 @@ typedef struct {
> >>  uint64_t num_pages;
> >>  /* syncs main thread and channels */
> >>  QemuSemaphore sem_sync;
> >> +/* buffers to recv */
> >> +struct iovec *iov;
> >
> > Why is there the asymmetry between send and recv, where the send
> > has the iovs_num and the recv doesn't?
> 
> When we are sending data, we have the normal pages plus one extra entry
> (the packet header) in the iov, so it is normal_pages + 1.  On the reception
> side, we have to read the header first, because that is where normal_pages
> is stored.
> 
> I can drop iovs_num on the send side and add a comment, but I think that
> the new variable is more descriptive.
> 
> Or I can add iovs_num to the recv side and just set iovs_num =
> normal_pages, but it seems a bit pointless, no?

OK, it would be great to add a comment, because it jumps out as a little
odd.
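
Something along these lines would do, I think (illustrative fragment only,
not the real structs; the wording of the comments is mine):

#include <stdint.h>
#include <sys/uio.h>

typedef struct {
    /* buffers to send; the iovs can outnumber the pages (e.g. one extra
     * entry for the packet header), hence the explicit counter */
    struct iovec *iov;
    /* number of iov entries filled in for the current packet */
    uint32_t iovs_num;
} ExampleSendFields;

typedef struct {
    /* buffers to recv; the packet header is parsed first, so normal_pages
     * already says how many entries get filled and no counter is needed */
    struct iovec *iov;
} ExampleRecvFields;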


Reviewed-by: Dr. David Alan Gilbert 

> 
> Later, Juan.
> 
-- 
Dr. David Alan Gilbert / dgilb...@redhat.com / Manchester, UK




Re: [PATCH v4 08/23] multifd: Move iov from pages to params

2022-01-25 Thread Juan Quintela
"Dr. David Alan Gilbert"  wrote:
> * Juan Quintela (quint...@redhat.com) wrote:
>> This will allow us to reduce the number of system calls on the next patch.
>> 
>> Signed-off-by: Juan Quintela 
>> ---
>>  migration/multifd.h |  8 ++--
>>  migration/multifd.c | 34 --
>>  2 files changed, 30 insertions(+), 12 deletions(-)
>> 
>> diff --git a/migration/multifd.h b/migration/multifd.h
>> index e57adc783b..c3f18af364 100644
>> --- a/migration/multifd.h
>> +++ b/migration/multifd.h
>> @@ -62,8 +62,6 @@ typedef struct {
>>  uint64_t packet_num;
>>  /* offset of each page */
>>  ram_addr_t *offset;
>> -/* pointer to each page */
>> -struct iovec *iov;
>>  RAMBlock *block;
>>  } MultiFDPages_t;
>>  
>> @@ -110,6 +108,10 @@ typedef struct {
>>  uint64_t num_pages;
>>  /* syncs main thread and channels */
>>  QemuSemaphore sem_sync;
>> +/* buffers to send */
>> +struct iovec *iov;
>> +/* number of iovs used */
>> +uint32_t iovs_num;
>>  /* used for compression methods */
>>  void *data;
>>  }  MultiFDSendParams;
>> @@ -149,6 +151,8 @@ typedef struct {
>>  uint64_t num_pages;
>>  /* syncs main thread and channels */
>>  QemuSemaphore sem_sync;
>> +/* buffers to recv */
>> +struct iovec *iov;
>
> Why is there the asymmetry between send and recv, where the send
> has the iovs_num and the recv doesn't?

When we are sending data, we have the normal pages plus one extra entry
(the packet header) in the iov, so it is normal_pages + 1.  On the reception
side, we have to read the header first, because that is where normal_pages
is stored.

I can drop iovs_num on the send side and add a comment, but I think that
the new variable is more descriptive.

Or I can add iovs_num to the recv side and just set iovs_num =
normal_pages, but it seems a bit pointless, no?
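
For what it's worth, here is a tiny standalone illustration (not QEMU code,
all names made up) of the layout I mean: iov[0] carries the packet header,
one entry per page follows it, and a single writev() replaces
normal_pages + 1 separate write() calls:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#define NORMAL_PAGES 4
#define PAGE_SIZE    4096

int main(void)
{
    static char pages[NORMAL_PAGES][PAGE_SIZE];
    char header[64];
    struct iovec iov[NORMAL_PAGES + 1];
    uint32_t iovs_num = 0;

    /* The sender builds the header itself, so it can reserve iov[0]. */
    int hdr_len = snprintf(header, sizeof(header),
                           "normal_pages=%d\n", NORMAL_PAGES);
    iov[iovs_num].iov_base = header;
    iov[iovs_num].iov_len = hdr_len;
    iovs_num++;

    /* One entry per page, appended the same way nocomp_send_prepare()
     * does in the patch, bumping the counter as it goes. */
    for (int i = 0; i < NORMAL_PAGES; i++) {
        memset(pages[i], 'A' + i, PAGE_SIZE);
        iov[iovs_num].iov_base = pages[i];
        iov[iovs_num].iov_len = PAGE_SIZE;
        iovs_num++;
    }

    /* iovs_num == NORMAL_PAGES + 1: one system call for header + pages. */
    ssize_t n = writev(STDOUT_FILENO, iov, iovs_num);
    return n < 0 ? 1 : 0;
}

The receiver has to read and parse the header first to learn normal_pages,
and only then can it fill its iov entries, so a separate counter buys it
nothing there.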

Later, Juan.




Re: [PATCH v4 08/23] multifd: Move iov from pages to params

2022-01-18 Thread Dr. David Alan Gilbert
* Juan Quintela (quint...@redhat.com) wrote:
> This will allow us to reduce the number of system calls on the next patch.
> 
> Signed-off-by: Juan Quintela 
> ---
>  migration/multifd.h |  8 ++--
>  migration/multifd.c | 34 --
>  2 files changed, 30 insertions(+), 12 deletions(-)
> 
> diff --git a/migration/multifd.h b/migration/multifd.h
> index e57adc783b..c3f18af364 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -62,8 +62,6 @@ typedef struct {
>  uint64_t packet_num;
>  /* offset of each page */
>  ram_addr_t *offset;
> -/* pointer to each page */
> -struct iovec *iov;
>  RAMBlock *block;
>  } MultiFDPages_t;
>  
> @@ -110,6 +108,10 @@ typedef struct {
>  uint64_t num_pages;
>  /* syncs main thread and channels */
>  QemuSemaphore sem_sync;
> +/* buffers to send */
> +struct iovec *iov;
> +/* number of iovs used */
> +uint32_t iovs_num;
>  /* used for compression methods */
>  void *data;
>  }  MultiFDSendParams;
> @@ -149,6 +151,8 @@ typedef struct {
>  uint64_t num_pages;
>  /* syncs main thread and channels */
>  QemuSemaphore sem_sync;
> +/* buffers to recv */
> +struct iovec *iov;

Why is there the asymmetry between send and recv, where the send
has the iovs_num and the recv doesn't?

Dave

>  /* used for de-compression methods */
>  void *data;
>  } MultiFDRecvParams;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 4d62850258..f75bd3c188 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -86,7 +86,16 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
>   */
>  static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
>  {
> -p->next_packet_size = p->pages->num * qemu_target_page_size();
> +MultiFDPages_t *pages = p->pages;
> +size_t page_size = qemu_target_page_size();
> +
> +for (int i = 0; i < p->pages->num; i++) {
> +p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
> +p->iov[p->iovs_num].iov_len = page_size;
> +p->iovs_num++;
> +}
> +
> +p->next_packet_size = p->pages->num * page_size;
>  p->flags |= MULTIFD_FLAG_NOCOMP;
>  return 0;
>  }
> @@ -104,7 +113,7 @@ static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
>   */
>  static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
>  {
> -return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
> +return qio_channel_writev_all(p->c, p->iov, p->iovs_num, errp);
>  }
>  
>  /**
> @@ -146,13 +155,18 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
>  static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
>  {
>  uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
> +size_t page_size = qemu_target_page_size();
>  
>  if (flags != MULTIFD_FLAG_NOCOMP) {
>  error_setg(errp, "multifd %u: flags received %x flags expected %x",
> p->id, flags, MULTIFD_FLAG_NOCOMP);
>  return -1;
>  }
> -return qio_channel_readv_all(p->c, p->pages->iov, p->pages->num, errp);
> +for (int i = 0; i < p->pages->num; i++) {
> +p->iov[i].iov_base = p->pages->block->host + p->pages->offset[i];
> +p->iov[i].iov_len = page_size;
> +}
> +return qio_channel_readv_all(p->c, p->iov, p->pages->num, errp);
>  }
>  
>  static MultiFDMethods multifd_nocomp_ops = {
> @@ -242,7 +256,6 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
>  MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
>  
>  pages->allocated = size;
> -pages->iov = g_new0(struct iovec, size);
>  pages->offset = g_new0(ram_addr_t, size);
>  
>  return pages;
> @@ -254,8 +267,6 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
>  pages->allocated = 0;
>  pages->packet_num = 0;
>  pages->block = NULL;
> -g_free(pages->iov);
> -pages->iov = NULL;
>  g_free(pages->offset);
>  pages->offset = NULL;
>  g_free(pages);
> @@ -365,8 +376,6 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
>  return -1;
>  }
>  p->pages->offset[i] = offset;
> -p->pages->iov[i].iov_base = block->host + offset;
> -p->pages->iov[i].iov_len = page_size;
>  }
>  
>  return 0;
> @@ -470,8 +479,6 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
>  
>  if (pages->block == block) {
>  pages->offset[pages->num] = offset;
> -pages->iov[pages->num].iov_base = block->host + offset;
> -pages->iov[pages->num].iov_len = qemu_target_page_size();
>  pages->num++;
>  
>  if (pages->num < pages->allocated) {
> @@ -567,6 +574,8 @@ void multifd_save_cleanup(void)
>  p->packet_len = 0;
>  g_free(p->packet);
>  p->packet = NULL;
> +g_free(p->iov);
> +p->iov = NULL;
>  

[PATCH v4 08/23] multifd: Move iov from pages to params

2022-01-11 Thread Juan Quintela
This will allow us to reduce the number of system calls on the next patch.

Signed-off-by: Juan Quintela 
---
 migration/multifd.h |  8 ++--
 migration/multifd.c | 34 --
 2 files changed, 30 insertions(+), 12 deletions(-)

diff --git a/migration/multifd.h b/migration/multifd.h
index e57adc783b..c3f18af364 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -62,8 +62,6 @@ typedef struct {
 uint64_t packet_num;
 /* offset of each page */
 ram_addr_t *offset;
-/* pointer to each page */
-struct iovec *iov;
 RAMBlock *block;
 } MultiFDPages_t;
 
@@ -110,6 +108,10 @@ typedef struct {
 uint64_t num_pages;
 /* syncs main thread and channels */
 QemuSemaphore sem_sync;
+/* buffers to send */
+struct iovec *iov;
+/* number of iovs used */
+uint32_t iovs_num;
 /* used for compression methods */
 void *data;
 }  MultiFDSendParams;
@@ -149,6 +151,8 @@ typedef struct {
 uint64_t num_pages;
 /* syncs main thread and channels */
 QemuSemaphore sem_sync;
+/* buffers to recv */
+struct iovec *iov;
 /* used for de-compression methods */
 void *data;
 } MultiFDRecvParams;
diff --git a/migration/multifd.c b/migration/multifd.c
index 4d62850258..f75bd3c188 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -86,7 +86,16 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
  */
 static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
 {
-p->next_packet_size = p->pages->num * qemu_target_page_size();
+MultiFDPages_t *pages = p->pages;
+size_t page_size = qemu_target_page_size();
+
+for (int i = 0; i < p->pages->num; i++) {
+p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
+p->iov[p->iovs_num].iov_len = page_size;
+p->iovs_num++;
+}
+
+p->next_packet_size = p->pages->num * page_size;
 p->flags |= MULTIFD_FLAG_NOCOMP;
 return 0;
 }
@@ -104,7 +113,7 @@ static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
  */
 static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
 {
-return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
+return qio_channel_writev_all(p->c, p->iov, p->iovs_num, errp);
 }
 
 /**
@@ -146,13 +155,18 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
 static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
 {
 uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
+size_t page_size = qemu_target_page_size();
 
 if (flags != MULTIFD_FLAG_NOCOMP) {
 error_setg(errp, "multifd %u: flags received %x flags expected %x",
p->id, flags, MULTIFD_FLAG_NOCOMP);
 return -1;
 }
-return qio_channel_readv_all(p->c, p->pages->iov, p->pages->num, errp);
+for (int i = 0; i < p->pages->num; i++) {
+p->iov[i].iov_base = p->pages->block->host + p->pages->offset[i];
+p->iov[i].iov_len = page_size;
+}
+return qio_channel_readv_all(p->c, p->iov, p->pages->num, errp);
 }
 
 static MultiFDMethods multifd_nocomp_ops = {
@@ -242,7 +256,6 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
 MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
 
 pages->allocated = size;
-pages->iov = g_new0(struct iovec, size);
 pages->offset = g_new0(ram_addr_t, size);
 
 return pages;
@@ -254,8 +267,6 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
 pages->allocated = 0;
 pages->packet_num = 0;
 pages->block = NULL;
-g_free(pages->iov);
-pages->iov = NULL;
 g_free(pages->offset);
 pages->offset = NULL;
 g_free(pages);
@@ -365,8 +376,6 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
 return -1;
 }
 p->pages->offset[i] = offset;
-p->pages->iov[i].iov_base = block->host + offset;
-p->pages->iov[i].iov_len = page_size;
 }
 
 return 0;
@@ -470,8 +479,6 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
 
 if (pages->block == block) {
 pages->offset[pages->num] = offset;
-pages->iov[pages->num].iov_base = block->host + offset;
-pages->iov[pages->num].iov_len = qemu_target_page_size();
 pages->num++;
 
 if (pages->num < pages->allocated) {
@@ -567,6 +574,8 @@ void multifd_save_cleanup(void)
 p->packet_len = 0;
 g_free(p->packet);
 p->packet = NULL;
+g_free(p->iov);
+p->iov = NULL;
 multifd_send_state->ops->send_cleanup(p, _err);
 if (local_err) {
 migrate_set_error(migrate_get_current(), local_err);
@@ -654,6 +663,7 @@ static void *multifd_send_thread(void *opaque)
 uint32_t used = p->pages->num;
 uint64_t packet_num = p->packet_num;
 uint32_t flags = p->flags;
+p->iovs_num = 0;
 
 if (used) {
 ret =