[Qemu-devel] [PATCH v10 22/24] migration: Create ram_multifd_page
The function still doesn't use multifd, but we have simplified
ram_save_page: the xbzrle and RDMA stuff is gone. We have added a new
counter.

Signed-off-by: Juan Quintela

--
Add last_page parameter
Add comments for done and address
Remove multifd field, it is the same as normal pages
Merge next patch, now we send multiple pages at a time
Remove counter for multifd pages, it is identical to normal pages
Use iovecs instead of creating the equivalent
Clear memory used by pages (dave)
Use g_new0() (danp)
Define MULTIFD_CONTINUE
Now the pages member is a pointer
Fix off-by-one in number of pages in one packet
Remove RAM_SAVE_FLAG_MULTIFD_PAGE
---
 migration/ram.c        | 144 ++++++++++++++++++++++++++++++++++++++++++++++-
 migration/trace-events |   3 +-
 2 files changed, 144 insertions(+), 3 deletions(-)
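(Not part of the patch, just review context.) The point of the iovec
change is that queueing a page now only records a pointer/length pair;
nothing is copied until the channel eventually writes the packet. A
self-contained sketch of that idea, with invented names
(multifd_queue_page_sketch, PAGE_SIZE_SKETCH; the real code uses
TARGET_PAGE_SIZE and sizes the array with migrate_multifd_page_count()):

/*
 * Sketch only, not the patch's code: queue pages as iovec entries.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

#define PAGE_SIZE_SKETCH 4096        /* stand-in for TARGET_PAGE_SIZE */

typedef struct {
    uint32_t used;       /* number of used pages */
    uint32_t allocated;  /* number of allocated pages */
    struct iovec *iov;   /* one entry per queued page */
} multifd_pages_t;

/* Append one page to the current packet; false means "flush first". */
static bool multifd_queue_page_sketch(multifd_pages_t *pages, void *hostaddr)
{
    if (pages->used == pages->allocated) {
        return false;                       /* packet is full */
    }
    /* No data copy: the iovec simply points at the page in place. */
    pages->iov[pages->used].iov_base = hostaddr;
    pages->iov[pages->used].iov_len = PAGE_SIZE_SKETCH;
    pages->used++;
    return true;
}

int main(void)
{
    static char ram[4][PAGE_SIZE_SKETCH];   /* pretend guest RAM */
    multifd_pages_t pages = { .allocated = 4 };

    pages.iov = calloc(pages.allocated, sizeof(struct iovec));
    for (int i = 0; i < 4; i++) {
        multifd_queue_page_sketch(&pages, ram[i]);
    }
    printf("queued %u pages\n", (unsigned)pages.used);
    free(pages.iov);
    return 0;
}

Because iov_base points straight into guest RAM, the flush path can
presumably hand the whole array to a scatter/gather write once the
"ToDo: send page here" below is filled in.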
diff --git a/migration/ram.c b/migration/ram.c
index 4efac0c20c..df9646ed2e 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -54,6 +54,7 @@
 #include "migration/block.h"
 #include "sysemu/sysemu.h"
 #include "qemu/uuid.h"
+#include "qemu/iov.h"
 
 /***********************************************************/
 /* ram save/restore */
@@ -397,7 +398,19 @@ static void compress_threads_save_setup(void)
 
 /* Multiple fd's */
 
+typedef struct {
+    /* number of used pages */
+    uint32_t used;
+    /* number of allocated pages */
+    uint32_t allocated;
+    /* global number of generated multifd packets */
+    uint32_t seq;
+    struct iovec *iov;
+    RAMBlock *block;
+} multifd_pages_t;
+
 struct MultiFDSendParams {
+    /* not changed */
     uint8_t id;
     char *name;
     QemuThread thread;
@@ -405,8 +418,15 @@ struct MultiFDSendParams {
     QemuSemaphore sem;
     QemuMutex mutex;
     bool running;
+    /* protected by param mutex */
     bool quit;
     bool sync;
+    multifd_pages_t *pages;
+    /* how many packets this channel has sent */
+    uint32_t packets_sent;
+    /* protected by multifd mutex */
+    /* has the thread finished the last submitted job */
+    bool done;
 };
 typedef struct MultiFDSendParams MultiFDSendParams;
 
@@ -416,8 +436,31 @@ struct {
     int count;
     /* syncs main thread and channels */
     QemuSemaphore sem_main;
+    QemuMutex mutex;
+    QemuSemaphore sem;
+    multifd_pages_t *pages;
 } *multifd_send_state;
 
+static void multifd_pages_init(multifd_pages_t **ppages, size_t size)
+{
+    multifd_pages_t *pages = g_new0(multifd_pages_t, 1);
+
+    pages->allocated = size;
+    pages->iov = g_new0(struct iovec, size);
+    *ppages = pages;
+}
+
+static void multifd_pages_clear(multifd_pages_t *pages)
+{
+    pages->used = 0;
+    pages->allocated = 0;
+    pages->seq = 0;
+    pages->block = NULL;
+    g_free(pages->iov);
+    pages->iov = NULL;
+    g_free(pages);
+}
+
 static void terminate_multifd_send_threads(Error *errp)
 {
     int i;
@@ -464,10 +507,14 @@ int multifd_save_cleanup(Error **errp)
         qemu_sem_destroy(&p->sem);
         g_free(p->name);
         p->name = NULL;
+        multifd_pages_clear(p->pages);
+        p->pages = NULL;
     }
     qemu_sem_destroy(&multifd_send_state->sem_main);
     g_free(multifd_send_state->params);
     multifd_send_state->params = NULL;
+    multifd_pages_clear(multifd_send_state->pages);
+    multifd_send_state->pages = NULL;
     g_free(multifd_send_state);
     multifd_send_state = NULL;
     return ret;
@@ -516,6 +563,7 @@ static void *multifd_send_thread(void *opaque)
         terminate_multifd_send_threads(local_err);
         return NULL;
     }
+    qemu_sem_post(&multifd_send_state->sem);
 
     while (true) {
         qemu_sem_wait(&p->sem);
@@ -530,9 +578,23 @@ static void *multifd_send_thread(void *opaque)
             qemu_mutex_unlock(&p->mutex);
             break;
         }
+        if (p->pages->used) {
+            trace_multifd_send(p->id, p->pages->seq, p->pages->used);
+            p->pages->used = 0;
+            qemu_mutex_unlock(&p->mutex);
+
+            /* ToDo: send page here */
+
+            qemu_mutex_lock(&multifd_send_state->mutex);
+            p->done = true;
+            p->packets_sent++;
+            qemu_mutex_unlock(&multifd_send_state->mutex);
+            qemu_sem_post(&multifd_send_state->sem);
+            continue;
+        }
         qemu_mutex_unlock(&p->mutex);
     }
-    trace_multifd_send_thread_end(p->id);
+    trace_multifd_send_thread_end(p->id, p->packets_sent);
 
     return NULL;
 }
@@ -571,7 +633,10 @@ int multifd_save_setup(void)
     multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
     atomic_set(&multifd_send_state->count, 0);
     qemu_sem_init(&multifd_send_state->sem_main, 0);
-
+    qemu_mutex_init(&multifd_send_state->mutex);
+    qemu_sem_init(&multifd_send_state->sem, 0);
+    multifd_pages_init(&multifd_send_state->pages,
+                       migrate_multifd_page_count());
     for (i = 0; i < thread_count; i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
 
@@ -579,6 +644,8 @@ int multifd_save_setup(void)
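(Again not part of the patch.) The done/sem handshake above amounts to a
pool of idle channels: multifd_send_state->sem counts channels that have
finished their last job, and done marks which ones they are. A minimal
pthread sketch of that pattern as I read it, with invented names
(Channel, send_one_packet) and plain POSIX primitives standing in for
QEMU's qemu_sem_*/qemu_mutex_* wrappers:

/*
 * Sketch only: idle-channel pool with a counting semaphore.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

#define NCHANNELS 2

typedef struct {
    int id;
    sem_t sem;        /* posted by main when this channel has work */
    bool done;        /* protected by state.mutex: channel is idle */
    bool quit;
    pthread_t thread;
} Channel;

static struct {
    pthread_mutex_t mutex;
    sem_t sem;        /* counts channels that have become idle */
    Channel ch[NCHANNELS];
} state;

static void *channel_thread(void *opaque)
{
    Channel *c = opaque;

    while (true) {
        sem_wait(&c->sem);            /* wait for work (or quit) */
        if (c->quit) {
            break;
        }
        printf("channel %d: sending one packet\n", c->id);
        pthread_mutex_lock(&state.mutex);
        c->done = true;               /* advertise ourselves as idle */
        pthread_mutex_unlock(&state.mutex);
        sem_post(&state.sem);         /* wake the main thread */
    }
    return NULL;
}

/* Main-thread side: block until some channel is idle, hand it work. */
static void send_one_packet(void)
{
    sem_wait(&state.sem);             /* at least one channel is idle */
    pthread_mutex_lock(&state.mutex);
    for (int i = 0; i < NCHANNELS; i++) {
        Channel *c = &state.ch[i];
        if (c->done) {
            c->done = false;          /* claim it */
            pthread_mutex_unlock(&state.mutex);
            sem_post(&c->sem);        /* kick the channel */
            return;
        }
    }
    pthread_mutex_unlock(&state.mutex);  /* unreachable if sem is consistent */
}

int main(void)
{
    pthread_mutex_init(&state.mutex, NULL);
    sem_init(&state.sem, 0, 0);
    for (int i = 0; i < NCHANNELS; i++) {
        Channel *c = &state.ch[i];
        c->id = i;
        c->done = true;               /* every channel starts idle */
        sem_init(&c->sem, 0, 0);
        sem_post(&state.sem);         /* announce initial idleness */
        pthread_create(&c->thread, NULL, channel_thread, c);
    }
    for (int i = 0; i < 6; i++) {
        send_one_packet();
    }
    for (int i = 0; i < NCHANNELS; i++) {
        state.ch[i].quit = true;      /* shut down */
        sem_post(&state.ch[i].sem);
        pthread_join(state.ch[i].thread, NULL);
    }
    return 0;
}

The invariant is that the semaphore's value never exceeds the number of
channels with done == true, so the scan under the mutex always finds
one. That is why each channel posts the semaphore exactly once per
completed job, and why the patch posts multifd_send_state->sem once per
thread at startup.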