Re: [Qemu-devel] [PATCH v6 16/19] migration: Test new fd infrastructure
On Tue, Aug 08, 2017 at 06:26:26PM +0200, Juan Quintela wrote:
> We just send the address through the alternate channels and test that it
> is ok.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  migration/ram.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 50 insertions(+)
>
> diff --git a/migration/ram.c b/migration/ram.c
> index eb0015e..42ad126 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -479,8 +479,26 @@ static void *multifd_send_thread(void *opaque)
>              break;
>          }
>          if (p->pages.num) {
> +            int i;
> +            int num;
> +
> +            num = p->pages.num;
>              p->pages.num = 0;
>              qemu_mutex_unlock(&p->mutex);
> +
> +            for (i = 0; i < num; i++) {
> +                if (qio_channel_write(p->c,
> +                                      (const char *)&p->pages.iov[i].iov_base,
> +                                      sizeof(uint8_t *), &error_abort)
> +                    != sizeof(uint8_t *)) {
> +                    MigrationState *s = migrate_get_current();
> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_send_threads();
> +                    return NULL;
> +                }
> +            }
>              qemu_mutex_lock(&multifd_send_state->mutex);
>              p->done = true;
>              qemu_mutex_unlock(&multifd_send_state->mutex);
> @@ -640,6 +658,7 @@ void multifd_load_cleanup(void)
>  static void *multifd_recv_thread(void *opaque)
>  {
>      MultiFDRecvParams *p = opaque;
> +    uint8_t *recv_address;
>
>      qemu_sem_post(&p->ready);
>      while (true) {
> @@ -649,7 +668,38 @@ static void *multifd_recv_thread(void *opaque)
>              break;
>          }
>          if (p->pages.num) {
> +            int i;
> +            int num;
> +
> +            num = p->pages.num;
>              p->pages.num = 0;
> +
> +            for (i = 0; i < num; i++) {
> +                if (qio_channel_read(p->c,
> +                                     (char *)&recv_address,
> +                                     sizeof(uint8_t *), &error_abort)
> +                    != sizeof(uint8_t *)) {
> +                    MigrationState *s = migrate_get_current();
> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_recv_threads();
> +                    return NULL;
> +                }
> +                if (recv_address != p->pages.iov[i].iov_base) {
> +                    MigrationState *s = migrate_get_current();
> +
> +                    printf("We received %p what we were expecting %p (%d)\n",
> +                           recv_address,
> +                           p->pages.iov[i].iov_base, i);
> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_recv_threads();
> +                    return NULL;
> +                }
> +            }
> +
>              p->done = true;
>              qemu_mutex_unlock(&p->mutex);
>              qemu_sem_post(&p->ready);

Looks like all this code is deleted in patch 18. So IMHO just drop this
patch entirely.

Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|
Re: [Qemu-devel] [PATCH v6 16/19] migration: Test new fd infrastructure
On Tue, Aug 08, 2017 at 06:26:26PM +0200, Juan Quintela wrote:
> We just send the address through the alternate channels and test that it
> is ok.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  migration/ram.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 50 insertions(+)
>
> diff --git a/migration/ram.c b/migration/ram.c
> index eb0015e..42ad126 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -479,8 +479,26 @@ static void *multifd_send_thread(void *opaque)
>              break;
>          }
>          if (p->pages.num) {
> +            int i;
> +            int num;
> +
> +            num = p->pages.num;
>              p->pages.num = 0;
>              qemu_mutex_unlock(&p->mutex);
> +
> +            for (i = 0; i < num; i++) {
> +                if (qio_channel_write(p->c,
> +                                      (const char *)&p->pages.iov[i].iov_base,
> +                                      sizeof(uint8_t *), &error_abort)
> +                    != sizeof(uint8_t *)) {

Must not use error_abort - this kills the entire VM if we hit an I/O
error. Treating short-writes as fatal is also really not desirable.

> +                    MigrationState *s = migrate_get_current();
> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_send_threads();
> +                    return NULL;
> +                }
> +            }
>              qemu_mutex_lock(&multifd_send_state->mutex);
>              p->done = true;
>              qemu_mutex_unlock(&multifd_send_state->mutex);
> @@ -640,6 +658,7 @@ void multifd_load_cleanup(void)
>  static void *multifd_recv_thread(void *opaque)
>  {
>      MultiFDRecvParams *p = opaque;
> +    uint8_t *recv_address;
>
>      qemu_sem_post(&p->ready);
>      while (true) {
> @@ -649,7 +668,38 @@ static void *multifd_recv_thread(void *opaque)
>              break;
>          }
>          if (p->pages.num) {
> +            int i;
> +            int num;
> +
> +            num = p->pages.num;
>              p->pages.num = 0;
> +
> +            for (i = 0; i < num; i++) {
> +                if (qio_channel_read(p->c,
> +                                     (char *)&recv_address,
> +                                     sizeof(uint8_t *), &error_abort)
> +                    != sizeof(uint8_t *)) {

Again, don't use error_abort, and you should handle short reads.

> +                    MigrationState *s = migrate_get_current();
> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_recv_threads();
> +                    return NULL;
> +                }
> +                if (recv_address != p->pages.iov[i].iov_base) {
> +                    MigrationState *s = migrate_get_current();
> +
> +                    printf("We received %p what we were expecting %p (%d)\n",
> +                           recv_address,
> +                           p->pages.iov[i].iov_base, i);

Should use a trace event probe for this.

> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_recv_threads();
> +                    return NULL;
> +                }
> +            }
> +

Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|
[Qemu-devel] [PATCH v6 16/19] migration: Test new fd infrastructure
We just send the address through the alternate channels and test that it
is ok.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/ram.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/migration/ram.c b/migration/ram.c
index eb0015e..42ad126 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -479,8 +479,26 @@ static void *multifd_send_thread(void *opaque)
             break;
         }
         if (p->pages.num) {
+            int i;
+            int num;
+
+            num = p->pages.num;
             p->pages.num = 0;
             qemu_mutex_unlock(&p->mutex);
+
+            for (i = 0; i < num; i++) {
+                if (qio_channel_write(p->c,
+                                      (const char *)&p->pages.iov[i].iov_base,
+                                      sizeof(uint8_t *), &error_abort)
+                    != sizeof(uint8_t *)) {
+                    MigrationState *s = migrate_get_current();
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_send_threads();
+                    return NULL;
+                }
+            }
             qemu_mutex_lock(&multifd_send_state->mutex);
             p->done = true;
             qemu_mutex_unlock(&multifd_send_state->mutex);
@@ -640,6 +658,7 @@ void multifd_load_cleanup(void)
 static void *multifd_recv_thread(void *opaque)
 {
     MultiFDRecvParams *p = opaque;
+    uint8_t *recv_address;

     qemu_sem_post(&p->ready);
     while (true) {
@@ -649,7 +668,38 @@ static void *multifd_recv_thread(void *opaque)
             break;
         }
         if (p->pages.num) {
+            int i;
+            int num;
+
+            num = p->pages.num;
             p->pages.num = 0;
+
+            for (i = 0; i < num; i++) {
+                if (qio_channel_read(p->c,
+                                     (char *)&recv_address,
+                                     sizeof(uint8_t *), &error_abort)
+                    != sizeof(uint8_t *)) {
+                    MigrationState *s = migrate_get_current();
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_recv_threads();
+                    return NULL;
+                }
+                if (recv_address != p->pages.iov[i].iov_base) {
+                    MigrationState *s = migrate_get_current();
+
+                    printf("We received %p what we were expecting %p (%d)\n",
+                           recv_address,
+                           p->pages.iov[i].iov_base, i);
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_recv_threads();
+                    return NULL;
+                }
+            }
+
             p->done = true;
             qemu_mutex_unlock(&p->mutex);
             qemu_sem_post(&p->ready);
-- 
2.9.4
[Qemu-devel] [PATCH v6 16/19] migration: Test new fd infrastructure
We just send the address through the alternate channels and test that it
is ok.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/ram.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/migration/ram.c b/migration/ram.c
index eb0015e..42ad126 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -479,8 +479,26 @@ static void *multifd_send_thread(void *opaque)
             break;
         }
         if (p->pages.num) {
+            int i;
+            int num;
+
+            num = p->pages.num;
             p->pages.num = 0;
             qemu_mutex_unlock(&p->mutex);
+
+            for (i = 0; i < num; i++) {
+                if (qio_channel_write(p->c,
+                                      (const char *)&p->pages.iov[i].iov_base,
+                                      sizeof(uint8_t *), &error_abort)
+                    != sizeof(uint8_t *)) {
+                    MigrationState *s = migrate_get_current();
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_send_threads();
+                    return NULL;
+                }
+            }
             qemu_mutex_lock(&multifd_send_state->mutex);
             p->done = true;
             qemu_mutex_unlock(&multifd_send_state->mutex);
@@ -640,6 +658,7 @@ void multifd_load_cleanup(void)
 static void *multifd_recv_thread(void *opaque)
 {
     MultiFDRecvParams *p = opaque;
+    uint8_t *recv_address;

     qemu_sem_post(&p->ready);
     while (true) {
@@ -649,7 +668,38 @@ static void *multifd_recv_thread(void *opaque)
             break;
         }
         if (p->pages.num) {
+            int i;
+            int num;
+
+            num = p->pages.num;
             p->pages.num = 0;
+
+            for (i = 0; i < num; i++) {
+                if (qio_channel_read(p->c,
+                                     (char *)&recv_address,
+                                     sizeof(uint8_t *), &error_abort)
+                    != sizeof(uint8_t *)) {
+                    MigrationState *s = migrate_get_current();
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_recv_threads();
+                    return NULL;
+                }
+                if (recv_address != p->pages.iov[i].iov_base) {
+                    MigrationState *s = migrate_get_current();
+
+                    printf("We received %p what we were expecting %p (%d)\n",
+                           recv_address,
+                           p->pages.iov[i].iov_base, i);
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_recv_threads();
+                    return NULL;
+                }
+            }
+
             p->done = true;
             qemu_mutex_unlock(&p->mutex);
             qemu_sem_post(&p->ready);
-- 
2.9.4