On 2/19/25 21:34, Maciej S. Szmigiero wrote:
From: "Maciej S. Szmigiero" <maciej.szmigi...@oracle.com>
Since it's important to finish loading the device state transferred via
the main migration channel (via the save_live_iterate SaveVMHandler)
before starting to load the data asynchronously transferred via multifd,
the thread doing the actual loading of the multifd-transferred data is
only started from the switchover_start SaveVMHandler.

The switchover_start handler is called when the MIG_CMD_SWITCHOVER_START
sub-command of QEMU_VM_COMMAND is received via the main migration channel.
This sub-command is only sent after all save_live_iterate data have
already been posted, so it is safe to commence loading of the
multifd-transferred device state upon receiving it - loading of
save_live_iterate data happens synchronously in the main migration thread
(much like the processing of MIG_CMD_SWITCHOVER_START), so by the time
MIG_CMD_SWITCHOVER_START is processed all the preceding data must have
already been loaded.
Signed-off-by: Maciej S. Szmigiero <maciej.szmigi...@oracle.com>
---
hw/vfio/migration-multifd.c | 225 ++++++++++++++++++++++++++++++++++++
hw/vfio/migration-multifd.h | 2 +
hw/vfio/migration.c | 12 ++
hw/vfio/trace-events | 5 +
4 files changed, 244 insertions(+)
diff --git a/hw/vfio/migration-multifd.c b/hw/vfio/migration-multifd.c
index 5d5ee1393674..b3a88c062769 100644
--- a/hw/vfio/migration-multifd.c
+++ b/hw/vfio/migration-multifd.c
@@ -42,8 +42,13 @@ typedef struct VFIOStateBuffer {
} VFIOStateBuffer;
typedef struct VFIOMultifd {
+ QemuThread load_bufs_thread;
+ bool load_bufs_thread_running;
+ bool load_bufs_thread_want_exit;
+
VFIOStateBuffers load_bufs;
QemuCond load_bufs_buffer_ready_cond;
+ QemuCond load_bufs_thread_finished_cond;
QemuMutex load_bufs_mutex; /* Lock order: this lock -> BQL */
uint32_t load_buf_idx;
uint32_t load_buf_idx_last;
@@ -179,6 +184,175 @@ bool vfio_load_state_buffer(void *opaque, char *data, size_t data_size,
return true;
}
+static int vfio_load_bufs_thread_load_config(VFIODevice *vbasedev)
+{
+ return -EINVAL;
+}
Please move this to the next patch.
+static VFIOStateBuffer *vfio_load_state_buffer_get(VFIOMultifd *multifd)
+{
+ VFIOStateBuffer *lb;
+ guint bufs_len;
guint: I guess it's ok to use here. It is not common practice in VFIO.
+
+ bufs_len = vfio_state_buffers_size_get(&multifd->load_bufs);
+ if (multifd->load_buf_idx >= bufs_len) {
+ assert(multifd->load_buf_idx == bufs_len);
+ return NULL;
+ }
+
+ lb = vfio_state_buffers_at(&multifd->load_bufs,
+ multifd->load_buf_idx);
Could be one line. minor.
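i.e. something like:

    lb = vfio_state_buffers_at(&multifd->load_bufs, multifd->load_buf_idx);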
+ if (!lb->is_present) {
+ return NULL;
+ }
+
+ return lb;
+}
+
+static bool vfio_load_state_buffer_write(VFIODevice *vbasedev,
+ VFIOStateBuffer *lb,
+ Error **errp)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+ g_autofree char *buf = NULL;
+ char *buf_cur;
+ size_t buf_len;
+
+ if (!lb->len) {
+ return true;
+ }
+
+ trace_vfio_load_state_device_buffer_load_start(vbasedev->name,
+ multifd->load_buf_idx);
I think we can move this trace event to vfio_load_bufs_thread()
+ /* lb might become re-allocated when we drop the lock */
+ buf = g_steal_pointer(&lb->data);
+ buf_cur = buf;
+ buf_len = lb->len;
+ while (buf_len > 0) {
+ ssize_t wr_ret;
+ int errno_save;
+
+ /*
+ * Loading data to the device takes a while,
+ * drop the lock during this process.
+ */
+ qemu_mutex_unlock(&multifd->load_bufs_mutex);
+ wr_ret = write(migration->data_fd, buf_cur, buf_len);
+ errno_save = errno;
+ qemu_mutex_lock(&multifd->load_bufs_mutex);
+
+ if (wr_ret < 0) {
+ error_setg(errp,
+ "writing state buffer %" PRIu32 " failed: %d",
+ multifd->load_buf_idx, errno_save);
+ return false;
+ }
+
+ assert(wr_ret <= buf_len);
+ buf_len -= wr_ret;
+ buf_cur += wr_ret;
+ }
+
+ trace_vfio_load_state_device_buffer_load_end(vbasedev->name,
+ multifd->load_buf_idx);
and drop this trace event.
In which case, we can modify the parameters of vfio_load_state_buffer_write()
to take a 'VFIOMultifd *multifd' and an fd directly instead of
"migration->data_fd".
+
+ return true;
+}
+
+static bool vfio_load_bufs_thread_want_exit(VFIOMultifd *multifd,
+ bool *should_quit)
+{
+ return multifd->load_bufs_thread_want_exit || qatomic_read(should_quit);
+}
+
+/*
+ * This thread is spawned by vfio_multifd_switchover_start(), which gets
+ * called upon encountering the switchover point marker in the main
+ * migration stream.
+ *
+ * It exits after either:
+ * * completing loading the remaining device state and device config, OR:
+ * * encountering some error while doing the above, OR:
+ * * being forcefully aborted by the migration core by it setting should_quit
+ * or by vfio_load_cleanup_load_bufs_thread() setting
+ * multifd->load_bufs_thread_want_exit.
+ */
+static bool vfio_load_bufs_thread(void *opaque, bool *should_quit,
+ Error **errp)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+ bool ret = true;
+ int config_ret;
Not needed IMO. See below.
+
+ assert(multifd);
+ QEMU_LOCK_GUARD(&multifd->load_bufs_mutex);
+
+ assert(multifd->load_bufs_thread_running);
We could add a trace event for the start and the end of the thread.
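Something along these lines (names are only a suggestion):

    vfio_load_bufs_thread_start(const char *name) " (%s)"
    vfio_load_bufs_thread_end(const char *name) " (%s)"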
+ while (true) {
+ VFIOStateBuffer *lb;
+
+ /*
+ * Always check cancellation first after the buffer_ready wait below in
+ * case that cond was signalled by vfio_load_cleanup_load_bufs_thread().
+ */
+ if (vfio_load_bufs_thread_want_exit(multifd, should_quit)) {
+ error_setg(errp, "operation cancelled");
+ ret = false;
+ goto ret_signal;
goto thread_exit ?
+ }
+
+ assert(multifd->load_buf_idx <= multifd->load_buf_idx_last);
+
+ lb = vfio_load_state_buffer_get(multifd);
+ if (!lb) {
+ trace_vfio_load_state_device_buffer_starved(vbasedev->name,
+ multifd->load_buf_idx);
+ qemu_cond_wait(&multifd->load_bufs_buffer_ready_cond,
+ &multifd->load_bufs_mutex);
+ continue;
+ }
+
+ if (multifd->load_buf_idx == multifd->load_buf_idx_last) {
+ break;
+ }
+
+ if (multifd->load_buf_idx == 0) {
+ trace_vfio_load_state_device_buffer_start(vbasedev->name);
+ }
+
+ if (!vfio_load_state_buffer_write(vbasedev, lb, errp)) {
+ ret = false;
+ goto ret_signal;
+ }
+
+ if (multifd->load_buf_idx == multifd->load_buf_idx_last - 1) {
+ trace_vfio_load_state_device_buffer_end(vbasedev->name);
+ }
+
+ multifd->load_buf_idx++;
+ }
If ret is initialized to false and assigned true here, the earlier
"ret = false" assignments can be dropped.
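A rough sketch (assuming the config load part moves to the next patch as
suggested below):

    bool ret = false;

    while (true) {
        ...
        if (vfio_load_bufs_thread_want_exit(multifd, should_quit)) {
            error_setg(errp, "operation cancelled");
            goto thread_exit;
        }
        ...
    }
    ret = true;

thread_exit:
    ...
    return ret;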
+ config_ret = vfio_load_bufs_thread_load_config(vbasedev);
+ if (config_ret) {
+ error_setg(errp, "load config state failed: %d", config_ret);
+ ret = false;
+ }
Please move this to the next patch. It adds nothing to this patch
since it only returns -EINVAL.
Thanks,
C.
+ret_signal:
+ /*
+ * Notify possibly waiting vfio_load_cleanup_load_bufs_thread() that
+ * this thread is exiting.
+ */
+ multifd->load_bufs_thread_running = false;
+ qemu_cond_signal(&multifd->load_bufs_thread_finished_cond);
+
+ return ret;
+}
+
VFIOMultifd *vfio_multifd_new(void)
{
VFIOMultifd *multifd = g_new(VFIOMultifd, 1);
@@ -191,11 +365,42 @@ VFIOMultifd *vfio_multifd_new(void)
multifd->load_buf_idx_last = UINT32_MAX;
qemu_cond_init(&multifd->load_bufs_buffer_ready_cond);
+ multifd->load_bufs_thread_running = false;
+ multifd->load_bufs_thread_want_exit = false;
+ qemu_cond_init(&multifd->load_bufs_thread_finished_cond);
+
return multifd;
}
+/*
+ * Terminates vfio_load_bufs_thread by setting
+ * multifd->load_bufs_thread_want_exit and signalling all the conditions
+ * the thread could be blocked on.
+ *
+ * Waits for the thread to signal that it had finished.
+ */
+static void vfio_load_cleanup_load_bufs_thread(VFIOMultifd *multifd)
+{
+ /* The lock order is load_bufs_mutex -> BQL so unlock BQL here first */
+ bql_unlock();
+ WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) {
+ while (multifd->load_bufs_thread_running) {
+ multifd->load_bufs_thread_want_exit = true;
+
+ qemu_cond_signal(&multifd->load_bufs_buffer_ready_cond);
+ qemu_cond_wait(&multifd->load_bufs_thread_finished_cond,
+ &multifd->load_bufs_mutex);
+ }
+ }
+ bql_lock();
+}
+
void vfio_multifd_free(VFIOMultifd *multifd)
{
+ vfio_load_cleanup_load_bufs_thread(multifd);
+
+ qemu_cond_destroy(&multifd->load_bufs_thread_finished_cond);
+ vfio_state_buffers_destroy(&multifd->load_bufs);
qemu_cond_destroy(&multifd->load_bufs_buffer_ready_cond);
qemu_mutex_destroy(&multifd->load_bufs_mutex);
@@ -225,3 +430,23 @@ bool vfio_multifd_transfer_setup(VFIODevice *vbasedev, Error **errp)
return true;
}
+
+int vfio_multifd_switchover_start(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+
+ assert(multifd);
+
+ /* The lock order is load_bufs_mutex -> BQL so unlock BQL here first */
+ bql_unlock();
+ WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) {
+ assert(!multifd->load_bufs_thread_running);
+ multifd->load_bufs_thread_running = true;
+ }
+ bql_lock();
+
+ qemu_loadvm_start_load_thread(vfio_load_bufs_thread, vbasedev);
+
+ return 0;
+}
diff --git a/hw/vfio/migration-multifd.h b/hw/vfio/migration-multifd.h
index d5ab7d6f85f5..09cbb437d9d1 100644
--- a/hw/vfio/migration-multifd.h
+++ b/hw/vfio/migration-multifd.h
@@ -25,4 +25,6 @@ bool vfio_multifd_transfer_setup(VFIODevice *vbasedev, Error **errp);
bool vfio_load_state_buffer(void *opaque, char *data, size_t data_size,
Error **errp);
+int vfio_multifd_switchover_start(VFIODevice *vbasedev);
+
#endif
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index abaf4d08d4a9..85f54cb22df2 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -793,6 +793,17 @@ static bool vfio_switchover_ack_needed(void *opaque)
return vfio_precopy_supported(vbasedev);
}
+static int vfio_switchover_start(void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+
+ if (vfio_multifd_transfer_enabled(vbasedev)) {
+ return vfio_multifd_switchover_start(vbasedev);
+ }
+
+ return 0;
+}
+
static const SaveVMHandlers savevm_vfio_handlers = {
.save_prepare = vfio_save_prepare,
.save_setup = vfio_save_setup,
@@ -808,6 +819,7 @@ static const SaveVMHandlers savevm_vfio_handlers = {
.load_state = vfio_load_state,
.load_state_buffer = vfio_load_state_buffer,
.switchover_ack_needed = vfio_switchover_ack_needed,
+ .switchover_start = vfio_switchover_start,
};
/* ---------------------------------------------------------------------- */
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 042a3dc54a33..418b378ebd29 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -154,6 +154,11 @@ vfio_load_device_config_state_end(const char *name) " (%s)"
vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size %"PRIu64" ret %d"
vfio_load_state_device_buffer_incoming(const char *name, uint32_t idx) " (%s) idx %"PRIu32
+vfio_load_state_device_buffer_start(const char *name) " (%s)"
+vfio_load_state_device_buffer_starved(const char *name, uint32_t idx) " (%s) idx %"PRIu32
+vfio_load_state_device_buffer_load_start(const char *name, uint32_t idx) " (%s) idx %"PRIu32
+vfio_load_state_device_buffer_load_end(const char *name, uint32_t idx) " (%s) idx %"PRIu32
+vfio_load_state_device_buffer_end(const char *name) " (%s)"
vfio_migration_realize(const char *name) " (%s)"
vfio_migration_set_device_state(const char *name, const char *state) " (%s) state %s"
vfio_migration_set_state(const char *name, const char *new_state, const char *recover_state) " (%s) new state %s, recover state %s"