Re: [PATCH 33/33] qemu: process: Extract code for submitting event handling to separate thread

2021-07-21 Thread Jano Tomko
On a Wednesday in 2021, Peter Krempa wrote:
> The submission of the event to the helper thread has a verbose cleanup
> path which was duplicated in all the event handlers. Simplify it by
> extracting the code into a helper named 'qemuProcessEventSubmit' and
> reuse it where appropriate.
> 
> Signed-off-by: Peter Krempa 
> ---
>  src/qemu/qemu_process.c | 113 +++-
>  1 file changed, 43 insertions(+), 70 deletions(-)
> 
> diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
> index 1bdfbce697..4c1130945f 100644
> --- a/src/qemu/qemu_process.c
> +++ b/src/qemu/qemu_process.c

> 
> @@ -1740,11 +1722,8 @@ qemuProcessHandlePRManagerStatusChanged(qemuMonitor *mon G_GNUC_UNUSED,
>      processEvent->eventType = QEMU_PROCESS_EVENT_PR_DISCONNECT;
>      processEvent->vm = virObjectRef(vm);
> 
> -    if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
> -        qemuProcessEventFree(processEvent);
> -        virObjectUnref(vm);
> -        goto cleanup;
> -    }
> +    qemuProcessEventSubmit(driver, &processEvent);
> +
> 
>   cleanup:
>      virObjectUnlock(vm);

Extra newline.

Jano



[PATCH 33/33] qemu: process: Extract code for submitting event handling to separate thread

2021-07-21 Thread Peter Krempa
The submission of the event to the helper thread has a verbose cleanup
path which was duplicated in all the event handlers. Simplify it by
extracting the code into a helper named 'qemuProcessEventSubmit' and
reuse it where appropriate.

Signed-off-by: Peter Krempa 
---
 src/qemu/qemu_process.c | 113 +++-
 1 file changed, 43 insertions(+), 70 deletions(-)

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 1bdfbce697..4c1130945f 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -280,6 +280,33 @@ qemuConnectAgent(virQEMUDriver *driver, virDomainObj *vm)
 }


+/**
+ * qemuProcessEventSubmit:
+ * @driver: QEMU driver object
+ * @event: pointer to the variable holding the event processing data (stolen and cleared)
+ *
+ * Submits @event to be processed by the asynchronous event handling thread.
+ * In case when submission of the handling fails @event is properly freed and
+ * cleared. If (*event)->vm is non-NULL the domain object is unref'd before freeing
+ * @event.
+ */
+static void
+qemuProcessEventSubmit(virQEMUDriver *driver,
+                       struct qemuProcessEvent **event)
+{
+    if (!*event)
+        return;
+
+    if (virThreadPoolSendJob(driver->workerPool, 0, *event) < 0) {
+        if ((*event)->vm)
+            virObjectUnref((*event)->vm);
+        qemuProcessEventFree(*event);
+    }
+
+    *event = NULL;
+}
+
+
 /*
  * This is a callback registered with a qemuMonitor *instance,
  * and to be invoked when the monitor console hits an end of file
@@ -310,11 +337,7 @@ qemuProcessHandleMonitorEOF(qemuMonitor *mon,
     processEvent->eventType = QEMU_PROCESS_EVENT_MONITOR_EOF;
     processEvent->vm = virObjectRef(vm);

-    if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
-        virObjectUnref(vm);
-        qemuProcessEventFree(processEvent);
-        goto cleanup;
-    }
+    qemuProcessEventSubmit(driver, &processEvent);

     /* We don't want this EOF handler to be called over and over while the
      * thread is waiting for a job.
@@ -833,10 +856,8 @@ qemuProcessHandleWatchdog(qemuMonitor *mon G_GNUC_UNUSED,
          * deleted before handling watchdog event is finished.
          */
         processEvent->vm = virObjectRef(vm);
-        if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
-            virObjectUnref(vm);
-            qemuProcessEventFree(processEvent);
-        }
+
+        qemuProcessEventSubmit(driver, &processEvent);
     }

     virObjectUnlock(vm);
@@ -925,7 +946,6 @@ qemuProcessHandleBlockJob(qemuMonitor *mon G_GNUC_UNUSED,
 {
     qemuDomainObjPrivate *priv;
     virQEMUDriver *driver = opaque;
-    struct qemuProcessEvent *processEvent = NULL;
     virDomainDiskDef *disk;
     g_autoptr(qemuBlockJobData) job = NULL;
     char *data = NULL;
@@ -954,7 +974,7 @@ qemuProcessHandleBlockJob(qemuMonitor *mon G_GNUC_UNUSED,
         virDomainObjBroadcast(vm);
     } else {
         /* there is no waiting SYNC API, dispatch the update to a thread */
-        processEvent = g_new0(struct qemuProcessEvent, 1);
+        struct qemuProcessEvent *processEvent = g_new0(struct qemuProcessEvent, 1);

         processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
         data = g_strdup(diskAlias);
@@ -963,16 +983,10 @@ qemuProcessHandleBlockJob(qemuMonitor *mon G_GNUC_UNUSED,
         processEvent->action = type;
         processEvent->status = status;

-        if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
-            virObjectUnref(vm);
-            goto cleanup;
-        }
-
-        processEvent = NULL;
+        qemuProcessEventSubmit(driver, &processEvent);
     }

  cleanup:
-    qemuProcessEventFree(processEvent);
     virObjectUnlock(vm);
 }

@@ -986,7 +1000,6 @@ qemuProcessHandleJobStatusChange(qemuMonitor *mon G_GNUC_UNUSED,
 {
     virQEMUDriver *driver = opaque;
     qemuDomainObjPrivate *priv;
-    struct qemuProcessEvent *processEvent = NULL;
     qemuBlockJobData *job = NULL;
     int jobnewstate;

@@ -1016,23 +1029,18 @@ qemuProcessHandleJobStatusChange(qemuMonitor *mon G_GNUC_UNUSED,
         VIR_DEBUG("job '%s' handled synchronously", jobname);
         virDomainObjBroadcast(vm);
     } else {
+        struct qemuProcessEvent *processEvent = g_new0(struct qemuProcessEvent, 1);
+
         VIR_DEBUG("job '%s' handled by event thread", jobname);
-        processEvent = g_new0(struct qemuProcessEvent, 1);

         processEvent->eventType = QEMU_PROCESS_EVENT_JOB_STATUS_CHANGE;
         processEvent->vm = virObjectRef(vm);
         processEvent->data = virObjectRef(job);

-        if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
-            virObjectUnref(vm);
-            goto cleanup;
-        }
-
-        processEvent = NULL;
+        qemuProcessEventSubmit(driver, &processEvent);
     }

  cleanup:
-    qemuProcessEventFree(processEvent);
     virObjectUnlock(vm);
 }

@@ -1288,10 +1296,7 @@ qemuProcessHandleGuestPanic(qemuMonitor *mon G_GNUC_UNUSED,