The job mutex will be used to protect the job struct elements and list,
replacing the AioContext locks.
For now, use a single shared lock for all jobs, in order to keep things
simple. Once the AioContext lock is gone, we can introduce per-job
locks.
To simplify the switch from the AioContext lock to the job lock, introduce
*nop* lock/unlock functions and macros. Once everything is protected
by the job lock, we can add the actual mutex logic and remove the
AioContext lock.
Since the names job_lock/job_unlock are now taken by the public nop
functions, rename the existing internal helpers to the static
_job_{lock/unlock}.
Signed-off-by: Emanuele Giuseppe Esposito <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
---
include/qemu/job.h | 18 ++++++++++++++++++
job.c | 39 +++++++++++++++++++++++++++------------
2 files changed, 45 insertions(+), 12 deletions(-)
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 7e9e59f4b8..ccf7826426 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -297,6 +297,24 @@ typedef enum JobCreateFlags {
JOB_MANUAL_DISMISS = 0x04,
} JobCreateFlags;
+/**
+ * job_lock:
+ *
+ * Take the mutex protecting the list of jobs and their status.
+ * Most functions called by the monitor need to call job_lock
+ * and job_unlock manually. On the other hand, function called
+ * by the block jobs themselves and by the block layer will take the
+ * lock for you.
+ */
+void job_lock(void);
+
+/**
+ * job_unlock:
+ *
+ * Release the mutex protecting the list of jobs and their status.
+ */
+void job_unlock(void);
+
/**
* Allocate and return a new job transaction. Jobs can be added to the
* transaction using job_txn_add_job().
diff --git a/job.c b/job.c
index 94b142684f..0e4dacf028 100644
--- a/job.c
+++ b/job.c
@@ -32,6 +32,12 @@
#include "trace/trace-root.h"
#include "qapi/qapi-events-job.h"
+/*
+ * job_mutex protects the jobs list, but also makes the
+ * struct job fields thread-safe.
+ */
+static QemuMutex job_mutex;
+
static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
/* Job State Transition Table */
@@ -74,17 +80,26 @@ struct JobTxn {
int refcnt;
};
-/* Right now, this mutex is only needed to synchronize accesses to job->busy
- * and job->sleep_timer, such as concurrent calls to job_do_yield and
- * job_enter. */
-static QemuMutex job_mutex;
+#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */
+
+#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */
+
+void job_lock(void)
+{
+ /* nop */
+}
+
+void job_unlock(void)
+{
+ /* nop */
+}
-static void job_lock(void)
+static void _job_lock(void)
{
qemu_mutex_lock(&job_mutex);
}
-static void job_unlock(void)
+static void _job_unlock(void)
{
qemu_mutex_unlock(&job_mutex);
}
@@ -449,21 +464,21 @@ void job_enter_cond(Job *job, bool(*fn)(Job *job))
return;
}
- job_lock();
+ _job_lock();
if (job->busy) {
- job_unlock();
+ _job_unlock();
return;
}
if (fn && !fn(job)) {
- job_unlock();
+ _job_unlock();
return;
}
assert(!job->deferred_to_main_loop);
timer_del(&job->sleep_timer);
job->busy = true;
- job_unlock();
+ _job_unlock();
aio_co_enter(job->aio_context, job->co);
}
@@ -480,13 +495,13 @@ void job_enter(Job *job)
* called explicitly. */
static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
{
- job_lock();
+ _job_lock();
if (ns != -1) {
timer_mod(&job->sleep_timer, ns);
}
job->busy = false;
job_event_idle(job);
- job_unlock();
+ _job_unlock();
qemu_coroutine_yield();
/* Set by job_enter_cond() before re-entering the coroutine. */
--
2.27.0