Nested BDRV_POLL_WHILE() calls can occur. Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.
This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.
Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).
Reported-by: "fuweiwei (C)"
Cc: Paolo Bonzini
Signed-off-by: Stefan Hajnoczi
---
v2:
* Rebase onto qemu.git/master now that AIO_WAIT_WHILE() has landed
[Kevin]
include/block/aio-wait.h | 61 ++++++++++++++++++++++++++++---------------------
util/aio-wait.c | 2 +-
2 files changed, 31 insertions(+), 32 deletions(-)
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index a48c744fa8..74cde07bef 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -50,8 +50,8 @@
* }
*/
typedef struct {
-/* Is the main loop waiting for a kick? Accessed with atomic ops. */
-bool need_kick;
+/* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
+unsigned num_waiters;
} AioWait;
/**
@@ -71,35 +71,34 @@ typedef struct {
* wait on conditions between two IOThreads since that could lead to deadlock,
* go via the main loop instead.
*/
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({ \
-bool waited_ = false; \
-bool busy_ = true; \
-AioWait *wait_ = (wait);\
-AioContext *ctx_ = (ctx); \
-if (in_aio_context_home_thread(ctx_)) { \
-while ((cond) || busy_) { \
-busy_ = aio_poll(ctx_, (cond)); \
-waited_ |= !!(cond) | busy_;\
-} \
-} else {\
-assert(qemu_get_current_aio_context() ==\
- qemu_get_aio_context()); \
-assert(!wait_->need_kick); \
-/* Set wait_->need_kick before evaluating cond. */ \
-atomic_mb_set(&wait_->need_kick, true); \
-while (busy_) { \
-if ((cond)) { \
-waited_ = busy_ = true; \
-aio_context_release(ctx_); \
-aio_poll(qemu_get_aio_context(), true); \
-aio_context_acquire(ctx_); \
-} else {\
-busy_ = aio_poll(ctx_, false); \
-waited_ |= busy_; \
-} \
-} \
-atomic_set(&wait_->need_kick, false); \
-} \
+#define AIO_WAIT_WHILE(wait, ctx, cond) ({ \
+bool waited_ = false; \
+bool busy_ = true; \
+AioWait *wait_ = (wait); \
+AioContext *ctx_ = (ctx); \
+if (in_aio_context_home_thread(ctx_)) {\
+while ((cond) || busy_) { \
+busy_ = aio_poll(ctx_, (cond));\
+waited_ |= !!(cond) | busy_; \
+} \
+} else { \
+assert(qemu_get_current_aio_context() == \
+ qemu_get_aio_context());\
+/* Increment wait_->num_waiters before evaluating cond. */ \
+atomic_inc(&wait_->num_waiters); \
+while (busy_) {\
+if ((cond)) { \
+waited_ = busy_ = true;\
+aio_context_release(ctx_); \
+aio_poll(qemu_get_aio_context(), true);\
+aio_context_acquire(ctx_); \
+} else { \
+busy_ = aio_poll(ctx_, false); \
+waited_ |= busy_; \
+} \
+}