Re: [Qemu-devel] [PATCH v2] block: make BDRV_POLL_WHILE() re-entrancy safe

2018-03-07 Thread Eric Blake

On 03/07/2018 06:46 AM, Stefan Hajnoczi wrote:

Nested BDRV_POLL_WHILE() calls can occur.  Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.

Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).

Reported-by: "fuweiwei (C)" 
Cc: Paolo Bonzini 
Signed-off-by: Stefan Hajnoczi 
---
v2:
  * Rebase onto qemu.git/master now that AIO_WAIT_WHILE() has landed
    [Kevin]

  include/block/aio-wait.h | 61 


The diff looks big due to pure whitespace churn: the column of the trailing \
changed.  Viewing the diff with whitespace ignored made it easier to review.
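
For anyone reproducing that whitespace-ignored view, git can drop the
noise directly; two standard invocations, with <commit> as a placeholder:

    git show -w <commit>       # -w is short for --ignore-all-space
    git diff -w HEAD~1 HEAD    # same view for the newest commit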


Reviewed-by: Eric Blake 

diff --git c/include/block/aio-wait.h w/include/block/aio-wait.h
index a48c744fa87..74cde07bef3 100644
--- c/include/block/aio-wait.h
+++ w/include/block/aio-wait.h
@@ -50,8 +50,8 @@
  *   }
  */
 typedef struct {
-    /* Is the main loop waiting for a kick?  Accessed with atomic ops. */
-    bool need_kick;
+    /* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
+    unsigned num_waiters;
 } AioWait;

 /**
@@ -84,9 +84,8 @@ typedef struct {
     } else {                                                        \
         assert(qemu_get_current_aio_context() ==                    \
                qemu_get_aio_context());                             \
-        assert(!wait_->need_kick);                          \
-        /* Set wait_->need_kick before evaluating cond.  */ \
-        atomic_mb_set(&wait_->need_kick, true);             \
+        /* Increment wait_->num_waiters before evaluating cond. */  \
+        atomic_inc(&wait_->num_waiters);                            \
         while (busy_) {                                             \
             if ((cond)) {                                           \
                 waited_ = busy_ = true;                             \
@@ -98,7 +97,7 @@ typedef struct {
                 waited_ |= busy_;                                   \
             }                                                       \
         }                                                           \
-        atomic_set(&wait_->need_kick, false);               \
+        atomic_dec(&wait_->num_waiters);                            \
     }                                                               \
     waited_; })
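
To make the nesting argument in the commit message concrete, here is a
minimal standalone model (not QEMU code; every name in it is invented
for illustration) of why a bool "need a kick" flag trips its assertion
under nesting while a per-waiter counter stays balanced:

/* Minimal standalone model (not QEMU code; all names are invented). */
#include <assert.h>
#include <stdio.h>

static unsigned num_waiters;    /* counter version: nests correctly */

static void inner_wait(void)
{
    num_waiters++;              /* nested AIO_WAIT_WHILE()-style caller */
    /* ... inner poll loop runs here until its condition clears ... */
    num_waiters--;
}

static void outer_wait(void)
{
    /* The old code asserted the flag was clear on entry, so a nested
     * caller reaching this point a second time would abort. */
    num_waiters++;
    inner_wait();               /* e.g. a callback that drains again */
    /* The outer caller re-evaluates its condition after the nested
     * call completes, which counts as aio_poll() progress. */
    num_waiters--;
}

int main(void)
{
    outer_wait();
    assert(num_waiters == 0);   /* increments/decrements balanced */
    printf("num_waiters balanced at %u\n", num_waiters);
    return 0;
}

The point is just the pairing: each waiter registers and unregisters
itself, so the kick side only needs to know whether the count is
non-zero.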


--
Eric Blake, Principal Software Engineer
Red Hat, Inc.   +1-919-301-3266
Virtualization:  qemu.org | libvirt.org



[Qemu-devel] [PATCH v2] block: make BDRV_POLL_WHILE() re-entrancy safe

2018-03-07 Thread Stefan Hajnoczi
Nested BDRV_POLL_WHILE() calls can occur.  Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.

Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).

Reported-by: "fuweiwei (C)" 
Cc: Paolo Bonzini 
Signed-off-by: Stefan Hajnoczi 
---
v2:
 * Rebase onto qemu.git/master now that AIO_WAIT_WHILE() has landed
   [Kevin]

 include/block/aio-wait.h | 61 
 util/aio-wait.c  |  2 +-
 2 files changed, 31 insertions(+), 32 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index a48c744fa8..74cde07bef 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -50,8 +50,8 @@
  *   }
  */
 typedef struct {
-    /* Is the main loop waiting for a kick?  Accessed with atomic ops. */
-    bool need_kick;
+    /* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
+    unsigned num_waiters;
 } AioWait;
 
 /**
@@ -71,35 +71,34 @@ typedef struct {
  * wait on conditions between two IOThreads since that could lead to deadlock,
  * go via the main loop instead.
  */
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({                  \
-    bool waited_ = false;                                   \
-    bool busy_ = true;                                      \
-    AioWait *wait_ = (wait);                                \
-    AioContext *ctx_ = (ctx);                               \
-    if (in_aio_context_home_thread(ctx_)) {                 \
-        while ((cond) || busy_) {                           \
-            busy_ = aio_poll(ctx_, (cond));                 \
-            waited_ |= !!(cond) | busy_;                    \
-        }                                                   \
-    } else {                                                \
-        assert(qemu_get_current_aio_context() ==            \
-               qemu_get_aio_context());                     \
-        assert(!wait_->need_kick);                          \
-        /* Set wait_->need_kick before evaluating cond.  */ \
-        atomic_mb_set(&wait_->need_kick, true);             \
-        while (busy_) {                                     \
-            if ((cond)) {                                   \
-                waited_ = busy_ = true;                     \
-                aio_context_release(ctx_);                  \
-                aio_poll(qemu_get_aio_context(), true);     \
-                aio_context_acquire(ctx_);                  \
-            } else {                                        \
-                busy_ = aio_poll(ctx_, false);              \
-                waited_ |= busy_;                           \
-            }                                               \
-        }                                                   \
-        atomic_set(&wait_->need_kick, false);               \
-    }                                                       \
-    waited_; })
+#define AIO_WAIT_WHILE(wait, ctx, cond) ({                          \
+    bool waited_ = false;                                           \
+    bool busy_ = true;                                              \
+    AioWait *wait_ = (wait);                                        \
+    AioContext *ctx_ = (ctx);                                       \
+    if (in_aio_context_home_thread(ctx_)) {                         \
+        while ((cond) || busy_) {                                   \
+            busy_ = aio_poll(ctx_, (cond));                         \
+            waited_ |= !!(cond) | busy_;                            \
+        }                                                           \
+    } else {                                                        \
+        assert(qemu_get_current_aio_context() ==                    \
+               qemu_get_aio_context());                             \
+        /* Increment wait_->num_waiters before evaluating cond. */  \
+        atomic_inc(&wait_->num_waiters);                            \
+        while (busy_) {                                             \
+            if ((cond)) {                                           \
+                waited_ = busy_ = true;                             \
+                aio_context_release(ctx_);                          \
+                aio_poll(qemu_get_aio_context(), true);             \
+                aio_context_acquire(ctx_);                          \
+            } else {                                                \
+                busy_ = aio_poll(ctx_, false);                      \
+                waited_ |= busy_;                                   \
+            }                                                       \
+        }                                                           \
+        atomic_dec(&wait_->num_waiters);                            \
+    }                                                               \
+    waited_; })
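
The diffstat above lists a 2-line change to util/aio-wait.c, but that
hunk is cut off in this archive.  Given the flag-to-counter conversion,
the kick side presumably switches from testing the bool to reading the
counter; a reconstructed sketch (an assumption, not copied from the
email):

/* Reconstructed sketch of util/aio-wait.c after the patch; only the
 * condition is assumed to change from need_kick to num_waiters. */
void aio_wait_kick(AioWait *wait)
{
    /* The barrier (or an atomic op) is in the caller.  */
    if (atomic_read(&wait->num_waiters)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}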