Re: [Qemu-devel] [PATCH 15/16] async: remove unnecessary inc/dec pairs

2017-01-18 Thread Stefan Hajnoczi
On Fri, Jan 13, 2017 at 02:17:30PM +0100, Paolo Bonzini wrote:
> Pull the increment/decrement pair out of aio_bh_poll and into the
> callers.
> 
> Signed-off-by: Paolo Bonzini 
> ---
>  aio-posix.c |  8 +++-
>  aio-win32.c |  8 
>  async.c | 12 ++--
>  3 files changed, 13 insertions(+), 15 deletions(-)

Reviewed-by: Stefan Hajnoczi 


signature.asc
Description: PGP signature


[Qemu-devel] [PATCH 15/16] async: remove unnecessary inc/dec pairs

2017-01-13 Thread Paolo Bonzini
Pull the increment/decrement pair out of aio_bh_poll and into the
callers.

Signed-off-by: Paolo Bonzini 
---
 aio-posix.c |  8 +++-
 aio-win32.c |  8 
 async.c | 12 ++--
 3 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/aio-posix.c b/aio-posix.c
index 51e92b8..2537bcd 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -425,9 +425,8 @@ static bool aio_dispatch_handlers(AioContext *ctx)
 
 void aio_dispatch(AioContext *ctx)
 {
-aio_bh_poll(ctx);
-
 qemu_lockcnt_inc(&ctx->list_lock);
+aio_bh_poll(ctx);
 aio_dispatch_handlers(ctx);
 qemu_lockcnt_dec(&ctx->list_lock);
 
@@ -678,16 +677,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
 }
 
 npfd = 0;
-qemu_lockcnt_dec(&ctx->list_lock);
 
 progress |= aio_bh_poll(ctx);
 
 if (ret > 0) {
-qemu_lockcnt_inc(&ctx->list_lock);
 progress |= aio_dispatch_handlers(ctx);
-qemu_lockcnt_dec(&ctx->list_lock);
 }
 
+qemu_lockcnt_dec(&ctx->list_lock);
+
 progress |= timerlistgroup_run_timers(&ctx->tlg);
 
 return progress;
diff --git a/aio-win32.c b/aio-win32.c
index 442a179..bca496a 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -253,8 +253,6 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
 bool progress = false;
 AioHandler *tmp;
 
-qemu_lockcnt_inc(&ctx->list_lock);
-
 /*
  * We have to walk very carefully in case aio_set_fd_handler is
  * called while we're walking.
@@ -305,14 +303,15 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
 }
 }
 
-qemu_lockcnt_dec(&ctx->list_lock);
 return progress;
 }
 
 void aio_dispatch(AioContext *ctx)
 {
+qemu_lockcnt_inc(&ctx->list_lock);
 aio_bh_poll(ctx);
 aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+qemu_lockcnt_dec(&ctx->list_lock);
 timerlistgroup_run_timers(&ctx->tlg);
 }
 
@@ -349,7 +348,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
 }
 }
 
-qemu_lockcnt_dec(&ctx->list_lock);
 first = true;
 
 /* ctx->notifier is always registered.  */
@@ -392,6 +390,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
 progress |= aio_dispatch_handlers(ctx, event);
 } while (count > 0);
 
+qemu_lockcnt_dec(&ctx->list_lock);
+
 progress |= timerlistgroup_run_timers(&ctx->tlg);
 return progress;
 }
diff --git a/async.c b/async.c
index 1839aa5..c471b1e 100644
--- a/async.c
+++ b/async.c
@@ -89,15 +89,16 @@ void aio_bh_call(QEMUBH *bh)
 bh->cb(bh->opaque);
 }
 
-/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
+/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
+ * The count in ctx->list_lock is incremented before the call, and is
+ * not affected by the call.
+ */
 int aio_bh_poll(AioContext *ctx)
 {
 QEMUBH *bh, **bhp, *next;
 int ret;
 bool deleted = false;
 
-qemu_lockcnt_inc(&ctx->list_lock);
-
 ret = 0;
 for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
 next = atomic_rcu_read(&bh->next);
@@ -122,11 +123,10 @@ int aio_bh_poll(AioContext *ctx)
 
 /* remove deleted bhs */
 if (!deleted) {
-qemu_lockcnt_dec(&ctx->list_lock);
 return ret;
 }
 
-if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
+if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
 bhp = &ctx->first_bh;
 while (*bhp) {
 bh = *bhp;
@@ -137,7 +137,7 @@ int aio_bh_poll(AioContext *ctx)
 bhp = &bh->next;
 }
 }
-qemu_lockcnt_unlock(&ctx->list_lock);
+qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
 }
 return ret;
 }
-- 
2.9.3