Avoid entering the slow path of qemu_lockcnt_dec_and_lock if no bottom half has to be deleted.
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 async.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/async.c b/async.c
index 95927fc..6f8184b 100644
--- a/async.c
+++ b/async.c
@@ -92,6 +92,7 @@ int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
     int ret;
+    bool deleted = false;

     qemu_lockcnt_inc(&ctx->list_lock);

@@ -114,9 +115,17 @@ int aio_bh_poll(AioContext *ctx)
             aio_bh_call(bh);
             aio_context_release(ctx);
         }
+        if (bh->deleted) {
+            deleted = true;
+        }
     }

     /* remove deleted bhs */
+    if (!deleted) {
+        qemu_lockcnt_dec(&ctx->list_lock);
+        return ret;
+    }
+
     if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
         bhp = &ctx->first_bh;
         while (*bhp) {
@@ -130,7 +139,6 @@ int aio_bh_poll(AioContext *ctx)
         }
         qemu_lockcnt_unlock(&ctx->list_lock);
     }
-
     return ret;
 }
--
2.9.3