Avoid entering the slow path of qemu_lockcnt_dec_and_lock when no bottom half has to be deleted. If a bottom half deletes itself, it will be picked up on the next walk of the list, or when the AioContext itself is finalized.
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 async.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/async.c b/async.c
index 198c53d..1fce3e4 100644
--- a/async.c
+++ b/async.c
@@ -64,19 +64,24 @@ int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
     int ret;
+    bool deleted = false;
 
     qemu_lockcnt_inc(&ctx->list_lock);
 
     ret = 0;
     for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
         next = atomic_rcu_read(&bh->next);
+        if (bh->deleted) {
+            deleted = true;
+            continue;
+        }
         /* The atomic_xchg is paired with the one in qemu_bh_schedule. The
          * implicit memory barrier ensures that the callback sees all writes
          * done by the scheduling thread. It also ensures that the scheduling
          * thread sees the zero before bh->cb has run, and thus will call
          * aio_notify again if necessary.
          */
-        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
+        if (atomic_xchg(&bh->scheduled, 0)) {
             /* Idle BHs don't count as progress */
             if (!bh->idle) {
                 ret = 1;
@@ -89,6 +94,11 @@ int aio_bh_poll(AioContext *ctx)
     }
 
     /* remove deleted bhs */
+    if (!deleted) {
+        qemu_lockcnt_dec(&ctx->list_lock);
+        return ret;
+    }
+
     if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
         bhp = &ctx->first_bh;
         while (*bhp) {
@@ -102,7 +112,6 @@ int aio_bh_poll(AioContext *ctx)
         }
         qemu_lockcnt_unlock(&ctx->list_lock);
     }
-
     return ret;
 }
 
-- 
2.4.3
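For readers not familiar with the lockcnt pattern the patch relies on, below is a standalone sketch of the fast-path/slow-path split. It is not the QemuLockCnt implementation and all names (my_lockcnt, my_inc, my_dec, my_dec_and_lock, walk_and_maybe_reclaim) are made up for illustration: readers of a lock-free list only bump a counter on the fast path, and only a reader that actually saw garbage pays for the mutex on the slow path.

/*
 * Standalone model (not QEMU code) of the fast-path/slow-path split.
 * It only illustrates why skipping the dec-and-lock variant when
 * nothing was deleted avoids any mutex traffic on the common path.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    atomic_int count;       /* readers currently walking the list */
    pthread_mutex_t mutex;  /* serializes structural list changes */
} my_lockcnt;

static void my_lockcnt_init(my_lockcnt *lc)
{
    atomic_init(&lc->count, 0);
    pthread_mutex_init(&lc->mutex, NULL);
}

static void my_inc(my_lockcnt *lc)
{
    atomic_fetch_add(&lc->count, 1);
}

/* Fast path: drop the reference without ever touching the mutex. */
static void my_dec(my_lockcnt *lc)
{
    atomic_fetch_sub(&lc->count, 1);
}

/*
 * Slow path: if other readers are active, just drop the reference and
 * return false; if we might be the last reader, take the mutex first
 * and return true so the caller can reclaim deleted elements.
 */
static bool my_dec_and_lock(my_lockcnt *lc)
{
    int val = atomic_load(&lc->count);

    while (val > 1) {
        if (atomic_compare_exchange_weak(&lc->count, &val, val - 1)) {
            return false;   /* not the last reader, no lock needed */
        }
        /* val was refreshed by the failed CAS; retry */
    }

    pthread_mutex_lock(&lc->mutex);
    atomic_fetch_sub(&lc->count, 1);
    return true;
}

/* Caller pattern mirroring the patched aio_bh_poll(). */
static void walk_and_maybe_reclaim(my_lockcnt *lc)
{
    bool deleted = false;

    my_inc(lc);
    /* ... walk the list, setting deleted = true if garbage was seen ... */

    if (!deleted) {
        my_dec(lc);                 /* common case: no mutex at all */
        return;
    }
    if (my_dec_and_lock(lc)) {      /* rare case: reclaim under the mutex */
        /* ... unlink and free deleted elements ... */
        pthread_mutex_unlock(&lc->mutex);
    }
}

The point of the sketch is only that the dec-and-lock variant may have to take a mutex when the count can drop to zero, so a caller that knows there is nothing to reclaim is better off with the plain decrement, which is exactly what the hunk adding "if (!deleted)" does.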