Hello!

Please take a look at the new version of the patch here:
https://lore.kernel.org/all/20250224161719.3831357-1-mor...@ispras.ru

Thank you!
February 20, 2025 6:10 AM, "Paolo Bonzini" <pbonz...@redhat.com> wrote:

> On 2/19/25 17:12, Vitalii Mordan wrote:
>
>> diff --git a/util/thread-pool.c b/util/thread-pool.c
>> index 27eb777e85..6c5f4d085b 100644
>> --- a/util/thread-pool.c
>> +++ b/util/thread-pool.c
>> @@ -111,9 +111,8 @@ static void *worker_thread(void *opaque)
>>          ret = req->func(req->arg);
>>
>>          req->ret = ret;
>> -        /* Write ret before state. */
>> -        smp_wmb();
>> -        req->state = THREAD_DONE;
>> +        /* Atomically update state after setting ret. */
>> +        qatomic_store_release(&req->state, THREAD_DONE);
>
> This is good.
>
>> @@ -180,7 +179,7 @@ static void thread_pool_completion_bh(void *opaque)
>>
>> restart:
>>     QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
>> -        if (elem->state != THREAD_DONE) {
>> +        if (qatomic_load_acquire(&elem->state) != THREAD_DONE) {
>
> This is good, but it needs a comment and it can replace the smp_rmb()
> below.
>
>>             continue;
>>         }
>>
>> @@ -223,12 +222,12 @@ static void thread_pool_cancel(BlockAIOCB *acb)
>>     trace_thread_pool_cancel(elem, elem->common.opaque);
>>
>>     QEMU_LOCK_GUARD(&pool->lock);
>> -    if (elem->state == THREAD_QUEUED) {
>> +    if (qatomic_load_acquire(&elem->state) == THREAD_QUEUED) {
>
> This is not ordering anything so it can be qatomic_read/qatomic_set
> (technically the one below doesn't even need that, but it's fine).
>
>>         QTAILQ_REMOVE(&pool->request_list, elem, reqs);
>>         qemu_bh_schedule(pool->completion_bh);
>>
>> -        elem->state = THREAD_DONE;
>>         elem->ret = -ECANCELED;
>> +        qatomic_store_release(&elem->state, THREAD_DONE);
>>     }
>>
>> }
>>
>> @@ -251,8 +250,8 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
>>     req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
>>     req->func = func;
>>     req->arg = arg;
>> -    req->state = THREAD_QUEUED;
>>     req->pool = pool;
>> +    qatomic_store_release(&req->state, THREAD_QUEUED);
>
> This does not need any atomic access, because there is ordering via
> pool->lock (which protects the request_list). There's no need to do
> store-release and load-acquire except to order with another store or
> load, and in fact adding unnecessary acquire/release is harder to
> understand.
>
> Paolo
>
>> QLIST_INSERT_HEAD(&pool->head, req, all);

-- 
Vitalii Mordan
Linux Verification Center, ISPRAS
web: http://linuxtesting.org
e-mail: mor...@ispras.ru
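For reference, below is a minimal, self-contained sketch of the
release/acquire pairing discussed above, written with plain C11 atomics
and pthreads rather than QEMU's qatomic_* wrappers (the assumption here
is that qatomic_store_release()/qatomic_load_acquire() correspond to the
C11 release and acquire memory orders). The ThreadRequest struct and the
worker/main split are illustrative only, not the actual thread-pool code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { THREAD_QUEUED, THREAD_ACTIVE, THREAD_DONE };

typedef struct {
    int ret;           /* plain field, published by the release store */
    atomic_int state;  /* THREAD_QUEUED -> THREAD_DONE */
} ThreadRequest;

/* Worker side: write ret first, then publish it with a release store. */
static void *worker(void *opaque)
{
    ThreadRequest *req = opaque;

    req->ret = 42;  /* result that must be visible to the completion side */
    atomic_store_explicit(&req->state, THREAD_DONE, memory_order_release);
    return NULL;
}

int main(void)
{
    ThreadRequest req;
    pthread_t tid;

    req.ret = -1;
    atomic_init(&req.state, THREAD_QUEUED);
    pthread_create(&tid, NULL, worker, &req);

    /*
     * Completion side: the acquire load pairs with the release store, so
     * once THREAD_DONE is observed, req.ret is guaranteed to be visible.
     * This pairing is what allows dropping the explicit smp_rmb().
     */
    while (atomic_load_explicit(&req.state, memory_order_acquire)
           != THREAD_DONE) {
        /* spin; the real code re-checks from the completion BH instead */
    }
    printf("ret = %d\n", req.ret);

    pthread_join(tid, NULL);
    return 0;
}

The same reasoning covers the THREAD_QUEUED store in
thread_pool_submit_aio(): the worker picks the request off request_list
under pool->lock, and the mutex already provides the needed ordering,
so a plain store is sufficient there.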