Re: [Qemu-devel] [PATCH 07/18] block: introduce bdrv_poll_while and bdrv_wakeup

2016-10-17 Thread Paolo Bonzini


On 16/10/2016 12:25, Stefan Hajnoczi wrote:
> On Thu, Oct 13, 2016 at 07:34:11PM +0200, Paolo Bonzini wrote:
>> @@ -485,9 +474,14 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
>>  atomic_inc(&bs->in_flight);
>>  }
>>  
>> +void bdrv_wakeup(BlockDriverState *bs)
>> +{
>> +}
> 
> Please write a doc comment explaining the semantics of this new API.
> 
> Since it's a nop here it may be better to introduce bdrv_wakeup() in
> another patch.

Okay, will do.

Paolo



Re: [Qemu-devel] [PATCH 07/18] block: introduce bdrv_poll_while and bdrv_wakeup

2016-10-14 Thread Fam Zheng
On Thu, 10/13 19:34, Paolo Bonzini wrote:
> We want the BDS event loop to run exclusively in the iothread that
> owns the BDS's AioContext.  This function and macro provides the
> synchronization between the two event loops.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  block/block-backend.c |  7 +--
>  block/io.c| 47 
> +++
>  block/qed-table.c | 16 
>  block/qed.c   |  4 +++-
>  include/block/block.h |  9 +
>  include/block/block_int.h |  1 +
>  6 files changed, 29 insertions(+), 55 deletions(-)
> 
> diff --git a/block/block-backend.c b/block/block-backend.c
> index 234df1e..c5c2597 100644
> --- a/block/block-backend.c
> +++ b/block/block-backend.c
> @@ -878,7 +878,6 @@ static int blk_prw(BlockBackend *blk, int64_t offset, 
> uint8_t *buf,
> int64_t bytes, CoroutineEntry co_entry,
> BdrvRequestFlags flags)
>  {
> -AioContext *aio_context;
>  QEMUIOVector qiov;
>  struct iovec iov;
>  Coroutine *co;
> @@ -900,11 +899,7 @@ static int blk_prw(BlockBackend *blk, int64_t offset, 
> uint8_t *buf,
>  
>  co = qemu_coroutine_create(co_entry, &rwco);
>  qemu_coroutine_enter(co);
> -
> -aio_context = blk_get_aio_context(blk);
> -while (rwco.ret == NOT_DONE) {
> -aio_poll(aio_context, true);
> -}
> +bdrv_poll_while(blk_bs(blk), rwco.ret == NOT_DONE);

Can we make it "BDRV_POLL_WHILE"? With lower case the mental model of a reader
would be "evaluate rwco.ret == NOT_DONE" first before doing the poll.

>  
>  return rwco.ret;
>  }
> diff --git a/block/io.c b/block/io.c
> index afec968..7d3dcfc 100644
> --- a/block/io.c
> +++ b/block/io.c
> @@ -156,23 +156,12 @@ bool bdrv_requests_pending(BlockDriverState *bs)
>  return false;
>  }
>  
> -static bool bdrv_drain_poll(BlockDriverState *bs)
> -{
> -bool waited = false;
> -
> -while (atomic_read(&bs->in_flight) > 0) {
> -aio_poll(bdrv_get_aio_context(bs), true);
> -waited = true;
> -}
> -return waited;
> -}
> -
>  static bool bdrv_drain_recurse(BlockDriverState *bs)
>  {
>  BdrvChild *child;
>  bool waited;
>  
> -waited = bdrv_drain_poll(bs);
> +waited = bdrv_poll_while(bs, atomic_read(&bs->in_flight) > 0);
>  
>  if (bs->drv && bs->drv->bdrv_drain) {
>  bs->drv->bdrv_drain(bs);
> @@ -485,9 +474,14 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
>  atomic_inc(&bs->in_flight);
>  }
>  
> +void bdrv_wakeup(BlockDriverState *bs)
> +{
> +}
> +
>  void bdrv_dec_in_flight(BlockDriverState *bs)
>  {
>  atomic_dec(&bs->in_flight);
> +bdrv_wakeup(bs);
>  }
>  
>  static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
> @@ -597,13 +591,9 @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
>  /* Fast-path if already in coroutine context */
>  bdrv_rw_co_entry(&rwco);
>  } else {
> -AioContext *aio_context = bdrv_get_aio_context(child->bs);
> -
>  co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
>  qemu_coroutine_enter(co);
> -while (rwco.ret == NOT_DONE) {
> -aio_poll(aio_context, true);
> -}
> +bdrv_poll_while(child->bs, rwco.ret == NOT_DONE);
>  }
>  return rwco.ret;
>  }
> @@ -1845,14 +1835,10 @@ int64_t bdrv_get_block_status_above(BlockDriverState 
> *bs,
>  /* Fast-path if already in coroutine context */
>  bdrv_get_block_status_above_co_entry(&data);
>  } else {
> -AioContext *aio_context = bdrv_get_aio_context(bs);
> -
>  co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
> &data);
>  qemu_coroutine_enter(co);
> -while (!data.done) {
> -aio_poll(aio_context, true);
> -}
> +bdrv_poll_while(bs, !data.done);
>  }
>  return data.ret;
>  }
> @@ -2411,13 +2397,9 @@ int bdrv_flush(BlockDriverState *bs)
>  /* Fast-path if already in coroutine context */
>  bdrv_flush_co_entry(&flush_co);
>  } else {
> -AioContext *aio_context = bdrv_get_aio_context(bs);
> -
>  co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
>  qemu_coroutine_enter(co);
> -while (flush_co.ret == NOT_DONE) {
> -aio_poll(aio_context, true);
> -}
> +bdrv_poll_while(bs, flush_co.ret == NOT_DONE);
>  }
>  
>  return flush_co.ret;
> @@ -2543,13 +2525,9 @@ int bdrv_pdiscard(BlockDriverState *bs, int64_t 
> offset, int count)
>  /* Fast-path if already in coroutine context */
>  bdrv_pdiscard_co_entry(&rwco);
>  } else {
> -AioContext *aio_context = bdrv_get_aio_context(bs);
> -
>  co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
>  qemu_coroutine_enter(co);
> -while (rwco.ret == NOT_DONE) {
> -aio_poll(aio_context, true);
> -}
> +

Re: [Qemu-devel] [PATCH 07/18] block: introduce bdrv_poll_while and bdrv_wakeup

2016-10-14 Thread Paolo Bonzini


On 14/10/2016 12:42, Fam Zheng wrote:
>> > diff --git a/block/qed.c b/block/qed.c
>> > index 1a7ef0a..dcb5fb9 100644
>> > --- a/block/qed.c
>> > +++ b/block/qed.c
>> > @@ -354,7 +354,9 @@ static void qed_start_need_check_timer(BDRVQEDState *s)
>> >  static void qed_cancel_need_check_timer(BDRVQEDState *s)
>> >  {
>> >  trace_qed_cancel_need_check_timer(s);
>> > -timer_del(s->need_check_timer);
>> > +if (s->need_check_timer) {
>> > +timer_del(s->need_check_timer);
>> > +}
>> >  }
> 
> This belongs to a separate patch, or deserves an explanation in the commit
> message.

It probably belongs in no patch, actually.

Paolo



[Qemu-devel] [PATCH 07/18] block: introduce bdrv_poll_while and bdrv_wakeup

2016-10-13 Thread Paolo Bonzini
We want the BDS event loop to run exclusively in the iothread that
owns the BDS's AioContext.  This function and macro provides the
synchronization between the two event loops.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/block-backend.c |  7 +--
 block/io.c| 47 +++
 block/qed-table.c | 16 
 block/qed.c   |  4 +++-
 include/block/block.h |  9 +
 include/block/block_int.h |  1 +
 6 files changed, 29 insertions(+), 55 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index 234df1e..c5c2597 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -878,7 +878,6 @@ static int blk_prw(BlockBackend *blk, int64_t offset, 
uint8_t *buf,
int64_t bytes, CoroutineEntry co_entry,
BdrvRequestFlags flags)
 {
-AioContext *aio_context;
 QEMUIOVector qiov;
 struct iovec iov;
 Coroutine *co;
@@ -900,11 +899,7 @@ static int blk_prw(BlockBackend *blk, int64_t offset, 
uint8_t *buf,
 
 co = qemu_coroutine_create(co_entry, &rwco);
 qemu_coroutine_enter(co);
-
-aio_context = blk_get_aio_context(blk);
-while (rwco.ret == NOT_DONE) {
-aio_poll(aio_context, true);
-}
+bdrv_poll_while(blk_bs(blk), rwco.ret == NOT_DONE);
 
 return rwco.ret;
 }
diff --git a/block/io.c b/block/io.c
index afec968..7d3dcfc 100644
--- a/block/io.c
+++ b/block/io.c
@@ -156,23 +156,12 @@ bool bdrv_requests_pending(BlockDriverState *bs)
 return false;
 }
 
-static bool bdrv_drain_poll(BlockDriverState *bs)
-{
-bool waited = false;
-
-while (atomic_read(&bs->in_flight) > 0) {
-aio_poll(bdrv_get_aio_context(bs), true);
-waited = true;
-}
-return waited;
-}
-
 static bool bdrv_drain_recurse(BlockDriverState *bs)
 {
 BdrvChild *child;
 bool waited;
 
-waited = bdrv_drain_poll(bs);
+waited = bdrv_poll_while(bs, atomic_read(&bs->in_flight) > 0);
 
 if (bs->drv && bs->drv->bdrv_drain) {
 bs->drv->bdrv_drain(bs);
@@ -485,9 +474,14 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
 atomic_inc(&bs->in_flight);
 }
 
+void bdrv_wakeup(BlockDriverState *bs)
+{
+}
+
 void bdrv_dec_in_flight(BlockDriverState *bs)
 {
 atomic_dec(&bs->in_flight);
+bdrv_wakeup(bs);
 }
 
 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
@@ -597,13 +591,9 @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
 /* Fast-path if already in coroutine context */
 bdrv_rw_co_entry(&rwco);
 } else {
-AioContext *aio_context = bdrv_get_aio_context(child->bs);
-
 co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
 qemu_coroutine_enter(co);
-while (rwco.ret == NOT_DONE) {
-aio_poll(aio_context, true);
-}
+bdrv_poll_while(child->bs, rwco.ret == NOT_DONE);
 }
 return rwco.ret;
 }
@@ -1845,14 +1835,10 @@ int64_t bdrv_get_block_status_above(BlockDriverState 
*bs,
 /* Fast-path if already in coroutine context */
 bdrv_get_block_status_above_co_entry(&data);
 } else {
-AioContext *aio_context = bdrv_get_aio_context(bs);
-
 co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
&data);
 qemu_coroutine_enter(co);
-while (!data.done) {
-aio_poll(aio_context, true);
-}
+bdrv_poll_while(bs, !data.done);
 }
 return data.ret;
 }
@@ -2411,13 +2397,9 @@ int bdrv_flush(BlockDriverState *bs)
 /* Fast-path if already in coroutine context */
 bdrv_flush_co_entry(&flush_co);
 } else {
-AioContext *aio_context = bdrv_get_aio_context(bs);
-
 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
 qemu_coroutine_enter(co);
-while (flush_co.ret == NOT_DONE) {
-aio_poll(aio_context, true);
-}
+bdrv_poll_while(bs, flush_co.ret == NOT_DONE);
 }
 
 return flush_co.ret;
@@ -2543,13 +2525,9 @@ int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, 
int count)
 /* Fast-path if already in coroutine context */
 bdrv_pdiscard_co_entry(&rwco);
 } else {
-AioContext *aio_context = bdrv_get_aio_context(bs);
-
 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
 qemu_coroutine_enter(co);
-while (rwco.ret == NOT_DONE) {
-aio_poll(aio_context, true);
-}
+bdrv_poll_while(bs, rwco.ret == NOT_DONE);
 }
 
 return rwco.ret;
@@ -2608,11 +2586,8 @@ int bdrv_ioctl(BlockDriverState *bs, unsigned long int 
req, void *buf)
 bdrv_co_ioctl_entry(&data);
 } else {
 Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry, &data);
-
 qemu_coroutine_enter(co);
-while (data.ret == -EINPROGRESS) {
-aio_poll(bdrv_get_aio_context(bs), true);
-}
+bdrv_poll_while(bs, data.ret == -EINPROGRESS);
 }