[PATCH 1/8] blk-mq: move hctx lock/unlock into a helper

2018-01-09 Thread Tejun Heo
From: Jens Axboe <axboe@kernel.dk>

Move the RCU vs SRCU logic into lock/unlock helpers, which makes
the actual functional bits within the locked region much easier
to read.

tj: Reordered in front of timeout revamp patches and added the missing
blk_mq_run_hw_queue() conversion.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Tejun Heo <tj@kernel.org>
---
 block/blk-mq.c | 66 ++++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 32 insertions(+), 34 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 111e1aa..ddc9261 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -557,6 +557,22 @@ static void __blk_mq_complete_request(struct request *rq)
put_cpu();
 }
 
+static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
+{
+   if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+   rcu_read_unlock();
+   else
+   srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+}
+
+static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
+{
+   if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+   rcu_read_lock();
+   else
+   *srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+}
+
 /**
  * blk_mq_complete_request - end I/O on a request
  * @rq:the request being processed
@@ -1214,17 +1230,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 */
WARN_ON_ONCE(in_interrupt());
 
-   if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-   rcu_read_lock();
-   blk_mq_sched_dispatch_requests(hctx);
-   rcu_read_unlock();
-   } else {
-   might_sleep();
+   might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-   srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-   blk_mq_sched_dispatch_requests(hctx);
-   srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-   }
+   hctx_lock(hctx, &srcu_idx);
+   blk_mq_sched_dispatch_requests(hctx);
+   hctx_unlock(hctx, srcu_idx);
 }
 
 /*
@@ -1296,17 +1306,10 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 * quiesced.
 */
-   if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-   rcu_read_lock();
-   need_run = !blk_queue_quiesced(hctx->queue) &&
-   blk_mq_hctx_has_pending(hctx);
-   rcu_read_unlock();
-   } else {
-   srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-   need_run = !blk_queue_quiesced(hctx->queue) &&
-   blk_mq_hctx_has_pending(hctx);
-   srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-   }
+   hctx_lock(hctx, &srcu_idx);
+   need_run = !blk_queue_quiesced(hctx->queue) &&
+   blk_mq_hctx_has_pending(hctx);
+   hctx_unlock(hctx, srcu_idx);
 
if (need_run) {
__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -1618,7 +1621,7 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
-   blk_qc_t *cookie, bool may_sleep)
+   blk_qc_t *cookie)
 {
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
@@ -1668,25 +1671,20 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
}
 
 insert:
-   blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
+   blk_mq_sched_insert_request(rq, false, run_queue, false,
+   hctx->flags & BLK_MQ_F_BLOCKING);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
 {
-   if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-   rcu_read_lock();
-   __blk_mq_try_issue_directly(hctx, rq, cookie, false);
-   rcu_read_unlock();
-   } else {
-   unsigned int srcu_idx;
+   int srcu_idx;
 
-   might_sleep();
+   might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-   srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-   __blk_mq_try_issue_directly(hctx, rq, cookie, true);
-   srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-   }
+   hctx_lock(hctx, &srcu_idx);
+   __blk_mq_try_issue_directly(hctx, rq, cookie);
+   hctx_unlock(hctx, srcu_idx);
 }
 
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
-- 
2.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/8] blk-mq: move hctx lock/unlock into a helper

2018-01-08 Thread Bart Van Assche
On Mon, 2018-01-08 at 11:15 -0800, Tejun Heo wrote:
> +static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
> +{
> + if (!(hctx->flags & BLK_MQ_F_BLOCKING))
> + rcu_read_unlock();
> + else
> + srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
> +}
> +
> +static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
> +{
> + if (!(hctx->flags & BLK_MQ_F_BLOCKING))
> + rcu_read_lock();
> + else
> + *srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
> +}

A minor comment: please consider to reorder these two functions such that the
lock function appears first and the unlock function second. Anyway:

Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>



[PATCH 1/8] blk-mq: move hctx lock/unlock into a helper

2018-01-08 Thread Tejun Heo
From: Jens Axboe <axboe@kernel.dk>

Move the RCU vs SRCU logic into lock/unlock helpers, which makes
the actual functional bits within the locked region much easier
to read.

tj: Reordered in front of timeout revamp patches and added the missing
blk_mq_run_hw_queue() conversion.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Tejun Heo <tj@kernel.org>
---
 block/blk-mq.c | 66 ++++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 32 insertions(+), 34 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 111e1aa..ddc9261 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -557,6 +557,22 @@ static void __blk_mq_complete_request(struct request *rq)
put_cpu();
 }
 
+static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
+{
+   if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+   rcu_read_unlock();
+   else
+   srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+}
+
+static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
+{
+   if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+   rcu_read_lock();
+   else
+   *srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+}
+
 /**
  * blk_mq_complete_request - end I/O on a request
  * @rq:the request being processed
@@ -1214,17 +1230,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 */
WARN_ON_ONCE(in_interrupt());
 
-   if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-   rcu_read_lock();
-   blk_mq_sched_dispatch_requests(hctx);
-   rcu_read_unlock();
-   } else {
-   might_sleep();
+   might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-   srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-   blk_mq_sched_dispatch_requests(hctx);
-   srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-   }
+   hctx_lock(hctx, &srcu_idx);
+   blk_mq_sched_dispatch_requests(hctx);
+   hctx_unlock(hctx, srcu_idx);
 }
 
 /*
@@ -1296,17 +1306,10 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 * quiesced.
 */
-   if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-   rcu_read_lock();
-   need_run = !blk_queue_quiesced(hctx->queue) &&
-   blk_mq_hctx_has_pending(hctx);
-   rcu_read_unlock();
-   } else {
-   srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-   need_run = !blk_queue_quiesced(hctx->queue) &&
-   blk_mq_hctx_has_pending(hctx);
-   srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-   }
+   hctx_lock(hctx, &srcu_idx);
+   need_run = !blk_queue_quiesced(hctx->queue) &&
+   blk_mq_hctx_has_pending(hctx);
+   hctx_unlock(hctx, srcu_idx);
 
if (need_run) {
__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -1618,7 +1621,7 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
-   blk_qc_t *cookie, bool may_sleep)
+   blk_qc_t *cookie)
 {
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
@@ -1668,25 +1671,20 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
}
 
 insert:
-   blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
+   blk_mq_sched_insert_request(rq, false, run_queue, false,
+   hctx->flags & BLK_MQ_F_BLOCKING);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
 {
-   if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-   rcu_read_lock();
-   __blk_mq_try_issue_directly(hctx, rq, cookie, false);
-   rcu_read_unlock();
-   } else {
-   unsigned int srcu_idx;
+   int srcu_idx;
 
-   might_sleep();
+   might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-   srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-   __blk_mq_try_issue_directly(hctx, rq, cookie, true);
-   srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-   }
+   hctx_lock(hctx, &srcu_idx);
+   __blk_mq_try_issue_directly(hctx, rq, cookie);
+   hctx_unlock(hctx, srcu_idx);
 }
 
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
-- 
2.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html