This patch splits blk_mq_sched_dispatch_requests()
into two parts:

1) the first part checks whether the queue is busy and
handles the busy situation

2) the second part is moved into __blk_mq_sched_dispatch_requests(),
which focuses on dispatching requests from the sw queue or the
scheduler queue (see the sketch below)
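A minimal sketch of the resulting call structure (illustrative only,
not the literal patch; queue_is_busy() is a hypothetical stand-in for
the stopped/quiesced/BLK_MQ_S_BUSY checks that stay in the caller):

	void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
	{
		/* part 1: handle the busy case and bail out early */
		if (queue_is_busy(hctx))	/* hypothetical helper */
			return;

		/* part 2: dispatch from sw queue or scheduler queue */
		__blk_mq_sched_dispatch_requests(hctx);
	}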

Signed-off-by: Ming Lei <ming....@redhat.com>
---
 block/blk-mq-sched.c | 42 +++++++++++++++++++++++++-----------------
 1 file changed, 25 insertions(+), 17 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index cc0687a4d0ab..07ff53187617 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -89,16 +89,37 @@ static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
        return false;
 }
 
-void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
-       bool can_go = true;
-       LIST_HEAD(rq_list);
        struct request *(*dispatch_fn)(struct blk_mq_hw_ctx *) =
                has_sched_dispatch ? e->type->ops.mq.dispatch_request :
                        blk_mq_dispatch_rq_from_ctxs;
+       LIST_HEAD(rq_list);
+
+       if (!has_sched_dispatch && !(hctx->flags & BLK_MQ_F_SHARED_DEPTH)) {
+               blk_mq_flush_busy_ctxs(hctx, &rq_list);
+               blk_mq_dispatch_rq_list(q, &rq_list);
+               return;
+       }
+
+       do {
+               struct request *rq;
+
+               rq = dispatch_fn(hctx);
+               if (!rq)
+                       break;
+               list_add(&rq->queuelist, &rq_list);
+       } while (blk_mq_dispatch_rq_list(q, &rq_list));
+}
+
+void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
+{
+       struct request_queue *q = hctx->queue;
+       bool can_go = true;
+       LIST_HEAD(rq_list);
 
        /* RCU or SRCU read lock is needed before checking quiesced flag */
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
@@ -144,25 +165,12 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
        if (!can_go || test_bit(BLK_MQ_S_BUSY, &hctx->state))
                return;
 
-       if (!has_sched_dispatch && !(hctx->flags & BLK_MQ_F_SHARED_DEPTH)) {
-               blk_mq_flush_busy_ctxs(hctx, &rq_list);
-               blk_mq_dispatch_rq_list(q, &rq_list);
-               return;
-       }
-
        /*
         * We want to dispatch from the scheduler if we had no work left
         * on the dispatch list, OR if we did have work but weren't able
         * to make progress.
         */
-       do {
-               struct request *rq;
-
-               rq = dispatch_fn(hctx);
-               if (!rq)
-                       break;
-               list_add(&rq->queuelist, &rq_list);
-       } while (blk_mq_dispatch_rq_list(q, &rq_list));
+       __blk_mq_sched_dispatch_requests(hctx);
 }
 
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-- 
2.9.4