In the legacy path, when a request is executed via blk_execute_rq(),
it is added directly to the front of q->queue_head, bypassing the
I/O scheduler's queue, because neither merging nor sorting is needed.
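
For reference, the legacy-path behaviour amounts to roughly the
following (simplified sketch modeled on __elv_add_request() with
ELEVATOR_INSERT_FRONT/ELEVATOR_INSERT_BACK; not verbatim kernel code):

	/*
	 * Sketch: legacy (non-mq) insertion done on behalf of
	 * blk_execute_rq(). The request goes straight onto
	 * q->queue_head, so the elevator never sees it and no
	 * merging or sorting is attempted.
	 */
	static void legacy_execute_rq_insert(struct request_queue *q,
					     struct request *rq,
					     bool at_head)
	{
		if (at_head)
			list_add(&rq->queuelist, &q->queue_head);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
	}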

When a SCSI device is put into the quiesce state, such as during
system suspend, the RQF_PM request has to be added to the front of
the queue.
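
As an example, an RQF_PM command is issued during suspend/resume
roughly like this (hedged sketch modeled on scsi_execute(); the
exact helpers and signatures vary across kernel versions):

	static int issue_pm_request(struct request_queue *q)
	{
		struct request *req;

		req = blk_get_request(q, REQ_OP_SCSI_IN, __GFP_RECLAIM);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* command setup (CDB, buffer, sense) omitted for brevity */

		/* RQF_PM lets the request through a quiesced queue */
		req->rq_flags |= RQF_PM;
		/* at_head = 1: it must run before any queued I/O */
		blk_execute_rq(q, NULL, req, 1);

		blk_put_request(req);
		return 0;
	}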

This patch fixes an I/O hang after system resume by adopting a
similar implementation in the blk-mq path.
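
In blk-mq terms, "front of the queue" is the front of hctx->dispatch,
which is drained before the scheduler is consulted: an at_head request
is spliced ahead of everything already sitting in hctx->dispatch,
while a tail insert first flushes the software queues (or drains the
scheduler's dispatch_request() hook) into hctx->dispatch so that the
existing ordering is preserved.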

Tested-by: Oleksandr Natalenko <[email protected]>
Reported-by: Oleksandr Natalenko <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
 block/blk-core.c     |  2 +-
 block/blk-exec.c     |  2 +-
 block/blk-flush.c    |  2 +-
 block/blk-mq-sched.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 block/blk-mq-sched.h |  2 ++
 5 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index dbecbf4a64e0..fb75bc646ebc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2330,7 +2330,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
        if (q->mq_ops) {
                if (blk_queue_io_stat(q))
                        blk_account_io_start(rq, true);
-               blk_mq_sched_insert_request(rq, false, true, false, false);
+               blk_mq_sched_insert_request_bypass(rq, false, true, false, false);
                return BLK_STS_OK;
        }
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 5c0f3dc446dc..4565aa6bb624 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -61,7 +61,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
         * be reused after dying flag is set
         */
        if (q->mq_ops) {
-               blk_mq_sched_insert_request(rq, at_head, true, false, false);
+               blk_mq_sched_insert_request_bypass(rq, at_head, true, false, false);
                return;
        }
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ed5fe322abba..51e89e5c525a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -463,7 +463,7 @@ void blk_insert_flush(struct request *rq)
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                if (q->mq_ops)
-                       blk_mq_sched_insert_request(rq, false, true, false, false);
+                       blk_mq_sched_insert_request_bypass(rq, false, true, false, false);
                else
                        list_add_tail(&rq->queuelist, &q->queue_head);
                return;
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 8d97df40fc28..b40dd063d61f 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -354,6 +354,64 @@ static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
                blk_mq_add_to_requeue_list(rq, false, true);
 }
 
+static void blk_mq_flush_hctx(struct blk_mq_hw_ctx *hctx,
+                             struct elevator_queue *e,
+                             const bool has_sched_dispatch,
+                             struct list_head *rqs)
+{
+       LIST_HEAD(list);
+
+       if (!has_sched_dispatch)
+               blk_mq_flush_busy_ctxs(hctx, &list);
+       else {
+               while (true) {
+                       struct request *rq;
+
+                       rq = e->type->ops.mq.dispatch_request(hctx);
+                       if (!rq)
+                               break;
+                       list_add_tail(&rq->queuelist, &list);
+               }
+       }
+
+       list_splice_tail(&list, rqs);
+}
+
+void blk_mq_sched_insert_request_bypass(struct request *rq, bool at_head,
+                                       bool run_queue, bool async,
+                                       bool can_block)
+{
+       struct request_queue *q = rq->q;
+       struct elevator_queue *e = q->elevator;
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+       LIST_HEAD(list);
+       const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+
+       if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
+               blk_mq_sched_insert_flush(hctx, rq, can_block);
+               return;
+       }
+
+       if (at_head)
+               list_add_tail(&rq->queuelist, &list);
+       else {
+               blk_mq_flush_hctx(hctx, e, has_sched_dispatch, &list);
+               list_add_tail(&rq->queuelist, &list);
+               run_queue = true;
+       }
+
+       spin_lock(&hctx->lock);
+       if (at_head)
+               list_splice(&list, &hctx->dispatch);
+       else
+               list_splice_tail(&list, &hctx->dispatch);
+       spin_unlock(&hctx->lock);
+
+       if (run_queue)
+               blk_mq_run_hw_queue(hctx, async);
+}
+
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async, bool can_block)
 {
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 9267d0b7c197..4d01697a627f 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,6 +18,8 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async, bool can_block);
+void blk_mq_sched_insert_request_bypass(struct request *rq, bool at_head,
+                                       bool run_queue, bool async, bool can_block);
 void blk_mq_sched_insert_requests(struct request_queue *q,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async);
-- 
2.9.5
