If an hctx becomes dead, all in-queue IO requests aimed at that hctx have to
be re-submitted, so also cover the requests queued in the scheduler queue.
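
For context, a condensed, comment-annotated sketch of the logic added to
blk_mq_hctx_notify_dead() follows; it mirrors the hunks below and is
illustrative rather than a standalone build:

	struct elevator_queue *e = hctx->queue->elevator;

	if (!e) {
		/* No I/O scheduler: as before, splice the per-ctx software
		 * queue onto "tmp" under ctx->lock for re-dispatch. */
	} else if (hctx_dead) {
		LIST_HEAD(sched_tmp);

		/* Drain the scheduler queue via the elevator's
		 * dispatch_request() callback. */
		while ((rq = e->type->ops.dispatch_request(hctx))) {
			if (rq->mq_hctx != hctx)
				/* Mapped to another hctx: re-insert through
				 * the scheduler so it reaches a live hctx. */
				list_add(&rq->queuelist, &sched_tmp);
			else
				/* Still mapped here: collect on "tmp" for
				 * direct re-dispatch. */
				list_add(&rq->queuelist, &tmp);
		}

		while (!list_empty(&sched_tmp)) {
			rq = list_entry(sched_tmp.next, struct request,
					queuelist);
			list_del_init(&rq->queuelist);
			blk_mq_sched_insert_request(rq, true, true, true);
		}
	}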

Cc: John Garry <john.ga...@huawei.com>
Cc: Bart Van Assche <bvanass...@acm.org>
Cc: Hannes Reinecke <h...@suse.com>
Cc: Christoph Hellwig <h...@lst.de>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Keith Busch <keith.bu...@intel.com>
Reviewed-by: Hannes Reinecke <h...@suse.com>
Signed-off-by: Ming Lei <ming....@redhat.com>
---
 block/blk-mq.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 17f0a9ef32a8..06081966549f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2305,6 +2305,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
        enum hctx_type type;
        bool hctx_dead;
        struct request *rq;
+       struct elevator_queue *e;
 
        hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
        ctx = __blk_mq_get_ctx(hctx->queue, cpu);
@@ -2315,12 +2316,31 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
        hctx_dead = cpumask_first_and(hctx->cpumask, cpu_online_mask) >=
                nr_cpu_ids;
 
-       spin_lock(&ctx->lock);
-       if (!list_empty(&ctx->rq_lists[type])) {
-               list_splice_init(&ctx->rq_lists[type], &tmp);
-               blk_mq_hctx_clear_pending(hctx, ctx);
+       e = hctx->queue->elevator;
+       if (!e) {
+               spin_lock(&ctx->lock);
+               if (!list_empty(&ctx->rq_lists[type])) {
+                       list_splice_init(&ctx->rq_lists[type], &tmp);
+                       blk_mq_hctx_clear_pending(hctx, ctx);
+               }
+               spin_unlock(&ctx->lock);
+       } else if (hctx_dead) {
+               LIST_HEAD(sched_tmp);
+
+               while ((rq = e->type->ops.dispatch_request(hctx))) {
+                       if (rq->mq_hctx != hctx)
+                               list_add(&rq->queuelist, &sched_tmp);
+                       else
+                               list_add(&rq->queuelist, &tmp);
+               }
+
+               while (!list_empty(&sched_tmp)) {
+                       rq = list_entry(sched_tmp.next, struct request,
+                                       queuelist);
+                       list_del_init(&rq->queuelist);
+                       blk_mq_sched_insert_request(rq, true, true, true);
+               }
        }
-       spin_unlock(&ctx->lock);
 
        if (!hctx_dead) {
                if (list_empty(&tmp))
-- 
2.20.1
