Most blk-mq drivers depend on managed IRQs' auto-affinity to set up
the queue mapping. Thomas mentioned the following point[1]:

"
 That was the constraint of managed interrupts from the very beginning:

  The driver/subsystem has to quiesce the interrupt line and the associated
  queue _before_ it gets shutdown in CPU unplug and not fiddle with it
  until it's restarted by the core when the CPU is plugged in again.
"

However, the current blk-mq implementation doesn't quiesce the hw queue
before the last CPU in the hctx is shut down. Even worse, CPUHP_BLK_MQ_DEAD
is a cpuhp state handled after the CPU is down, so there isn't any chance
to quiesce the hctx for blk-mq wrt. CPU hotplug.

Add a new cpuhp state, CPUHP_AP_BLK_MQ_ONLINE, for blk-mq to stop queues
and wait for completion of in-flight requests.
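
Driver-facing note: queues that are not driven by managed IRQs can skip
the new online handler by setting BLK_MQ_F_NO_MANAGED_IRQ in the tag set
flags (the flag is tested in blk_mq_init_hctx() below and is assumed to
be introduced elsewhere in this series). A minimal, hypothetical sketch
of opting out:

	/*
	 * Illustrative sketch only: "foo" is a hypothetical driver that
	 * does not use managed IRQs, so blk_mq_init_hctx() will not
	 * register a CPUHP_AP_BLK_MQ_ONLINE instance for its hctxs.
	 */
	static struct blk_mq_tag_set foo_tag_set = {
		.ops		= &foo_mq_ops,
		.nr_hw_queues	= 1,
		.queue_depth	= 64,
		.numa_node	= NUMA_NO_NODE,
		.flags		= BLK_MQ_F_SHOULD_MERGE |
				  BLK_MQ_F_NO_MANAGED_IRQ,
	};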

[1] https://lore.kernel.org/linux-block/[email protected]/

Cc: Bart Van Assche <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Keith Busch <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
 block/blk-mq-tag.c         |  2 +-
 block/blk-mq-tag.h         |  2 ++
 block/blk-mq.c             | 65 ++++++++++++++++++++++++++++++++++++++
 include/linux/blk-mq.h     |  1 +
 include/linux/cpuhotplug.h |  1 +
 5 files changed, 70 insertions(+), 1 deletion(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 008388e82b5c..31828b82552b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -325,7 +325,7 @@ static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
  *             true to continue iterating tags, false to stop.
  * @priv:      Will be passed as second argument to @fn.
  */
-static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
 {
        if (tags->nr_reserved_tags)
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..321fd6f440e6 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -35,6 +35,8 @@ extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv);
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+               busy_tag_iter_fn *fn, void *priv);
 
 static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
                                                 struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ec791156e9cc..d991c122abf2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2225,6 +2225,61 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
        return -ENOMEM;
 }
 
+static bool blk_mq_count_inflight_rq(struct request *rq, void *data,
+                                    bool reserved)
+{
+       unsigned *count = data;
+
+       if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
+               (*count)++;
+
+       return true;
+}
+
+static unsigned blk_mq_tags_inflight_rqs(struct blk_mq_tags *tags)
+{
+       unsigned count = 0;
+
+       blk_mq_all_tag_busy_iter(tags, blk_mq_count_inflight_rq, &count);
+
+       return count;
+}
+
+static void blk_mq_drain_inflight_rqs(struct blk_mq_hw_ctx *hctx)
+{
+       while (1) {
+               if (!blk_mq_tags_inflight_rqs(hctx->tags))
+                       break;
+               msleep(5);
+       }
+}
+
+static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
+{
+       struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
+                       struct blk_mq_hw_ctx, cpuhp_online);
+       unsigned prev_cpu = -1;
+
+       while (true) {
+               unsigned next_cpu = cpumask_next_and(prev_cpu, hctx->cpumask,
+                               cpu_online_mask);
+
+               if (next_cpu >= nr_cpu_ids)
+                       break;
+
+               /* return if there is other online CPU on this hctx */
+               if (next_cpu != cpu)
+                       return 0;
+
+               prev_cpu = next_cpu;
+       }
+
+       set_bit(BLK_MQ_S_INTERNAL_STOPPED, &hctx->state);
+       blk_mq_drain_inflight_rqs(hctx);
+
+       return 0;
+}
+
 /*
  * 'cpu' is going away. splice any existing rq_list entries from this
  * software queue to the hw queue dispatch list, and ensure that it
@@ -2241,6 +2296,8 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
        ctx = __blk_mq_get_ctx(hctx->queue, cpu);
        type = hctx->type;
 
+       clear_bit(BLK_MQ_S_INTERNAL_STOPPED, &hctx->state);
+
        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_lists[type])) {
                list_splice_init(&ctx->rq_lists[type], &tmp);
@@ -2261,6 +2318,9 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 
 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 {
+       if (!(hctx->flags & BLK_MQ_F_NO_MANAGED_IRQ))
+               cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
+                                                   &hctx->cpuhp_online);
        cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
                                            &hctx->cpuhp_dead);
 }
@@ -2320,6 +2380,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
 {
        hctx->queue_num = hctx_idx;
 
+       if (!(hctx->flags & BLK_MQ_F_NO_MANAGED_IRQ))
+               cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
+                               &hctx->cpuhp_online);
        cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
 
        hctx->tags = set->tags[hctx_idx];
@@ -3547,6 +3610,8 @@ static int __init blk_mq_init(void)
 {
        cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
                                blk_mq_hctx_notify_dead);
+       cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
+                               NULL, blk_mq_hctx_notify_online);
        return 0;
 }
 subsys_initcall(blk_mq_init);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ee60885ec855..a345f2cf920d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -58,6 +58,7 @@ struct blk_mq_hw_ctx {
 
        atomic_t                nr_active;
 
+       struct hlist_node       cpuhp_online;
        struct hlist_node       cpuhp_dead;
        struct kobject          kobj;
 
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 068793a619ca..bb80f52040cb 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -147,6 +147,7 @@ enum cpuhp_state {
        CPUHP_AP_SMPBOOT_THREADS,
        CPUHP_AP_X86_VDSO_VMA_ONLINE,
        CPUHP_AP_IRQ_AFFINITY_ONLINE,
+       CPUHP_AP_BLK_MQ_ONLINE,
        CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
        CPUHP_AP_X86_INTEL_EPB_ONLINE,
        CPUHP_AP_PERF_ONLINE,
-- 
2.20.1
