There is no need for I/O scheduling while the device is still being
probed, and moving the default elevator setup into blk_register_queue()
cleans up the code nicely by converging on the elevator switch path.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Damien Le Moal <[email protected]>
Tested-by: Damien Le Moal <[email protected]>
---
 block/blk-mq.c    |  9 ---------
 block/blk-sysfs.c | 25 ++++++++++++++-----------
 block/elevator.c  | 21 ++++++---------------
 3 files changed, 20 insertions(+), 35 deletions(-)
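
For reference, a rough sketch of the resulting setup path, pieced together
from the hunks below (not a literal copy of the final code):

	/* blk_register_queue(), already holding q->sysfs_lock */
	if (q->request_fn)
		ret = elv_register_queue(q);
	else if (q->mq_ops && q->elevator)
		ret = elevator_init_mq(q);
	else
		ret = 0;
	if (ret)
		goto out_cleanup_queue;

	/* elevator_init_mq(), asserting the lock instead of taking it */
	lockdep_assert_held(&q->sysfs_lock);
	if (q->nr_hw_queues != 1 || (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
		return 0;
	e = elevator_get(q, "mq-deadline", false);
	if (!e)
		return 0;
	err = elevator_switch_mq(q, e);	/* same path as a sysfs switch */

Since blk_register_queue() already holds q->sysfs_lock, elevator_init_mq()
no longer needs to take it, and the default mq-deadline setup goes through
the same elevator_switch_mq() path as a sysfs-triggered scheduler change.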

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6332940ca118..cc42bdf54eb0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2569,15 +2569,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_add_queue_tag_set(set, q);
        blk_mq_map_swqueue(q);
-
-       if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
-               int ret;
-
-               ret = elevator_init_mq(q);
-               if (ret)
-                       return ERR_PTR(ret);
-       }
-
        return q;
 
 err_hctxs:
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 94987b1f69e1..f3b24c6c3dac 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -907,21 +907,24 @@ int blk_register_queue(struct gendisk *disk)
 
        blk_throtl_register_queue(q);
 
-       if (q->request_fn || (q->mq_ops && q->elevator)) {
+       if (q->request_fn)
                ret = elv_register_queue(q);
-               if (ret) {
-                       mutex_unlock(&q->sysfs_lock);
-                       kobject_uevent(&q->kobj, KOBJ_REMOVE);
-                       kobject_del(&q->kobj);
-                       blk_trace_remove_sysfs(dev);
-                       kobject_put(&dev->kobj);
-                       return ret;
-               }
-       }
-       ret = 0;
+       else if (q->mq_ops && q->elevator)
+               ret = elevator_init_mq(q);
+       else
+               ret = 0;
+       if (ret)
+               goto out_cleanup_queue;
 unlock:
        mutex_unlock(&q->sysfs_lock);
        return ret;
+out_cleanup_queue:
+       mutex_unlock(&q->sysfs_lock);
+       kobject_uevent(&q->kobj, KOBJ_REMOVE);
+       kobject_del(&q->kobj);
+       blk_trace_remove_sysfs(dev);
+       kobject_put(&dev->kobj);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(blk_register_queue);
 
diff --git a/block/elevator.c b/block/elevator.c
index fa828b5bfd4b..df87468c6dbe 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -981,28 +981,19 @@ static int elevator_switch_mq(struct request_queue *q,
 int elevator_init_mq(struct request_queue *q)
 {
        struct elevator_type *e;
-       int err = 0;
+       int err;
 
-       if (q->nr_hw_queues != 1)
-               return 0;
+       lockdep_assert_held(&q->sysfs_lock);
 
-       /*
-        * q->sysfs_lock must be held to provide mutual exclusion between
-        * elevator_switch() and here.
-        */
-       mutex_lock(&q->sysfs_lock);
-       if (unlikely(q->elevator))
-               goto out_unlock;
+       if (q->nr_hw_queues != 1 || (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
+               return 0;
 
        e = elevator_get(q, "mq-deadline", false);
        if (!e)
-               goto out_unlock;
-
-       err = blk_mq_init_sched(q, e);
+               return 0;
+       err = elevator_switch_mq(q, e);
        if (err)
                elevator_put(e);
-out_unlock:
-       mutex_unlock(&q->sysfs_lock);
        return err;
 }
 
-- 
2.17.0
