Since the next patch in this series will use RCU to iterate over
tag_list, make this safe. Add lockdep_assert_held() statements
in functions that iterate over tag_list to make clear that using
list_for_each_entry() instead of list_for_each_entry_rcu() is
fine in these functions.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
---
 block/blk-mq.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f7cd3208bcdf..c26464f9649a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2076,6 +2076,8 @@ static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
 {
 	struct request_queue *q;
 
+	lockdep_assert_held(&set->tag_list_lock);
+
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_freeze_queue(q);
 		queue_set_hctx_shared(q, shared);
@@ -2088,7 +2090,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 	struct blk_mq_tag_set *set = q->tag_set;
 
 	mutex_lock(&set->tag_list_lock);
-	list_del_init(&q->tag_set_list);
+	list_del_rcu(&q->tag_set_list);
+	INIT_LIST_HEAD(&q->tag_set_list);
 	if (list_is_singular(&set->tag_list)) {
 		/* just transitioned to unshared */
 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2096,6 +2099,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 		blk_mq_update_tag_set_depth(set, false);
 	}
 	mutex_unlock(&set->tag_list_lock);
+
+	synchronize_rcu();
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -2113,7 +2118,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	}
 	if (set->flags & BLK_MQ_F_TAG_SHARED)
 		queue_set_hctx_shared(q, true);
-	list_add_tail(&q->tag_set_list, &set->tag_list);
+	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
 	mutex_unlock(&set->tag_list_lock);
 }
 
@@ -2601,6 +2606,8 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 {
 	struct request_queue *q;
 
+	lockdep_assert_held(&set->tag_list_lock);
+
 	if (nr_hw_queues > nr_cpu_ids)
 		nr_hw_queues = nr_cpu_ids;
 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
--
2.12.0