Introduce blk_mq_queue_rq_iter(), a function that iterates over all
requests associated with a request queue, including requests that have
been assigned a scheduler tag but not yet a driver tag. This function
will be used in the patch "Make blk_get_request() block for non-PM
requests while suspended".
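
A minimal usage sketch (the callback name, the RQF_PM check and the
count variable are illustrative only and not part of this patch): any
busy_iter_fn callback can be passed, e.g. to count in-flight non-PM
requests:

  static void count_non_pm_rqs(struct blk_mq_hw_ctx *hctx,
                               struct request *rq, void *priv,
                               bool reserved)
  {
          unsigned int *count = priv;

          /* Hypothetical example: skip power-management requests. */
          if (!(rq->rq_flags & RQF_PM))
                  (*count)++;
  }

  unsigned int count = 0;

  blk_mq_queue_rq_iter(q, count_non_pm_rqs, &count);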

Signed-off-by: Bart Van Assche <bvanass...@acm.org>
Cc: Christoph Hellwig <h...@lst.de>
Cc: Ming Lei <ming....@redhat.com>
Cc: Jianchao Wang <jianchao.w.w...@oracle.com>
Cc: Hannes Reinecke <h...@suse.com>
Cc: Johannes Thumshirn <jthumsh...@suse.de>
Cc: Alan Stern <st...@rowland.harvard.edu>
---
 block/blk-mq-tag.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
 block/blk-mq-tag.h |  2 ++
 2 files changed, 46 insertions(+)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index cb5db0c3cc32..f95b41b5f07a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -374,6 +374,50 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
        rcu_read_unlock();
 }
 
+/*
+ * Call @fn(rq, @priv, reserved) for each request associated with request
+ * queue @q or any queue that it shares tags with and that has been assigned a
+ * tag. 'reserved' indicates whether or not 'rq' is a reserved request. In
+ * contrast to blk_mq_queue_tag_busy_iter(), if an I/O scheduler has been
+ * associated with @q, this function also iterates over requests that have
+ * been assigned a scheduler tag but that have not yet been assigned a driver
+ * tag.
+ */
+void blk_mq_queue_rq_iter(struct request_queue *q, busy_iter_fn *fn, void *priv)
+{
+       struct blk_mq_hw_ctx *hctx;
+       int i;
+
+       /*
+        * __blk_mq_update_nr_hw_queues() will update nr_hw_queues and
+        * queue_hw_ctx after having frozen the request queue. So we can use
+        * q_usage_counter to avoid a race with that
+        * function. __blk_mq_update_nr_hw_queues() uses synchronize_rcu() to
+        * ensure that this function leaves the critical section below.
+        */
+       rcu_read_lock();
+       if (percpu_ref_is_zero(&q->q_usage_counter)) {
+               rcu_read_unlock();
+               return;
+       }
+
+       queue_for_each_hw_ctx(q, hctx, i) {
+               struct blk_mq_tags *tags = hctx->sched_tags ? : hctx->tags;
+
+               /*
+                * If no software queues are currently mapped to this
+                * hardware queue, there's nothing to check
+                */
+               if (!blk_mq_hw_queue_mapped(hctx))
+                       continue;
+
+               if (tags->nr_reserved_tags)
+                       bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
+               bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
+       }
+       rcu_read_unlock();
+}
+
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
 {
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..25e62997ed6c 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -35,6 +35,8 @@ extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv);
+void blk_mq_queue_rq_iter(struct request_queue *q, busy_iter_fn *fn,
+                         void *priv);
 
 static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
                                                 struct blk_mq_hw_ctx *hctx)
-- 
2.19.0.397.gdd90340f6a-goog