In the following patch, we will use blk_mq_try_issue_directly() for DM
to return the dispatch result, and DM needs this information to improve
IO merge.

Signed-off-by: Ming Lei <[email protected]>
---
 block/blk-mq.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index fe82d7a47b35..fd4fb6316ea1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1620,13 +1620,14 @@ static bool __blk_mq_issue_req(struct blk_mq_hw_ctx 
*hctx,
        return false;
 }
 
-static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-                                       struct request *rq,
-                                       blk_qc_t *cookie, bool may_sleep)
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                               struct request *rq,
+                                               blk_qc_t *cookie,
+                                               bool may_sleep)
 {
        struct request_queue *q = rq->q;
        blk_qc_t new_cookie;
-       blk_status_t ret;
+       blk_status_t ret = BLK_STS_OK;
        bool run_queue = true;
 
        /* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -1649,26 +1650,30 @@ static void __blk_mq_try_issue_directly(struct 
blk_mq_hw_ctx *hctx,
        switch (ret) {
        case BLK_STS_OK:
                *cookie = new_cookie;
-               return;
+               return ret;
        case BLK_STS_RESOURCE:
                __blk_mq_requeue_request(rq);
                goto insert;
        default:
                *cookie = BLK_QC_T_NONE;
                blk_mq_end_request(rq, ret);
-               return;
+               return ret;
        }
 
 insert:
        blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
+       return ret;
 }
 
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-               struct request *rq, blk_qc_t *cookie)
+static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                             struct request *rq,
+                                             blk_qc_t *cookie)
 {
+       blk_status_t ret;
+
        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                rcu_read_lock();
-               __blk_mq_try_issue_directly(hctx, rq, cookie, false);
+               ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
                rcu_read_unlock();
        } else {
                unsigned int srcu_idx;
@@ -1676,9 +1681,11 @@ static void blk_mq_try_issue_directly(struct 
blk_mq_hw_ctx *hctx,
                might_sleep();
 
                srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-               __blk_mq_try_issue_directly(hctx, rq, cookie, true);
+               ret = __blk_mq_try_issue_directly(hctx, rq, cookie, true);
                srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
        }
+
+       return ret;
 }
 
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
-- 
2.9.5

Reply via email to