If we want to support async IO polling, we need to be able to
find completions that aren't just for the one request we are
looking for. Always pass -1 to the mq_ops->poll() helper, and
have it return how many events were found in this poll loop.

Signed-off-by: Jens Axboe <ax...@kernel.dk>
---
 block/blk-mq.c          | 69 +++++++++++++++++++++++------------------
 drivers/nvme/host/pci.c | 32 +++++++++----------
 2 files changed, 54 insertions(+), 47 deletions(-)
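
As a rough sketch of the new contract from a driver's point of view
(illustration only, not part of this patch): a ->poll() callback can no
longer assume it is asked about one specific tag. It is handed -1U,
drains whatever completions are pending, and returns how many it found.
The names struct example_queue, example_reap_completions(), and
example_poll() below are made up for illustration:

static int example_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
        struct example_queue *eq = hctx->driver_data;

        /*
         * 'tag' is always -1U with this change, so don't try to match a
         * single request; reap everything that is ready and report how
         * many completions were processed.
         */
        return example_reap_completions(eq);
}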

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f8c2e6544903..03b1af0151ca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3266,9 +3266,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
         *  0:  use half of prev avg
         * >0:  use this specific value
         */
-       if (q->poll_nsec == -1)
-               return false;
-       else if (q->poll_nsec > 0)
+       if (q->poll_nsec > 0)
                nsecs = q->poll_nsec;
        else
                nsecs = blk_mq_poll_nsecs(q, hctx, rq);
@@ -3305,21 +3303,36 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
        return true;
 }
 
-static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q,
+                              struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
+{
+       struct request *rq;
+
+       if (q->poll_nsec == -1)
+               return false;
+
+       if (!blk_qc_t_is_internal(cookie))
+               rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+       else {
+               rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+               /*
+                * With scheduling, if the request has completed, we'll
+                * get a NULL return here, as we clear the sched tag when
+                * that happens. The request still remains valid, like always,
+                * so we should be safe with just the NULL check.
+                */
+               if (!rq)
+                       return false;
+       }
+
+       return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+}
+
+static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx)
 {
        struct request_queue *q = hctx->queue;
        long state;
 
-       /*
-        * If we sleep, have the caller restart the poll loop to reset
-        * the state. Like for the other success return cases, the
-        * caller is responsible for checking if the IO completed. If
-        * the IO isn't complete, we'll get called again and will go
-        * straight to the busy poll loop.
-        */
-       if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
-               return 1;
-
        hctx->poll_considered++;
 
        state = current->state;
@@ -3328,7 +3341,7 @@ static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
                hctx->poll_invoked++;
 
-               ret = q->mq_ops.poll(hctx, rq->tag);
+               ret = q->mq_ops.poll(hctx, -1U);
                if (ret > 0) {
                        hctx->poll_success++;
                        set_current_state(TASK_RUNNING);
@@ -3352,27 +3365,23 @@ static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
 static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 {
        struct blk_mq_hw_ctx *hctx;
-       struct request *rq;
 
        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return 0;
 
        hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
-       if (!blk_qc_t_is_internal(cookie))
-               rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-       else {
-               rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
-               /*
-                * With scheduling, if the request has completed, we'll
-                * get a NULL return here, as we clear the sched tag when
-                * that happens. The request still remains valid, like always,
-                * so we should be safe with just the NULL check.
-                */
-               if (!rq)
-                       return 0;
-       }
 
-       return __blk_mq_poll(hctx, rq);
+       /*
+        * If we sleep, have the caller restart the poll loop to reset
+        * the state. Like for the other success return cases, the
+        * caller is responsible for checking if the IO completed. If
+        * the IO isn't complete, we'll get called again and will go
+        * straight to the busy poll loop.
+        */
+       if (blk_mq_poll_hybrid(q, hctx, cookie))
+               return 1;
+
+       return __blk_mq_poll(hctx);
 }
 
 unsigned int blk_mq_rq_cpu(struct request *rq)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bb22ae567208..adeb8f516bf9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -995,13 +995,18 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
        nvme_end_request(req, cqe->status, cqe->result);
 }
 
-static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
+static int nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
 {
+       int nr = 0;
+
        while (start != end) {
+               nr++;
                nvme_handle_cqe(nvmeq, start);
                if (++start == nvmeq->q_depth)
                        start = 0;
        }
+
+       return nr;
 }
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
@@ -1012,22 +1017,17 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
        }
 }
 
-static inline bool nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
-               u16 *end, int tag)
+static inline void nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
+                                  u16 *end)
 {
-       bool found = false;
-
        *start = nvmeq->cq_head;
-       while (!found && nvme_cqe_pending(nvmeq)) {
-               if (nvmeq->cqes[nvmeq->cq_head].command_id == tag)
-                       found = true;
+       while (nvme_cqe_pending(nvmeq))
                nvme_update_cq_head(nvmeq);
-       }
+
        *end = nvmeq->cq_head;
 
        if (*start != *end)
                nvme_ring_cq_doorbell(nvmeq);
-       return found;
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
@@ -1039,7 +1039,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
        spin_lock(&nvmeq->cq_lock);
        if (nvmeq->cq_head != nvmeq->last_cq_head)
                ret = IRQ_HANDLED;
-       nvme_process_cq(nvmeq, &start, &end, -1);
+       nvme_process_cq(nvmeq, &start, &end);
        nvmeq->last_cq_head = nvmeq->cq_head;
        spin_unlock(&nvmeq->cq_lock);
 
@@ -1062,7 +1062,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 {
        u16 start, end;
-       bool found;
 
        if (!nvme_cqe_pending(nvmeq))
                return 0;
@@ -1074,14 +1073,13 @@ static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
                local_irq_disable();
 
        spin_lock(&nvmeq->cq_lock);
-       found = nvme_process_cq(nvmeq, &start, &end, tag);
+       nvme_process_cq(nvmeq, &start, &end);
        spin_unlock(&nvmeq->cq_lock);
 
        if (!nvmeq->polled)
                local_irq_enable();
 
-       nvme_complete_cqes(nvmeq, start, end);
-       return found;
+       return nvme_complete_cqes(nvmeq, start, end);
 }
 
 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
@@ -1414,7 +1412,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
                nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
 
        spin_lock_irq(&nvmeq->cq_lock);
-       nvme_process_cq(nvmeq, &start, &end, -1);
+       nvme_process_cq(nvmeq, &start, &end);
        spin_unlock_irq(&nvmeq->cq_lock);
 
        nvme_complete_cqes(nvmeq, start, end);
@@ -2209,7 +2207,7 @@ static void nvme_del_cq_end(struct request *req, blk_status_t error)
                unsigned long flags;
 
                spin_lock_irqsave(&nvmeq->cq_lock, flags);
-               nvme_process_cq(nvmeq, &start, &end, -1);
+               nvme_process_cq(nvmeq, &start, &end);
                spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
 
                nvme_complete_cqes(nvmeq, start, end);
-- 
2.17.1
