If REQ_FAILFAST_TRANSPORT is set, the driver should not retry IO that
completed with transport errors. REQ_FAILFAST_TRANSPORT is set by
multipathing software (e.g. dm-multipath) before it issues IO.
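
For reference, a minimal sketch (not the dm-multipath source; the helper
name is illustrative) of how an upper layer marks a cloned request as
failfast before dispatching it to the low-level driver:

  #include <linux/blkdev.h>

  /*
   * Illustrative helper: mark a clone so the low-level driver does not
   * retry transport errors itself; the multipath layer decides whether
   * to retry the IO on another path instead.
   */
  static void example_mark_failfast(struct request *clone)
  {
  	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
  }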

Update NVMe to handle path errors for requests marked with either
REQ_NVME_MPATH or REQ_FAILFAST_TRANSPORT: such requests are given a
disposition of FAILOVER or FAILUP, respectively. FAILUP handling
ensures a retryable error is returned up from NVMe.

Introduce nvme_failup_req() for use in nvme_complete_rq() if
nvme_decide_disposition() returns FAILUP. nvme_failup_req() ensures
the request is completed with a retryable IO error when appropriate.
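
For context, a minimal sketch of the consumer side, assuming a stacked
driver's completion path (the function name is illustrative):
blk_path_error() is how upper layers distinguish retryable
path/transport errors from target-side errors, which is why FAILUP must
hand a retryable blk_status_t back up the stack:

  #include <linux/blk_types.h>

  /*
   * Illustrative completion-side check: only path/transport errors
   * (e.g. BLK_STS_TRANSPORT) are worth retrying on another path;
   * target-side errors (e.g. BLK_STS_MEDIUM) are not.
   */
  static bool example_should_retry_on_other_path(blk_status_t error)
  {
  	return error && blk_path_error(error);
  }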

Signed-off-by: Mike Snitzer <[email protected]>
---
 drivers/nvme/host/core.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4134cf3c7e48..605ffba6835f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -299,6 +299,7 @@ enum nvme_disposition {
        COMPLETE,
        RETRY,
        FAILOVER,
+       FAILUP,
 };
 
 static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
@@ -318,10 +319,11 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
            nvme_req(req)->retries >= nvme_max_retries)
                return COMPLETE;
 
-       if (req->cmd_flags & REQ_NVME_MPATH) {
+       if (req->cmd_flags & (REQ_NVME_MPATH | REQ_FAILFAST_TRANSPORT)) {
                if (nvme_is_path_error(nvme_req(req)->status) ||
                    blk_queue_dying(req->q))
-                       return FAILOVER;
+                       return (req->cmd_flags & REQ_NVME_MPATH) ?
+                               FAILOVER : FAILUP;
        } else {
                if (blk_queue_dying(req->q))
                        return COMPLETE;
@@ -343,6 +345,25 @@ static inline void nvme_end_req(struct request *req)
        blk_mq_end_request(req, status);
 }
 
+static void nvme_failup_req(struct request *req)
+{
+       blk_status_t status = nvme_error_status(nvme_req(req)->status);
+
+       /* Ensure a retryable path error is returned */
+       if (WARN_ON_ONCE(!blk_path_error(status))) {
+               /*
+                * If here, nvme_is_path_error() returned true.
+                * So nvme_error_status() translation needs updating
+                * relative to blk_path_error(), or vice versa.
+                */
+       pr_debug("Request meant for failover but blk_status_t (errno=%d) was not retryable.\n",
+                        blk_status_to_errno(status));
+               nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+       }
+
+       nvme_end_req(req);
+}
+
 void nvme_complete_rq(struct request *req)
 {
        trace_nvme_complete_rq(req);
@@ -361,6 +382,9 @@ void nvme_complete_rq(struct request *req)
        case FAILOVER:
                nvme_failover_req(req);
                return;
+       case FAILUP:
+               nvme_failup_req(req);
+               return;
        }
 }
 EXPORT_SYMBOL_GPL(nvme_complete_rq);
-- 
2.15.0
