BZ: 1948690
Upstream Status: RHEL-only

Signed-off-by: Mike Snitzer <snit...@redhat.com>

rhel-8.git commit b904f4b8e0f90613bf1b2b9d9ccad3c015741daf
Author: Mike Snitzer <snit...@redhat.com>
Date:   Tue Aug 25 21:52:47 2020 -0400

    [nvme] nvme: decouple basic ANA log page re-read support from native multipathing
    
    Message-id: <20200825215248.2291-10-snit...@redhat.com>
    Patchwork-id: 325179
    Patchwork-instance: patchwork
    O-Subject: [RHEL8.3 PATCH 09/10] nvme: decouple basic ANA log page re-read support from native multipathing
    Bugzilla: 1843515
    RH-Acked-by: David Milburn <dmilb...@redhat.com>
    RH-Acked-by: Gopal Tiwari <gtiw...@redhat.com>
    RH-Acked-by: Ewan Milne <emi...@redhat.com>
    
    BZ: 1843515
    Upstream Status: RHEL-only
    
    Whether or not ANA is present is a choice of the target implementation;
    the host (and whether it supports multipathing) has _zero_ influence on
    this.  If the target declares a path as 'inaccessible' the path _is_
    inaccessible to the host.  As such, ANA support should be functional
    even if native multipathing is not.
    
    Introduce ability to always re-read ANA log page as required due to ANA
    error and make current ANA state available via sysfs -- even if native
    multipathing is disabled on the host (e.g. nvme_core.multipath=N).
    
    This affords userspace access to the current ANA state independent of
    which layer might be doing multipathing.  It also allows multipath-tools
    to rely on the NVMe driver for ANA support while dm-multipath takes care
    of multipathing.
    
    And as always, if embedded NVMe users do not want any performance
    overhead associated with ANA or native NVMe multipathing they can
    disable CONFIG_NVME_MULTIPATH.
    
    Signed-off-by: Mike Snitzer <snit...@redhat.com>
    Signed-off-by: Frantisek Hrbata <fhrb...@redhat.com>

---
 drivers/nvme/host/core.c      |    2 ++
 drivers/nvme/host/multipath.c |   23 ++++++++++++++++++-----
 drivers/nvme/host/nvme.h      |    4 ++++
 3 files changed, 24 insertions(+), 5 deletions(-)

Index: linux-rhel9/drivers/nvme/host/core.c
===================================================================
--- linux-rhel9.orig/drivers/nvme/host/core.c
+++ linux-rhel9/drivers/nvme/host/core.c
@@ -347,6 +347,8 @@ static inline void nvme_end_req_with_fai
        if (unlikely(nvme_status & NVME_SC_DNR))
                goto out;
 
+       nvme_update_ana(req);
+
        if (!blk_path_error(status)) {
                pr_debug("Request meant for failover but blk_status_t (errno=%d) was not retryable.\n",
                         blk_status_to_errno(status));
Index: linux-rhel9/drivers/nvme/host/multipath.c
===================================================================
--- linux-rhel9.orig/drivers/nvme/host/multipath.c
+++ linux-rhel9/drivers/nvme/host/multipath.c
@@ -65,10 +65,25 @@ void nvme_set_disk_name(char *disk_name,
        }
 }
 
+static inline void __nvme_update_ana(struct nvme_ns *ns)
+{
+       if (!ns->ctrl->ana_log_buf)
+               return;
+
+       set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+       queue_work(nvme_wq, &ns->ctrl->ana_work);
+}
+
+
+void nvme_update_ana(struct request *req)
+{
+       if (nvme_is_ana_error(nvme_req(req)->status))
+               __nvme_update_ana(req->q->queuedata);
+}
+
 void nvme_failover_req(struct request *req)
 {
        struct nvme_ns *ns = req->q->queuedata;
-       u16 status = nvme_req(req)->status & 0x7ff;
        unsigned long flags;
 
        nvme_mpath_clear_current_path(ns);
@@ -78,10 +93,8 @@ void nvme_failover_req(struct request *r
         * ready to serve this namespace.  Kick of a re-read of the ANA
         * information page, and just try any other available path for now.
         */
-       if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
-               set_bit(NVME_NS_ANA_PENDING, &ns->flags);
-               queue_work(nvme_wq, &ns->ctrl->ana_work);
-       }
+       if (nvme_is_ana_error(nvme_req(req)->status))
+               __nvme_update_ana(ns);
 
        spin_lock_irqsave(&ns->head->requeue_lock, flags);
        blk_steal_bios(&ns->head->requeue_list, req);
Index: linux-rhel9/drivers/nvme/host/nvme.h
===================================================================
--- linux-rhel9.orig/drivers/nvme/host/nvme.h
+++ linux-rhel9/drivers/nvme/host/nvme.h
@@ -664,6 +664,7 @@ void nvme_mpath_start_freeze(struct nvme
 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
                        struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
+void nvme_update_ana(struct request *req);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
@@ -714,6 +715,9 @@ static inline void nvme_set_disk_name(ch
 static inline void nvme_failover_req(struct request *req)
 {
 }
+static inline void nvme_update_ana(struct request *req)
+{
+}
 static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
 {
 }

--
dm-devel mailing list
dm-devel@redhat.com
https://listman.redhat.com/mailman/listinfo/dm-devel

Reply via email to