Move the pmem region driver logic from region.c into a new file,
pmem_region.c. Since devm_cxl_add_pmem_region() is now defined outside
of region.c, drop its static qualifier and declare it in core.h so the
region probe path can keep calling it.

No functional changes intended.

Signed-off-by: Gregory Price <[email protected]>
---
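For reviewers, a rough sketch of the resulting split. The cxl_region_probe()
body below is a simplified illustration of the existing caller in region.c,
not code from this patch:

    /* core.h, under CONFIG_CXL_REGION: declaration now shared with region.c */
    int devm_cxl_add_pmem_region(struct cxl_region *cxlr);

    /*
     * region.c: the existing pmem probe path keeps calling the function,
     * now resolved against the definition in pmem_region.c. Simplified
     * illustration only; the real caller handles more cases.
     */
    static int cxl_region_probe(struct device *dev)
    {
            struct cxl_region *cxlr = to_cxl_region(dev);

            return devm_cxl_add_pmem_region(cxlr);
    }
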
 drivers/cxl/core/Makefile      |   1 +
 drivers/cxl/core/core.h        |   1 +
 drivers/cxl/core/pmem_region.c | 191 +++++++++++++++++++++++++++++++++
 drivers/cxl/core/region.c      | 184 -------------------------------
 4 files changed, 193 insertions(+), 184 deletions(-)
 create mode 100644 drivers/cxl/core/pmem_region.c

diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 5ad8fef210b5..23269c81fd44 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -17,6 +17,7 @@ cxl_core-y += cdat.o
 cxl_core-y += ras.o
 cxl_core-$(CONFIG_TRACING) += trace.o
 cxl_core-$(CONFIG_CXL_REGION) += region.o
+cxl_core-$(CONFIG_CXL_REGION) += pmem_region.o
 cxl_core-$(CONFIG_CXL_MCE) += mce.o
 cxl_core-$(CONFIG_CXL_FEATURES) += features.o
 cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += edac.o
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index dd987ef2def5..26991de12d76 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -43,6 +43,7 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port);
 struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
 u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
                   u64 dpa);
+int devm_cxl_add_pmem_region(struct cxl_region *cxlr);
 
 #else
 static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr,
diff --git a/drivers/cxl/core/pmem_region.c b/drivers/cxl/core/pmem_region.c
new file mode 100644
index 000000000000..81b66e548bb5
--- /dev/null
+++ b/drivers/cxl/core/pmem_region.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+static void cxl_pmem_region_release(struct device *dev)
+{
+       struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+       int i;
+
+       for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+               struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
+
+               put_device(&cxlmd->dev);
+       }
+
+       kfree(cxlr_pmem);
+}
+
+static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
+       &cxl_base_attribute_group,
+       NULL,
+};
+
+const struct device_type cxl_pmem_region_type = {
+       .name = "cxl_pmem_region",
+       .release = cxl_pmem_region_release,
+       .groups = cxl_pmem_region_attribute_groups,
+};
+
+bool is_cxl_pmem_region(struct device *dev)
+{
+       return dev->type == &cxl_pmem_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, "CXL");
+
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+       if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
+                         "not a cxl_pmem_region device\n"))
+               return NULL;
+       return container_of(dev, struct cxl_pmem_region, dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
+
+static struct lock_class_key cxl_pmem_region_key;
+
+static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
+{
+       struct cxl_region_params *p = &cxlr->params;
+       struct cxl_nvdimm_bridge *cxl_nvb;
+       struct device *dev;
+       int i;
+
+       guard(rwsem_read)(&cxl_rwsem.region);
+       if (p->state != CXL_CONFIG_COMMIT)
+               return -ENXIO;
+
+       struct cxl_pmem_region *cxlr_pmem __free(kfree) =
+               kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
+       if (!cxlr_pmem)
+               return -ENOMEM;
+
+       cxlr_pmem->hpa_range.start = p->res->start;
+       cxlr_pmem->hpa_range.end = p->res->end;
+
+       /* Snapshot the region configuration underneath the cxl_rwsem.region */
+       cxlr_pmem->nr_mappings = p->nr_targets;
+       for (i = 0; i < p->nr_targets; i++) {
+               struct cxl_endpoint_decoder *cxled = p->targets[i];
+               struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+               struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+
+               /*
+                * Regions never span CXL root devices, so by definition the
+                * bridge for one device is the same for all.
+                */
+               if (i == 0) {
+                       cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
+                       if (!cxl_nvb)
+                               return -ENODEV;
+                       cxlr->cxl_nvb = cxl_nvb;
+               }
+               m->cxlmd = cxlmd;
+               get_device(&cxlmd->dev);
+               m->start = cxled->dpa_res->start;
+               m->size = resource_size(cxled->dpa_res);
+               m->position = i;
+       }
+
+       dev = &cxlr_pmem->dev;
+       device_initialize(dev);
+       lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
+       device_set_pm_not_required(dev);
+       dev->parent = &cxlr->dev;
+       dev->bus = &cxl_bus_type;
+       dev->type = &cxl_pmem_region_type;
+       cxlr_pmem->cxlr = cxlr;
+       cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
+
+       return 0;
+}
+
+static void cxlr_pmem_unregister(void *_cxlr_pmem)
+{
+       struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
+       struct cxl_region *cxlr = cxlr_pmem->cxlr;
+       struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
+
+       /*
+        * Either the bridge is in ->remove() context under the device_lock(),
+        * or cxlr_release_nvdimm() is cancelling the bridge's release action
+        * for @cxlr_pmem and doing it itself (while manually holding the bridge
+        * lock).
+        */
+       device_lock_assert(&cxl_nvb->dev);
+       cxlr->cxlr_pmem = NULL;
+       cxlr_pmem->cxlr = NULL;
+       device_unregister(&cxlr_pmem->dev);
+}
+
+static void cxlr_release_nvdimm(void *_cxlr)
+{
+       struct cxl_region *cxlr = _cxlr;
+       struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
+
+       scoped_guard(device, &cxl_nvb->dev) {
+               if (cxlr->cxlr_pmem)
+                       devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
+                                           cxlr->cxlr_pmem);
+       }
+       cxlr->cxl_nvb = NULL;
+       put_device(&cxl_nvb->dev);
+}
+
+/**
+ * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
+ * @cxlr: parent CXL region for this pmem region bridge device
+ *
+ * Return: 0 on success negative error code on failure.
+ */
+int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
+{
+       struct cxl_pmem_region *cxlr_pmem;
+       struct cxl_nvdimm_bridge *cxl_nvb;
+       struct device *dev;
+       int rc;
+
+       rc = cxl_pmem_region_alloc(cxlr);
+       if (rc)
+               return rc;
+       cxlr_pmem = cxlr->cxlr_pmem;
+       cxl_nvb = cxlr->cxl_nvb;
+
+       dev = &cxlr_pmem->dev;
+       rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
+       if (rc)
+               goto err;
+
+       rc = device_add(dev);
+       if (rc)
+               goto err;
+
+       dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
+               dev_name(dev));
+
+       scoped_guard(device, &cxl_nvb->dev) {
+               if (cxl_nvb->dev.driver)
+                       rc = devm_add_action_or_reset(&cxl_nvb->dev,
+                                                     cxlr_pmem_unregister,
+                                                     cxlr_pmem);
+               else
+                       rc = -ENXIO;
+       }
+
+       if (rc)
+               goto err_bridge;
+
+       /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
+       return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
+
+err:
+       put_device(dev);
+err_bridge:
+       put_device(&cxl_nvb->dev);
+       cxlr->cxl_nvb = NULL;
+       return rc;
+}
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e4097c464ed3..fc56f8f03805 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2747,46 +2747,6 @@ static ssize_t delete_region_store(struct device *dev,
 }
 DEVICE_ATTR_WO(delete_region);
 
-static void cxl_pmem_region_release(struct device *dev)
-{
-       struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
-       int i;
-
-       for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
-               struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
-
-               put_device(&cxlmd->dev);
-       }
-
-       kfree(cxlr_pmem);
-}
-
-static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
-       &cxl_base_attribute_group,
-       NULL,
-};
-
-const struct device_type cxl_pmem_region_type = {
-       .name = "cxl_pmem_region",
-       .release = cxl_pmem_region_release,
-       .groups = cxl_pmem_region_attribute_groups,
-};
-
-bool is_cxl_pmem_region(struct device *dev)
-{
-       return dev->type == &cxl_pmem_region_type;
-}
-EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, "CXL");
-
-struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
-{
-       if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
-                         "not a cxl_pmem_region device\n"))
-               return NULL;
-       return container_of(dev, struct cxl_pmem_region, dev);
-}
-EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
-
 struct cxl_poison_context {
        struct cxl_port *port;
        int part;
@@ -3236,64 +3196,6 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
        return -ENXIO;
 }
 
-static struct lock_class_key cxl_pmem_region_key;
-
-static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
-{
-       struct cxl_region_params *p = &cxlr->params;
-       struct cxl_nvdimm_bridge *cxl_nvb;
-       struct device *dev;
-       int i;
-
-       guard(rwsem_read)(&cxl_rwsem.region);
-       if (p->state != CXL_CONFIG_COMMIT)
-               return -ENXIO;
-
-       struct cxl_pmem_region *cxlr_pmem __free(kfree) =
-               kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
-       if (!cxlr_pmem)
-               return -ENOMEM;
-
-       cxlr_pmem->hpa_range.start = p->res->start;
-       cxlr_pmem->hpa_range.end = p->res->end;
-
-       /* Snapshot the region configuration underneath the cxl_rwsem.region */
-       cxlr_pmem->nr_mappings = p->nr_targets;
-       for (i = 0; i < p->nr_targets; i++) {
-               struct cxl_endpoint_decoder *cxled = p->targets[i];
-               struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
-               struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
-
-               /*
-                * Regions never span CXL root devices, so by definition the
-                * bridge for one device is the same for all.
-                */
-               if (i == 0) {
-                       cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
-                       if (!cxl_nvb)
-                               return -ENODEV;
-                       cxlr->cxl_nvb = cxl_nvb;
-               }
-               m->cxlmd = cxlmd;
-               get_device(&cxlmd->dev);
-               m->start = cxled->dpa_res->start;
-               m->size = resource_size(cxled->dpa_res);
-               m->position = i;
-       }
-
-       dev = &cxlr_pmem->dev;
-       device_initialize(dev);
-       lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
-       device_set_pm_not_required(dev);
-       dev->parent = &cxlr->dev;
-       dev->bus = &cxl_bus_type;
-       dev->type = &cxl_pmem_region_type;
-       cxlr_pmem->cxlr = cxlr;
-       cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
-
-       return 0;
-}
-
 static void cxl_dax_region_release(struct device *dev)
 {
        struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
@@ -3357,92 +3259,6 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
        return cxlr_dax;
 }
 
-static void cxlr_pmem_unregister(void *_cxlr_pmem)
-{
-       struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
-       struct cxl_region *cxlr = cxlr_pmem->cxlr;
-       struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
-
-       /*
-        * Either the bridge is in ->remove() context under the device_lock(),
-        * or cxlr_release_nvdimm() is cancelling the bridge's release action
-        * for @cxlr_pmem and doing it itself (while manually holding the bridge
-        * lock).
-        */
-       device_lock_assert(&cxl_nvb->dev);
-       cxlr->cxlr_pmem = NULL;
-       cxlr_pmem->cxlr = NULL;
-       device_unregister(&cxlr_pmem->dev);
-}
-
-static void cxlr_release_nvdimm(void *_cxlr)
-{
-       struct cxl_region *cxlr = _cxlr;
-       struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
-
-       scoped_guard(device, &cxl_nvb->dev) {
-               if (cxlr->cxlr_pmem)
-                       devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
-                                           cxlr->cxlr_pmem);
-       }
-       cxlr->cxl_nvb = NULL;
-       put_device(&cxl_nvb->dev);
-}
-
-/**
- * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
- * @cxlr: parent CXL region for this pmem region bridge device
- *
- * Return: 0 on success negative error code on failure.
- */
-static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
-{
-       struct cxl_pmem_region *cxlr_pmem;
-       struct cxl_nvdimm_bridge *cxl_nvb;
-       struct device *dev;
-       int rc;
-
-       rc = cxl_pmem_region_alloc(cxlr);
-       if (rc)
-               return rc;
-       cxlr_pmem = cxlr->cxlr_pmem;
-       cxl_nvb = cxlr->cxl_nvb;
-
-       dev = &cxlr_pmem->dev;
-       rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
-       if (rc)
-               goto err;
-
-       rc = device_add(dev);
-       if (rc)
-               goto err;
-
-       dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
-               dev_name(dev));
-
-       scoped_guard(device, &cxl_nvb->dev) {
-               if (cxl_nvb->dev.driver)
-                       rc = devm_add_action_or_reset(&cxl_nvb->dev,
-                                                     cxlr_pmem_unregister,
-                                                     cxlr_pmem);
-               else
-                       rc = -ENXIO;
-       }
-
-       if (rc)
-               goto err_bridge;
-
-       /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
-       return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
-
-err:
-       put_device(dev);
-err_bridge:
-       put_device(&cxl_nvb->dev);
-       cxlr->cxl_nvb = NULL;
-       return rc;
-}
-
 static void cxlr_dax_unregister(void *_cxlr_dax)
 {
        struct cxl_dax_region *cxlr_dax = _cxlr_dax;
-- 
2.52.0

