Register DAX from the HMEM path only after determining that CXL owns the Soft Reserved range. This avoids onlining memory under CXL before ownership is finalized and prevents a failed teardown when HMEM must reclaim the range.
Introduce cxl_register_dax() to walk overlapping CXL regions and register DAX from CXL only when cxl_regions_fully_map() confirms full coverage of the span. If CXL does not own the span, skip cxl_dax setup and allow HMEM to register DAX and online memory. With probe time DAX creation already suppressed in the previous patch, this change ensures that only the single owner (CXL or HMEM) performs DAX/KMEM setup. Signed-off-by: Smita Koralahalli <[email protected]> --- drivers/cxl/core/region.c | 42 +++++++++++++++++++++++++++++++++++++++ drivers/cxl/cxl.h | 5 +++++ drivers/dax/hmem/hmem.c | 5 +++-- 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index c17cd8706b9d..38e7ec6a087b 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -3784,6 +3784,48 @@ struct cxl_range_ctx { bool found; }; +static void cxl_region_enable_dax(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + int rc; + + if (walk_iomem_res_desc(IORES_DESC_NONE, + IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY, + p->res->start, p->res->end, cxlr, + is_system_ram) > 0) + return; + + rc = devm_cxl_add_dax_region(cxlr); + if (rc) + dev_warn(&cxlr->dev, "failed to add DAX for %s: %d\n", + dev_name(&cxlr->dev), rc); +} + +static int cxl_register_dax_cb(struct device *dev, void *data) +{ + struct cxl_range_ctx *ctx = data; + struct cxl_region *cxlr; + + cxlr = cxlr_overlapping_range(dev, ctx->start, ctx->end); + if (!cxlr) + return 0; + + if (cxlr->mode != CXL_PARTMODE_RAM) + return 0; + + cxl_region_enable_dax(cxlr); + + return 0; +} + +void cxl_register_dax(resource_size_t start, resource_size_t end) +{ + struct cxl_range_ctx ctx = { .start = start, .end = end }; + + bus_for_each_dev(&cxl_bus_type, NULL, &ctx, cxl_register_dax_cb); +} +EXPORT_SYMBOL_GPL(cxl_register_dax); + static int cxl_region_map_cb(struct device *dev, void *data) { struct cxl_range_ctx *ctx = data; diff --git a/drivers/cxl/cxl.h 
b/drivers/cxl/cxl.h index 324220596890..414ddf6c35d7 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -879,6 +879,7 @@ int cxl_add_to_region(struct cxl_endpoint_decoder *cxled); struct cxl_dax_region *to_cxl_dax_region(struct device *dev); u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa); bool cxl_regions_fully_map(resource_size_t start, resource_size_t end); +void cxl_register_dax(resource_size_t start, resource_size_t end); #else static inline bool is_cxl_pmem_region(struct device *dev) { @@ -906,6 +907,10 @@ static inline bool cxl_regions_fully_map(resource_size_t start, { return false; } +static inline void cxl_register_dax(resource_size_t start, + resource_size_t end) +{ +} #endif void cxl_endpoint_parse_cdat(struct cxl_port *port); diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c index db4c46337ac3..b9312e0f2e62 100644 --- a/drivers/dax/hmem/hmem.c +++ b/drivers/dax/hmem/hmem.c @@ -155,9 +155,10 @@ static int handle_deferred_cxl(struct device *host, int target_nid, if (region_intersects(res->start, resource_size(res), IORESOURCE_MEM, IORES_DESC_CXL) != REGION_DISJOINT) { - if (cxl_regions_fully_map(res->start, res->end)) + if (cxl_regions_fully_map(res->start, res->end)) { dax_cxl_mode = DAX_CXL_MODE_DROP; - else + cxl_register_dax(res->start, res->end); + } else dax_cxl_mode = DAX_CXL_MODE_REGISTER; hmem_register_device(host, target_nid, res); -- 2.17.1

