devm_cxl_pmem_add_region() is used to create a CXL region based on the
region information scanned from the LSA.

devm_cxl_add_region() only allocates the cxlr; its fields are filled in
later by a userspace tool through the device attribute (*_store())
callbacks. devm_cxl_pmem_add_region() takes its inspiration from those
(*_store()) callbacks: it allocates the cxlr, fills it with the
information parsed from the LSA, and calls device_add(&cxlr->dev) to
initiate the further region creation probes.

Reviewed-by: Dave Jiang <[email protected]>
Signed-off-by: Neeraj Kumar <[email protected]>
---
 drivers/cxl/core/region.c | 118 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 118 insertions(+)

diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 2e60e5e72551..e384eacc46ae 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2621,6 +2621,121 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
 	return ERR_PTR(rc);
 }
 
+static ssize_t alloc_region_hpa(struct cxl_region *cxlr, u64 size)
+{
+	int rc;
+
+	if (!size)
+		return -EINVAL;
+
+	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+		return rc;
+
+	return alloc_hpa(cxlr, size);
+}
+
+static ssize_t alloc_region_dpa(struct cxl_endpoint_decoder *cxled, u64 size)
+{
+	if (!size)
+		return -EINVAL;
+
+	if (!IS_ALIGNED(size, SZ_256M))
+		return -EINVAL;
+
+	return cxl_dpa_alloc(cxled, size);
+}
+
+static struct cxl_region *
+cxl_pmem_region_prep(struct cxl_root_decoder *cxlrd, int id,
+		     struct cxl_pmem_region_params *params,
+		     struct cxl_endpoint_decoder *cxled,
+		     enum cxl_decoder_type type)
+{
+	struct cxl_region_params *p;
+	struct device *dev;
+	int rc;
+
+	struct cxl_region *cxlr __free(put_cxl_region) =
+		cxl_region_alloc(cxlrd, id);
+	if (IS_ERR(cxlr))
+		return cxlr;
+
+	dev = &cxlr->dev;
+	rc = dev_set_name(dev, "region%d", id);
+	if (rc)
+		return ERR_PTR(rc);
+
+	cxlr->mode = CXL_PARTMODE_PMEM;
+	cxlr->type = type;
+
+	p = &cxlr->params;
+	p->uuid = params->uuid;
+	p->interleave_ways = params->nlabel;
+	p->interleave_granularity = params->ig;
+
+	rc = alloc_region_hpa(cxlr, params->rawsize);
+	if (rc)
+		return ERR_PTR(rc);
+
+	rc = cxl_dpa_set_part(cxled, CXL_PARTMODE_PMEM);
+	if (rc)
+		return ERR_PTR(rc);
+
+	rc = alloc_region_dpa(cxled, params->rawsize);
+	if (rc)
+		return ERR_PTR(rc);
+
+	/*
+	 * TODO: Currently only interleave_ways == 1 is supported, i.e.
+	 * one region per mem device. That means the mem device position
+	 * (params->position) is always 0, so only one target is
+	 * attached, at params->position.
+	 */
+	if (params->position)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	rc = attach_target(cxlr, cxled, params->position, TASK_INTERRUPTIBLE);
+	if (rc)
+		return ERR_PTR(rc);
+
+	rc = __commit(cxlr);
+	if (rc)
+		return ERR_PTR(rc);
+
+	rc = device_add(dev);
+	if (rc)
+		return ERR_PTR(rc);
+
+	return no_free_ptr(cxlr);
+}
+
+static struct cxl_region *
+devm_cxl_pmem_add_region(struct cxl_root_decoder *cxlrd, int id,
+			 struct cxl_pmem_region_params *params,
+			 struct cxl_endpoint_decoder *cxled,
+			 enum cxl_decoder_type type)
+{
+	struct cxl_port *root_port;
+	struct cxl_region *cxlr;
+	int rc;
+
+	cxlr = cxl_pmem_region_prep(cxlrd, id, params, cxled, type);
+	if (IS_ERR(cxlr))
+		return cxlr;
+
+	root_port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+	rc = devm_add_action_or_reset(root_port->uport_dev,
+				      unregister_region, cxlr);
+	if (rc)
+		return ERR_PTR(rc);
+
+	dev_dbg(root_port->uport_dev, "%s: created %s\n",
+		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(&cxlr->dev));
+
+	return cxlr;
+}
+
 static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
 {
 	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
@@ -2663,6 +2778,9 @@ struct cxl_region *cxl_create_region(struct cxl_root_decoder *cxlrd,
 		return ERR_PTR(-EBUSY);
 	}
 
+	if (pmem_params)
+		return devm_cxl_pmem_add_region(cxlrd, id, pmem_params, cxled,
+						CXL_DECODER_HOSTONLYMEM);
 	return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
 }
 
-- 
2.34.1
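
Not part of the patch, for illustration only: a minimal sketch of how an
LSA-scanning caller might feed this new path. It assumes cxl_create_region()
in this series takes (cxlrd, id, mode, cxled, pmem_params), as suggested by
the call sites in the hunk above, and that cxl_pmem_region_params carries the
uuid/nlabel/ig/rawsize/position fields used there. The caller name and the
parsed_region_label structure are hypothetical placeholders, not existing
CXL core symbols.

	/*
	 * Hypothetical caller: fill cxl_pmem_region_params from an already
	 * parsed LSA region label and hand it to cxl_create_region(). The
	 * field-to-parameter mapping mirrors cxl_pmem_region_prep() above.
	 */
	static int cxl_pmem_region_from_lsa(struct cxl_root_decoder *cxlrd,
					    struct cxl_endpoint_decoder *cxled,
					    const struct parsed_region_label *label)
	{
		struct cxl_pmem_region_params params = {
			.uuid = label->uuid,		/* region UUID from the LSA */
			.nlabel = label->nlabel,	/* -> p->interleave_ways */
			.ig = label->ig,		/* -> p->interleave_granularity */
			.rawsize = label->rawsize,	/* HPA/DPA size, 256M aligned */
			.position = 0,			/* only position 0 until ways > 1 */
		};
		int id = atomic_read(&cxlrd->region_id);
		struct cxl_region *cxlr;

		/* Assumed signature; the series extends cxl_create_region() */
		cxlr = cxl_create_region(cxlrd, id, CXL_PARTMODE_PMEM, cxled,
					 &params);
		if (IS_ERR(cxlr))
			return PTR_ERR(cxlr);

		return 0;
	}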

