From: Leon Romanovsky <[email protected]> Split CQ creation into distinct kernel and user flows. The erdma driver uses a problematic pattern, inherited from mlx4, that shares and caches umem in erdma_map_user_dbrecords(). This design blocks the driver from supporting generic umem sources (VMA, dmabuf, memfd, and others).
Signed-off-by: Leon Romanovsky <[email protected]> --- drivers/infiniband/hw/erdma/erdma_main.c | 1 + drivers/infiniband/hw/erdma/erdma_verbs.c | 97 ++++++++++++++++++++----------- drivers/infiniband/hw/erdma/erdma_verbs.h | 2 + 3 files changed, 67 insertions(+), 33 deletions(-) diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c index f35b30235018..1b6426e89d80 100644 --- a/drivers/infiniband/hw/erdma/erdma_main.c +++ b/drivers/infiniband/hw/erdma/erdma_main.c @@ -505,6 +505,7 @@ static const struct ib_device_ops erdma_device_ops = { .alloc_pd = erdma_alloc_pd, .alloc_ucontext = erdma_alloc_ucontext, .create_cq = erdma_create_cq, + .create_user_cq = erdma_create_user_cq, .create_qp = erdma_create_qp, .dealloc_pd = erdma_dealloc_pd, .dealloc_ucontext = erdma_dealloc_ucontext, diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index 058edc42de58..6f809907fec5 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -1952,8 +1952,8 @@ static int erdma_init_kernel_cq(struct erdma_cq *cq) return -ENOMEM; } -int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, - struct uverbs_attr_bundle *attrs) +int erdma_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct uverbs_attr_bundle *attrs) { struct ib_udata *udata = &attrs->driver_udata; struct erdma_cq *cq = to_ecq(ibcq); @@ -1962,6 +1962,11 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, int ret; struct erdma_ucontext *ctx = rdma_udata_to_drv_context( udata, struct erdma_ucontext, ibucontext); + struct erdma_ureq_create_cq ureq; + struct erdma_uresp_create_cq uresp; + + if (ibcq->umem) + return -EOPNOTSUPP; if (depth > dev->attrs.max_cqe) return -EINVAL; @@ -1977,31 +1982,22 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, if (ret < 0) return ret; - if 
(!rdma_is_kernel_res(&ibcq->res)) { - struct erdma_ureq_create_cq ureq; - struct erdma_uresp_create_cq uresp; - - ret = ib_copy_from_udata(&ureq, udata, - min(udata->inlen, sizeof(ureq))); - if (ret) - goto err_out_xa; + ret = ib_copy_from_udata(&ureq, udata, + min(udata->inlen, sizeof(ureq))); + if (ret) + goto err_out_xa; - ret = erdma_init_user_cq(ctx, cq, &ureq); - if (ret) - goto err_out_xa; + ret = erdma_init_user_cq(ctx, cq, &ureq); + if (ret) + goto err_out_xa; - uresp.cq_id = cq->cqn; - uresp.num_cqe = depth; + uresp.cq_id = cq->cqn; + uresp.num_cqe = depth; - ret = ib_copy_to_udata(udata, &uresp, - min(sizeof(uresp), udata->outlen)); - if (ret) - goto err_free_res; - } else { - ret = erdma_init_kernel_cq(cq); - if (ret) - goto err_out_xa; - } + ret = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (ret) + goto err_free_res; ret = create_cq_cmd(ctx, cq); if (ret) @@ -2010,19 +2006,54 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, return 0; err_free_res: - if (!rdma_is_kernel_res(&ibcq->res)) { - erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page); - put_mtt_entries(dev, &cq->user_cq.qbuf_mem); - } else { - dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT, - cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr); - dma_pool_free(dev->db_pool, cq->kern_cq.dbrec, - cq->kern_cq.dbrec_dma); - } + erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page); + put_mtt_entries(dev, &cq->user_cq.qbuf_mem); err_out_xa: xa_erase(&dev->cq_xa, cq->cqn); + return ret; +} + +int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct uverbs_attr_bundle *attrs) +{ + struct erdma_cq *cq = to_ecq(ibcq); + struct erdma_dev *dev = to_edev(ibcq->device); + unsigned int depth = attr->cqe; + int ret; + + if (depth > dev->attrs.max_cqe) + return -EINVAL; + depth = roundup_pow_of_two(depth); + cq->ibcq.cqe = depth; + cq->depth = depth; + cq->assoc_eqn = attr->comp_vector + 1; + + ret = 
xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq, + XA_LIMIT(1, dev->attrs.max_cq - 1), + &dev->next_alloc_cqn, GFP_KERNEL); + if (ret < 0) + return ret; + + ret = erdma_init_kernel_cq(cq); + if (ret) + goto err_out_xa; + + ret = create_cq_cmd(NULL, cq); + if (ret) + goto err_free_res; + + return 0; + +err_free_res: + dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT, + cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr); + dma_pool_free(dev->db_pool, cq->kern_cq.dbrec, + cq->kern_cq.dbrec_dma); + +err_out_xa: + xa_erase(&dev->cq_xa, cq->cqn); return ret; } diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h index 7d8d3fe501d5..21a4fb404806 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.h +++ b/drivers/infiniband/hw/erdma/erdma_verbs.h @@ -435,6 +435,8 @@ int erdma_get_port_immutable(struct ib_device *dev, u32 port, struct ib_port_immutable *ib_port_immutable); int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct uverbs_attr_bundle *attrs); +int erdma_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct uverbs_attr_bundle *attrs); int erdma_query_port(struct ib_device *dev, u32 port, struct ib_port_attr *attr); int erdma_query_gid(struct ib_device *dev, u32 port, int idx, -- 2.52.0
