On Fri, Feb 13, 2026 at 4:31 PM Leon Romanovsky <[email protected]> wrote:
>
> From: Leon Romanovsky <[email protected]>
>
> There is no need to defer the CQ resize operation, as it is intended to
> be completed in one pass. The current bnxt_re_resize_cq() implementation
> does not handle concurrent CQ resize requests, and this will be addressed
> in the following patches.
bnxt HW requires that the previous CQ memory stay available to the HW
until the HW generates a cut-off CQE for the CQ being resized. That is
why the user library keeps polling for completions after the resize_cq
call returns: once the polling thread sees the expected cut-off CQE, it
invokes the driver to free the old CQ memory. So the ib_umem_release of
the old memory must wait until then, and this patch doesn't guarantee
that. Do you see a better way to handle this requirement?
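
To make the required ordering concrete, here is a rough sketch of the
provider-side flow I am describing. All names below (the cut-off
opcode, the ring fields, the resize-complete upcall) are made up for
illustration and are not the actual libbnxt_re symbols:

#include <stdbool.h>
#include <stddef.h>

enum { CQE_OP_CUTOFF = 0x7f };		/* assumed cut-off opcode */

struct sketch_cqe { int opcode; };

struct sketch_cq {
	struct sketch_cqe *old_ring;	/* ring HW is still draining */
	struct sketch_cqe *new_ring;	/* ring allocated by resize_cq */
	bool resize_pending;
};

/* Assumed helpers: fetch the next valid CQE, and the upcall into the
 * kernel driver that lets it ib_umem_release() the old ring.
 */
struct sketch_cqe *next_valid_cqe(struct sketch_cq *cq);
void kernel_resize_complete(struct sketch_cq *cq);

static int poll_one(struct sketch_cq *cq)
{
	struct sketch_cqe *cqe = next_valid_cqe(cq);

	if (!cqe)
		return 0;

	if (cq->resize_pending && cqe->opcode == CQE_OP_CUTOFF) {
		/* HW is finished with the old ring; only now is it
		 * safe for the kernel to release the old umem.
		 */
		cq->old_ring = cq->new_ring;
		cq->resize_pending = false;
		kernel_resize_complete(cq);
		return 0;
	}

	/* ... normal completion processing ... */
	return 1;
}

With the deferral removed as in this patch, ib_umem_release() runs
before the cut-off CQE has been seen, which is the window I am worried
about.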

>
> Signed-off-by: Leon Romanovsky <[email protected]>
> ---
>  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 33 +++++++++-----------------------
>  1 file changed, 9 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> index d652018c19b3..2aecfbbb7eaf 100644
> --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> @@ -3309,20 +3309,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
>         return rc;
>  }
>
> -static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
> -{
> -       struct bnxt_re_dev *rdev = cq->rdev;
> -
> -       bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
> -
> -       cq->qplib_cq.max_wqe = cq->resize_cqe;
> -       if (cq->resize_umem) {
> -               ib_umem_release(cq->ib_cq.umem);
> -               cq->ib_cq.umem = cq->resize_umem;
> -               cq->resize_umem = NULL;
> -               cq->resize_cqe = 0;
> -       }
> -}
>
>  int bnxt_re_resize_cq(struct ib_cq *ibcq, unsigned int cqe,
>                       struct ib_udata *udata)
> @@ -3387,7 +3373,15 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, unsigned int cqe,
>                 goto fail;
>         }
>
> -       cq->ib_cq.cqe = cq->resize_cqe;
> +       bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
> +
> +       cq->qplib_cq.max_wqe = cq->resize_cqe;
> +       ib_umem_release(cq->ib_cq.umem);
> +       cq->ib_cq.umem = cq->resize_umem;
> +       cq->resize_umem = NULL;
> +       cq->resize_cqe = 0;
> +
> +       cq->ib_cq.cqe = entries;
>         atomic_inc(&rdev->stats.res.resize_count);
>
>         return 0;
> @@ -3907,15 +3901,6 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
>         struct bnxt_re_sqp_entries *sqp_entry = NULL;
>         unsigned long flags;
>
> -       /* User CQ; the only processing we do is to
> -        * complete any pending CQ resize operation.
> -        */
> -       if (cq->ib_cq.umem) {
> -               if (cq->resize_umem)
> -                       bnxt_re_resize_cq_complete(cq);
> -               return 0;
> -       }
> -
>         spin_lock_irqsave(&cq->cq_lock, flags);
>         budget = min_t(u32, num_entries, cq->max_cql);
>         num_entries = budget;
>
> --
> 2.52.0
>
