From bfb3fb43bab5f03e124c4eae13012e27432fe405 Mon Sep 17 00:00:00 2001
From: Vladimir Sokolovsky <[EMAIL PROTECTED]>
Date: Tue, 25 Mar 2008 17:09:08 +0200
Subject: [PATCH] Added resize CQ capability.

Signed-off-by: Vladimir Sokolovsky <[EMAIL PROTECTED]>
---
 src/cq.c    |   48 +++++++++++++++++++++++++++++++++++++++++++-----
 src/mlx4.h  |    4 ++++
 src/verbs.c |   56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 101 insertions(+), 7 deletions(-)

diff --git a/src/cq.c b/src/cq.c
index 91297e4..ee7dd7b 100644
--- a/src/cq.c
+++ b/src/cq.c
@@ -114,10 +114,10 @@ static struct mlx4_cqe *get_cqe(struct mlx4_cq *cq, int entry)

 static void *get_sw_cqe(struct mlx4_cq *cq, int n)
 {
-       struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);
+       struct mlx4_cqe *cqe = get_cqe(cq, n & cq->cqe);

        return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
-               !!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe;
+               !!(n & (cq->cqe + 1))) ? NULL : cqe;
 }

 static struct mlx4_cqe *next_cqe_sw(struct mlx4_cq *cq)
@@ -201,6 +201,7 @@ static int mlx4_poll_one(struct mlx4_cq *cq,
        int is_error;
        int is_send;

+repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return CQ_EMPTY;
@@ -215,6 +216,9 @@ static int mlx4_poll_one(struct mlx4_cq *cq,
         */
        rmb();

+       if ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)
+               goto repoll;
+
        qpn = ntohl(cqe->my_qpn);

        is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
@@ -398,7 +402,7 @@ void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
-               if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
+               if (prod_index == cq->cons_index + cq->cqe)
                        break;

        /*
@@ -406,13 +410,13 @@ void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->cons_index >= 0) {
-               cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
+               cqe = get_cqe(cq, prod_index & cq->cqe);
                if ((ntohl(cqe->my_qpn) & 0xffffff) == qpn) {
                        if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
                        ++nfreed;
                } else if (nfreed) {
-                       dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
+                       dest = get_cqe(cq, (prod_index + nfreed) & cq->cqe);
                        owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
                        memcpy(dest, cqe, sizeof *cqe);
                        dest->owner_sr_opcode = owner_bit |
@@ -433,6 +437,40 @@ void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
        pthread_spin_unlock(&cq->lock);
 }

+int mlx4_get_outstanding_cqes(struct mlx4_cq *cq)
+{
+       int i;
+
+       for (i = cq->cons_index; get_sw_cqe(cq, (i & cq->cqe)); ++i)
+               ;
+
+       return i - cq->cons_index;
+}
+
 void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe)
 {
+       struct mlx4_cqe *cqe;
+       int i;
+
+       i = cq->cons_index;
+       cqe = get_cqe(cq, (i & old_cqe));
+
+       while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+               memcpy(buf + ((i + 1) & cq->ibv_cq.cqe) * MLX4_CQ_ENTRY_SIZE,
+                      cqe, MLX4_CQ_ENTRY_SIZE);
+               ++i;
+               cqe = get_cqe(cq, (i & old_cqe));
+       }
+
+       ++cq->cons_index;
+}
+
+int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent)
+{
+       if (mlx4_alloc_buf(buf, align(nent * MLX4_CQ_ENTRY_SIZE, dev->page_size),
+                   dev->page_size))
+               return -1;
+       memset(buf->buf, 0, nent * MLX4_CQ_ENTRY_SIZE);
+
+       return 0;
 }
diff --git a/src/mlx4.h b/src/mlx4.h
index 3710a17..61076ac 100644
--- a/src/mlx4.h
+++ b/src/mlx4.h
@@ -174,12 +174,14 @@ struct mlx4_pd {
 struct mlx4_cq {
        struct ibv_cq                   ibv_cq;
        struct mlx4_buf                 buf;
+       struct mlx4_buf                 resize_buf;
        pthread_spinlock_t              lock;
        uint32_t                        cqn;
        uint32_t                        cons_index;
        uint32_t                       *set_ci_db;
        uint32_t                       *arm_db;
        int                             arm_sn;
+       int                             cqe;
 };

 struct mlx4_srq {
@@ -307,6 +309,7 @@ int mlx4_dereg_mr(struct ibv_mr *mr);
 struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
                               struct ibv_comp_channel *channel,
                               int comp_vector);
+int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent);
 int mlx4_resize_cq(struct ibv_cq *cq, int cqe);
 int mlx4_destroy_cq(struct ibv_cq *cq);
 int mlx4_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
@@ -314,6 +317,7 @@ int mlx4_arm_cq(struct ibv_cq *cq, int solicited);
 void mlx4_cq_event(struct ibv_cq *cq);
 void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn,
                    struct mlx4_srq *srq);
+int mlx4_get_outstanding_cqes(struct mlx4_cq *cq);
+void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe);

 struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
diff --git a/src/verbs.c b/src/verbs.c
index 50e0947..ba04af7 100644
--- a/src/verbs.c
+++ b/src/verbs.c
@@ -209,6 +209,7 @@ struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
                goto err_db;

        cq->cqn = resp.cqn;
+       cq->cqe = cq->ibv_cq.cqe;

        return &cq->ibv_cq;

@@ -226,8 +227,59 @@ err:

 int mlx4_resize_cq(struct ibv_cq *ibcq, int cqe)
 {
-       /* XXX resize CQ not implemented */
-       return ENOSYS;
+       struct mlx4_cq *cq = to_mcq(ibcq);
+       struct mlx4_resize_cq cmd;
+       struct mlx4_buf buf;
+       int old_cqe, outst_cqe, ret;
+
+       /* Sanity check CQ size before proceeding */
+       if (cqe > 0x3fffff)
+               return EINVAL;
+
+       pthread_spin_lock(&cq->lock);
+
+       cqe = align_queue_size(cqe);
+       if (cqe == ibcq->cqe + 1) {
+               ret = 0;
+               goto out;
+       }
+
+       /* Can't be smaller than the number of outstanding CQEs */
+       outst_cqe = mlx4_get_outstanding_cqes(cq);
+       if (cqe < outst_cqe + 1) {
+               ret = 0;
+               goto out;
+       }
+
+       ret = mlx4_alloc_cq_buf(to_mdev(ibcq->context->device), &buf, cqe);
+       if (ret)
+               goto out;
+
+       cmd.buf_addr = (uintptr_t) buf.buf;
+
+#ifdef IBV_CMD_RESIZE_CQ_HAS_RESP_PARAMS
+       {
+               struct ibv_resize_cq_resp resp;
+               ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd,
+                                       &resp, sizeof resp);
+       }
+#else
+       ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd);
+#endif
+       if (ret) {
+               mlx4_free_buf(&buf);
+               goto out;
+       }
+
+       mlx4_cq_resize_copy_cqes(cq, buf.buf, cq->cqe);
+       mlx4_free_buf(&cq->buf);
+
+       cq->buf = buf;
+       cq->cqe = cq->ibv_cq.cqe;
+
+out:
+       pthread_spin_unlock(&cq->lock);
+       return ret;
 }

 int mlx4_destroy_cq(struct ibv_cq *cq)
--
1.5.4.2

_______________________________________________
general mailing list
[email protected]
http://lists.openfabrics.org/cgi-bin/mailman/listinfo/general

To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general

Reply via email to