From 644e491927e900337785ed664cc141681f7c730f Mon Sep 17 00:00:00 2001
From: Vladimir Sokolovsky <[EMAIL PROTECTED]>
Date: Tue, 25 Mar 2008 10:23:09 +0200
Subject: [PATCH] MLX4: Added resize_cq capability.

Signed-off-by: Vladimir Sokolovsky <[EMAIL PROTECTED]>
---
 drivers/infiniband/hw/mlx4/cq.c      |  298 ++++++++++++++++++++++++++++++++++
 drivers/infiniband/hw/mlx4/main.c    |    2 +
 drivers/infiniband/hw/mlx4/mlx4_ib.h |   13 ++
 3 files changed, 313 insertions(+), 0 deletions(-)

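A note on how the new capability is consumed (illustrative, not part of the
patch itself): kernel ULPs go through the existing ib_resize_cq() verb, which
now dispatches to mlx4_ib_resize_cq(); userspace reaches the same path via
libibverbs' ibv_resize_cq(), since IB_USER_VERBS_CMD_RESIZE_CQ is added to the
uverbs command mask below. A minimal sketch for a kernel consumer, assuming a
CQ already created with ib_create_cq() (grow_cq() is a hypothetical wrapper
name used only for illustration):

	#include <rdma/ib_verbs.h>

	static int grow_cq(struct ib_cq *cq, int new_cqe)
	{
		/* Dispatches to mlx4_ib_resize_cq() through the resize_cq verb. */
		return ib_resize_cq(cq, new_cqe);
	}
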
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 401178f..9302502 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -36,6 +36,14 @@
 #include "mlx4_ib.h"
 #include "user.h"

+enum {
+       MLX4_MAX_DIRECT_CQ_SIZE = 2 * PAGE_SIZE
+};
+
+enum {
+       MLX4_CQ_ENTRY_SIZE = 0x20
+};
+
 static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
 {
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
@@ -124,7 +132,11 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
        entries      = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
        buf_size     = entries * sizeof (struct mlx4_cqe);
+       mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
+       cq->is_kernel = !context;
+       cq->resize_buf = NULL;
+       cq->resize_umem = NULL;

        if (context) {
                struct mlx4_ib_create_cq ucmd;
@@ -223,6 +235,238 @@ err_cq:
        return ERR_PTR(err);
 }

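+/*
+ * Kernel-consumer resize: allocate a fresh kernel CQ buffer (and its MTT)
+ * that the CQ will be switched over to.
+ */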
+static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+                                 int entries)
+{
+       int err;
+
+       if (cq->resize_buf) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+       if (!cq->resize_buf) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = mlx4_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+       if (err) {
+               spin_lock_irq(&cq->lock);
+               kfree(cq->resize_buf);
+               cq->resize_buf = NULL;
+               spin_unlock_irq(&cq->lock);
+               goto out;
+       }
+
+       cq->resize_buf->cqe = entries - 1;
+
+       return 0;
+
+out:
+       return err;
+}
+
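+/*
+ * Userspace-consumer resize: pin the buffer supplied in the resize_cq
+ * command and build an MTT for it.
+ */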
+static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+                                  int entries, struct ib_udata *udata)
+{
+       struct mlx4_ib_resize_cq ucmd;
+       int buf_size;
+       int err;
+
+       if (cq->resize_umem) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+               err = -EFAULT;
+               goto out;
+       }
+
+       cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+       if (!cq->resize_buf) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       buf_size = entries * sizeof(struct mlx4_cqe);
+
+       cq->resize_umem = ib_umem_get(cq->umem->context, ucmd.buf_addr, buf_size,
+                                     IB_ACCESS_LOCAL_WRITE);
+       if (IS_ERR(cq->resize_umem)) {
+               err = PTR_ERR(cq->resize_umem);
+               cq->resize_umem = NULL;
+               goto err_umem;
+       }
+
+       err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->resize_umem),
+                           ilog2(cq->resize_umem->page_size), &cq->resize_buf->buf.mtt);
+       if (err)
+               goto err_buf;
+
+       err = mlx4_ib_umem_write_mtt(dev, &cq->resize_buf->buf.mtt, cq->resize_umem);
+       if (err)
+               goto err_mtt;
+
+       cq->resize_buf->cqe = entries - 1;
+
+       return 0;
+
+err_mtt:
+       mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
+
+err_buf:
+       ib_umem_release(cq->resize_umem);
+       cq->resize_umem = NULL;
+
+err_umem:
+       kfree(cq->resize_buf);
+       cq->resize_buf = NULL;
+
+out:
+       return err;
+}
+
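+/*
+ * Count CQEs that the hardware has completed but software has not yet
+ * polled; the CQ cannot be shrunk below this number.
+ */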
+int mlx4_get_outstanding_cqes(struct mlx4_ib_cq *cq)
+{
+       int i;
+
+       i = cq->mcq.cons_index;
+       while (get_sw_cqe(cq, i & cq->ibcq.cqe))
+               ++i;
+
+       return i - cq->mcq.cons_index;
+}
+
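+/*
+ * Copy the unpolled CQEs from the old buffer into the resize buffer,
+ * stopping at the RESIZE CQE that the HCA writes when it switches to
+ * the new buffer.
+ */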
+void mlx4_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
+{
+       struct mlx4_cqe *cqe;
+       int i;
+
+       i = cq->mcq.cons_index;
+       cqe = get_cqe(cq, i & cq->ibcq.cqe);
+       while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+               memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
+                                       (i + 1) & cq->resize_buf->cqe),
+                       get_cqe(cq, i & cq->ibcq.cqe), MLX4_CQ_ENTRY_SIZE);
+               cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+       }
+       ++cq->mcq.cons_index;
+}
+
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
+       struct mlx4_ib_cq *cq = to_mcq(ibcq);
+       struct mlx4_cq_context *context;
+       struct ib_umem *tumem = NULL;
+       u64 mtt_addr;
+       int outst_cqe;
+       int err;
+
+       mutex_lock(&cq->resize_mutex);
+
+       if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       entries = roundup_pow_of_two(entries + 1);
+       if (entries == ibcq->cqe + 1) {
+               err = 0;
+               goto out;
+       }
+
+       if (cq->is_kernel) {
+               /* Can't be smaller than the number of outstanding CQEs */
+               outst_cqe = mlx4_get_outstanding_cqes(cq);
+               if (entries < outst_cqe + 1) {
+                       err = 0;
+                       goto out;
+               }
+
+               err = mlx4_alloc_resize_buf(dev, cq, entries);
+               if (err)
+                       goto out;
+       } else {
+               err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
+               if (err)
+                       goto out;
+       }
+
+       context = kzalloc(sizeof *context, GFP_KERNEL);
+       if (!context) {
+               err = -ENOMEM;
+               goto err_buf;
+       }
+
+       context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
+       context->log_page_size   = cq->resize_buf->buf.mtt.page_shift - 12;
+       mtt_addr = mlx4_mtt_addr(dev->dev, &cq->resize_buf->buf.mtt);
+       context->mtt_base_addr_h = mtt_addr >> 32;
+       context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+       err = mlx4_cq_modify(dev->dev, &cq->mcq, context, 0);
+       if (err)
+               goto err_buf;
+
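+       /*
+        * Swap in the new buffer under the CQ lock.  For a kernel CQ, the
+        * poll path may already have installed the resize buffer when it
+        * reached the RESIZE CQE, in which case resize_buf is already NULL.
+        */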
+       spin_lock_irq(&cq->lock);
+       if (cq->is_kernel) {
+               if (cq->resize_buf) {
+                       mlx4_cq_resize_copy_cqes(cq);
+                       mlx4_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+                       cq->buf      = cq->resize_buf->buf;
+                       cq->ibcq.cqe = cq->resize_buf->cqe;
+
+                       kfree(cq->resize_buf);
+                       cq->resize_buf = NULL;
+               }
+       } else {
+               cq->buf      = cq->resize_buf->buf;
+               cq->ibcq.cqe = cq->resize_buf->cqe;
+               tumem = cq->umem;
+               cq->umem     = cq->resize_umem;
+
+               kfree(cq->resize_buf);
+               cq->resize_buf = NULL;
+               cq->resize_umem = NULL;
+       }
+       spin_unlock_irq(&cq->lock);
+
+       if (tumem)
+               ib_umem_release(tumem);
+
+       goto free_context;
+
+err_buf:
+       if (cq->resize_buf) {
+               if (cq->is_kernel)
+                       mlx4_free_cq_buf(dev, &cq->resize_buf->buf,
+                                        cq->resize_buf->cqe);
+
+               spin_lock_irq(&cq->lock);
+               kfree(cq->resize_buf);
+               cq->resize_buf = NULL;
+               spin_unlock_irq(&cq->lock);
+       }
+       if (cq->resize_umem) {
+               ib_umem_release(cq->resize_umem);
+               cq->resize_umem = NULL;
+       }
+
+free_context:
+       kfree(context);
+
+out:
+       mutex_unlock(&cq->resize_mutex);
+       return err;
+}
+
 int mlx4_ib_destroy_cq(struct ib_cq *cq)
 {
        struct mlx4_ib_dev *dev = to_mdev(cq->device);
@@ -255,6 +499,44 @@ static void dump_cqe(void *cqe)
               be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
 }

+int mlx4_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
+{
+       int err;
+
+       err = mlx4_buf_alloc(dev->dev, nent * MLX4_CQ_ENTRY_SIZE,
+                            MLX4_MAX_DIRECT_CQ_SIZE, &buf->buf);
+       if (err)
+               goto out;
+
+       err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
+                           &buf->mtt);
+       if (err)
+               goto err_buf;
+
+       err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+       if (err)
+               goto err_mtt;
+
+       return 0;
+
+err_mtt:
+       mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+       mlx4_buf_free(dev->dev, nent * MLX4_CQ_ENTRY_SIZE, &buf->buf);
+
+out:
+       return err;
+}
+
+void mlx4_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
+{
+       mlx4_buf_free(dev->dev, (cqe + 1) * MLX4_CQ_ENTRY_SIZE, &buf->buf);
+}
+
 static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
                                     struct ib_wc *wc)
 {
@@ -343,6 +625,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;

+repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;
@@ -364,6 +647,21 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                printk(KERN_WARNING "Completion for NOP opcode detected!\n");
                return -EINVAL;
        }
+       /*
+        * A RESIZE CQE means the HCA has switched to the new CQ buffer:
+        * install the resize buffer and poll again from it.
+        */
+       if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
+               if (cq->resize_buf) {
+                       struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
+
+                       mlx4_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+                       cq->buf      = cq->resize_buf->buf;
+                       cq->ibcq.cqe = cq->resize_buf->cqe;
+
+                       kfree(cq->resize_buf);
+                       cq->resize_buf = NULL;
+               }
+
+               goto repoll;
+       }

        if (!*cur_qp ||
            (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f770610..6ee7f46 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -569,6 +569,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)              |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)   |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)             |
+               (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)             |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)             |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)             |
@@ -608,6 +609,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        ibdev->ib_dev.post_recv              = mlx4_ib_post_recv;
        ibdev->ib_dev.create_cq              = mlx4_ib_create_cq;
        ibdev->ib_dev.modify_cq              = mlx4_ib_modify_cq;
+       ibdev->ib_dev.resize_cq              = mlx4_ib_resize_cq;
        ibdev->ib_dev.destroy_cq     = mlx4_ib_destroy_cq;
        ibdev->ib_dev.poll_cq                = mlx4_ib_poll_cq;
        ibdev->ib_dev.req_notify_cq  = mlx4_ib_arm_cq;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 83eb071..425970f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -78,13 +78,22 @@ struct mlx4_ib_cq_buf {
        struct mlx4_mtt         mtt;
 };

+struct mlx4_ib_cq_resize {
+       struct mlx4_ib_cq_buf   buf;
+       int                     cqe;
+};
+
 struct mlx4_ib_cq {
        struct ib_cq            ibcq;
        struct mlx4_cq          mcq;
        struct mlx4_ib_cq_buf   buf;
+       struct mlx4_ib_cq_resize *resize_buf;
+       int                     is_kernel;
        struct mlx4_ib_db       db;
        spinlock_t              lock;
+       struct mutex            resize_mutex;
        struct ib_umem         *umem;
+       struct ib_umem         *resize_umem;
 };

 struct mlx4_ib_mr {
@@ -250,9 +259,13 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 int mlx4_ib_dereg_mr(struct ib_mr *mr);

 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+void mlx4_cq_resize_copy_cqes(struct mlx4_ib_cq *cq);
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
                                struct ib_ucontext *context,
                                struct ib_udata *udata);
+int mlx4_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent);
+void mlx4_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe);
 int mlx4_ib_destroy_cq(struct ib_cq *cq);
 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
--
1.5.4.2
