As a preliminary step for the signature feature, which will
require posting multiple (3) WQEs for a single WR, break the
post_send routine's WQE indexing into begin and finish
routines.

This patch does not change any functionality.

Signed-off-by: Sagi Grimberg <sa...@mellanox.com>
---
 drivers/infiniband/hw/mlx5/qp.c |   95 ++++++++++++++++++++++++---------------
 1 files changed, 59 insertions(+), 36 deletions(-)
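
Note (not part of the changelog): a rough sketch of how a follow-up
signature patch could use the pair introduced here.  begin_wqe() checks
for send-queue overflow, picks the slot at cur_post and fills the
control segment; finish_wqe() stamps opcode/index/size into the control
segment, records the wr_id and advances cur_post, so calling the pair
repeatedly under one WR posts consecutive WQEs.  The WR opcode, the
mlx5 opcodes and the segment builders below are placeholders for
illustration only, not existing code:

	/* inside the opcode switch of mlx5_ib_post_send(), sketch only */
	case IB_WR_REG_SIG_MR:		/* hypothetical signature WR */
		/* WQE 1: UMR registering the signature-enabled MR */
		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto out;
		}
		/* build the UMR/mkey segments into seg and bump size here */
		finish_wqe(qp, ctrl, size, idx, wr->wr_id, &nreq,
			   get_fence(fence, wr), next_fence,
			   MLX5_OPCODE_UMR);

		/* WQE 2: SET_PSV for the memory signature domain */
		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto out;
		}
		/* build the PSV segment into seg and bump size here */
		finish_wqe(qp, ctrl, size, idx, wr->wr_id, &nreq,
			   get_fence(fence, wr), next_fence,
			   MLX5_OPCODE_SET_PSV);

		/* WQE 3: SET_PSV for the wire domain, same pattern */
		break;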

diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c80122e..dc8d9fc 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1983,6 +1983,57 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
        }
 }
 
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+                    struct mlx5_wqe_ctrl_seg **ctrl,
+                    struct ib_send_wr *wr, int *idx,
+                    int *size, int nreq)
+{
+       int err = 0;
+       if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
+               err = -ENOMEM;
+               return err;
+       }
+
+       *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+       *seg = mlx5_get_send_wqe(qp, *idx);
+       *ctrl = *seg;
+       *(uint32_t *)(*seg + 8) = 0;
+       (*ctrl)->imm = send_ieth(wr);
+       (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+               (wr->send_flags & IB_SEND_SIGNALED ?
+                MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+               (wr->send_flags & IB_SEND_SOLICITED ?
+                MLX5_WQE_CTRL_SOLICITED : 0);
+
+       *seg += sizeof(**ctrl);
+       *size = sizeof(**ctrl) / 16;
+
+       return err;
+}
+
+static void finish_wqe(struct mlx5_ib_qp *qp,
+                      struct mlx5_wqe_ctrl_seg *ctrl,
+                      u8 size, unsigned idx, u64 wr_id,
+                      int *nreq, u8 fence, u8 next_fence,
+                      u32 mlx5_opcode)
+{
+       u8 opmod = 0;
+       ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+                                            mlx5_opcode | ((u32)opmod << 24));
+       ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
+       ctrl->fm_ce_se |= fence;
+       qp->fm_cache = next_fence;
+       if (unlikely(qp->wq_sig))
+               ctrl->signature = wq_sig(ctrl);
+
+       qp->sq.wrid[idx] = wr_id;
+       qp->sq.w_list[idx].opcode = mlx5_opcode;
+       qp->sq.wqe_head[idx] = qp->sq.head + (*nreq)++;
+       qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+       qp->sq.w_list[idx].next = qp->sq.cur_post;
+}
+
+
 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr)
 {
@@ -1996,7 +2047,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        int uninitialized_var(size);
        void *qend = qp->sq.qend;
        unsigned long flags;
-       u32 mlx5_opcode;
        unsigned idx;
        int err = 0;
        int inl = 0;
@@ -2005,7 +2055,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        int nreq;
        int i;
        u8 next_fence = 0;
-       u8 opmod = 0;
        u8 fence;
 
        spin_lock_irqsave(&qp->sq.lock, flags);
@@ -2018,36 +2067,23 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        goto out;
                }
 
-               if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
+               fence = qp->fm_cache;
+               num_sge = wr->num_sge;
+               if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "\n");
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }
 
-               fence = qp->fm_cache;
-               num_sge = wr->num_sge;
-               if (unlikely(num_sge > qp->sq.max_gs)) {
+               err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
+               if (err) {
                        mlx5_ib_warn(dev, "\n");
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }
 
-               idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
-               seg = mlx5_get_send_wqe(qp, idx);
-               ctrl = seg;
-               *(uint32_t *)(seg + 8) = 0;
-               ctrl->imm = send_ieth(wr);
-               ctrl->fm_ce_se = qp->sq_signal_bits |
-                       (wr->send_flags & IB_SEND_SIGNALED ?
-                        MLX5_WQE_CTRL_CQ_UPDATE : 0) |
-                       (wr->send_flags & IB_SEND_SOLICITED ?
-                        MLX5_WQE_CTRL_SOLICITED : 0);
-
-               seg += sizeof(*ctrl);
-               size = sizeof(*ctrl) / 16;
-
                switch (ibqp->qp_type) {
                case IB_QPT_XRC_INI:
                        xrc = seg;
@@ -2197,22 +2233,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                }
 
-               mlx5_opcode = mlx5_ib_opcode[wr->opcode];
-               ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8)      |
-                                                    mlx5_opcode                        |
-                                                    ((u32)opmod << 24));
-               ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
-               ctrl->fm_ce_se |= get_fence(fence, wr);
-               qp->fm_cache = next_fence;
-               if (unlikely(qp->wq_sig))
-                       ctrl->signature = wq_sig(ctrl);
-
-               qp->sq.wrid[idx] = wr->wr_id;
-               qp->sq.w_list[idx].opcode = mlx5_opcode;
-               qp->sq.wqe_head[idx] = qp->sq.head + nreq;
-               qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
-               qp->sq.w_list[idx].next = qp->sq.cur_post;
-
+               finish_wqe(qp, ctrl, size, idx, wr->wr_id, &nreq,
+                          get_fence(fence, wr), next_fence,
+                          mlx5_ib_opcode[wr->opcode]);
                if (0)
                        dump_wqe(qp, idx, size);
        }
-- 
1.7.8.2
