The PMD uses only a power-of-two number of Completion Queue Elements.
Storing the number of elements as its log2 reduces the size of the
container needed to hold it.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_rxq.c  | 2 +-
 drivers/net/mlx5/mlx5_rxtx.c | 8 ++++----
 drivers/net/mlx5/mlx5_rxtx.h | 4 ++--
 drivers/net/mlx5/mlx5_txq.c  | 2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d9db368..f6f4315 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -881,7 +881,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
        if (elts == NULL)
                return ENOMEM;
        tmpl->rxq.rq_db = rwq->rq.db;
-       tmpl->rxq.cqe_n = ibcq->cqe + 1;
+       tmpl->rxq.cqe_n = log2above(ibcq->cqe);
        tmpl->rxq.cq_ci = 0;
        tmpl->rxq.rq_ci = 0;
        tmpl->rxq.cq_db = cq->dbrec;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index e132727..4f28aa9 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -166,8 +166,8 @@ txq_complete(struct txq *txq) 
__attribute__((always_inline));
 static inline void
 txq_complete(struct txq *txq)
 {
-       const unsigned int cqe_n = txq->cqe_n;
        const unsigned int elts_n = 1 << txq->elts_n;
+       const unsigned int cqe_n = 1 << txq->cqe_n;
        const unsigned int cqe_cnt = cqe_n - 1;
        uint16_t elts_free = txq->elts_tail;
        uint16_t elts_tail;
@@ -427,9 +427,9 @@ mlx5_tx_dbrec(struct txq *txq)
 static inline void
 tx_prefetch_cqe(struct txq *txq, uint16_t ci)
 {
-       volatile struct mlx5_cqe64 *cqe;
+       volatile struct mlx5_cqe *cqe;

-       cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64;
+       cqe = &(*txq->cqes)[ci & ((1 << txq->cqe_n) - 1)];
        rte_prefetch0(cqe);
 }

@@ -1272,8 +1272,8 @@ uint16_t
 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
        struct rxq *rxq = dpdk_rxq;
-       const unsigned int cqe_cnt = rxq->cqe_n - 1;
        const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
+       const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
        const unsigned int sges_n = rxq->sges_n;
        struct rte_mbuf *pkt = NULL;
        struct rte_mbuf *seg = NULL;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 3ba3913..224614e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -110,13 +110,13 @@ struct rxq {
        unsigned int vlan_strip:1; /* Enable VLAN stripping. */
        unsigned int crc_present:1; /* CRC must be subtracted. */
        unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+       unsigned int cqe_n:4; /* Log 2 of CQ elements. */
        unsigned int elts_n:4; /* Log 2 of Mbufs. */
        unsigned int port_id:8;
        volatile uint32_t *rq_db;
        volatile uint32_t *cq_db;
        uint16_t rq_ci;
        uint16_t cq_ci;
-       uint16_t cqe_n; /* Number of CQ elements. */
        volatile struct mlx5_wqe_data_seg(*wqes)[];
        volatile struct mlx5_cqe(*cqes)[];
        struct rxq_zip zip; /* Compressed context. */
@@ -245,10 +245,10 @@ struct txq {
        uint16_t elts_tail; /* First element awaiting completion. */
        uint16_t elts_comp; /* Counter since last completion request. */
        uint16_t cq_ci; /* Consumer index for completion queue. */
-       uint16_t cqe_n; /* Number of CQ elements. */
        uint16_t wqe_ci; /* Consumer index for work queue. */
        uint16_t wqe_n; /* Number of WQ elements. */
        uint16_t elts_n:4; /* (*elts)[] length (in log2). */
+       uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
        uint16_t bf_offset; /* Blueflame offset. */
        uint16_t bf_buf_size; /* Blueflame size. */
        uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9055016..6145b69 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -212,7 +212,7 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
                      "it should be set to %u", RTE_CACHE_LINE_SIZE);
                return EINVAL;
        }
-       tmpl->txq.cqe_n = ibcq->cqe + 1;
+       tmpl->txq.cqe_n = log2above(ibcq->cqe);
        tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
        tmpl->txq.wqes =
                (volatile struct mlx5_wqe64 (*)[])
-- 
2.1.4

Reply via email to