A non-zero max_inline value means inline is requested; there is no need
to duplicate this information.

Signed-off-by: Nelio Laranjeiro <nelio.laranje...@6wind.com>
Acked-by: Yongseok Koh <ys...@mellanox.com>
---
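For reference: inline_en was only ever set together with a non-zero
max_inline, so the datapath test collapses to a test on max_inline
alone. A minimal standalone sketch of that equivalence (hypothetical
reduced struct, not the actual driver code):

#include <assert.h>
#include <stdint.h>

/* Hypothetical, reduced stand-in for struct mlx5_txq_data, kept only
 * to illustrate the redundancy removed by this patch. */
struct txq_sketch {
	uint16_t max_inline;  /* Inline budget (in cache lines). */
	uint16_t inline_en:1; /* Old flag: set when inline is enabled. */
};

int
main(void)
{
	struct txq_sketch txq = { .max_inline = 0, .inline_en = 0 };

	/* Configuration always set both fields together. */
	txq.max_inline = 4;
	txq.inline_en = 1;
	/* The old datapath condition ... */
	int old_cond = !!txq.max_inline && txq.inline_en;
	/* ... is equivalent to testing max_inline alone. */
	int new_cond = txq.max_inline != 0;
	assert(old_cond == new_cond);
	return 0;
}
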
 drivers/net/mlx5/mlx5_rxtx.c | 5 ++---
 drivers/net/mlx5/mlx5_rxtx.h | 1 -
 drivers/net/mlx5/mlx5_txq.c  | 3 +--
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index d735e646c..28c0ad8ab 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -348,6 +348,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        unsigned int comp;
        volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
        unsigned int segs_n = 0;
+       const unsigned int max_inline = txq->max_inline;
 
        if (unlikely(!pkts_n))
                return 0;
@@ -360,8 +361,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        if (unlikely(!max_wqe))
                return 0;
        do {
-               unsigned int max_inline = txq->max_inline;
-               const unsigned int inline_en = !!max_inline && txq->inline_en;
                struct rte_mbuf *buf = NULL;
                uint8_t *raw;
                volatile struct mlx5_wqe_v *wqe = NULL;
@@ -516,7 +515,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        }
                }
                /* Inline if enough room. */
-               if (inline_en || tso) {
+               if (max_inline || tso) {
                        uint32_t inl;
                        uintptr_t end = (uintptr_t)
                                (((uintptr_t)txq->wqes) +
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 63eb12c66..b8c7925a3 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -191,7 +191,6 @@ struct mlx5_txq_data {
        uint16_t elts_n:4; /* (*elts)[] length (in log2). */
        uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
        uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
-       uint16_t inline_en:1; /* When set inline is enabled. */
        uint16_t tso_en:1; /* When set hardware TSO is enabled. */
        uint16_t tunnel_en:1;
        /* When set TX offload for tunneled packets are supported. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 84d37be19..a786a6b63 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -353,7 +353,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
                .pd = priv->pd,
                .comp_mask = IBV_QP_INIT_ATTR_PD,
        };
-       if (txq_data->inline_en)
+       if (txq_data->max_inline)
                attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
        if (txq_data->tso_en) {
                attr.init.max_tso_header = txq_ctrl->max_tso_header;
@@ -589,7 +589,6 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
                tmpl->txq.max_inline =
                        ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
                         RTE_CACHE_LINE_SIZE);
-               tmpl->txq.inline_en = 1;
                /* TSO and MPS can't be enabled concurrently. */
                assert(!priv->tso || !priv->mps);
                if (priv->mps == MLX5_MPW_ENHANCED) {
-- 
2.11.0
