Inline TX will be fully managed by the PMD once Verbs is bypassed in the
data path. Remove the current code until then.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 config/common_base           |  1 -
 doc/guides/nics/mlx5.rst     | 10 ------
 drivers/net/mlx5/Makefile    |  4 ---
 drivers/net/mlx5/mlx5_defs.h |  5 ---
 drivers/net/mlx5/mlx5_rxtx.c | 73 +++++++++++++++-----------------------------
 drivers/net/mlx5/mlx5_rxtx.h |  9 ------
 drivers/net/mlx5/mlx5_txq.c  | 16 ----------
 7 files changed, 25 insertions(+), 93 deletions(-)
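
For context (not part of this patch): below is a minimal sketch, in plain C,
of the per-packet decision the removed Verbs inline path used to make in
mlx5_tx_burst(). The identifiers (txq->max_inline, send_pending_inline,
txq_mp2mr()) mirror the fields and helpers deleted below; the VLAN insertion
variants and error handling are elided.

	if (length <= txq->max_inline) {
		/* Small packet: its bytes are copied straight into the
		 * work request, so no Memory Region (lkey) lookup is
		 * needed. */
		err = txq->send_pending_inline(txq->qp, (void *)addr,
					       length, send_flags);
	} else {
		/* Larger packet: look up the MR key for the mbuf's
		 * mempool and post a pointer for the NIC to DMA. */
		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
		err = txq->send_pending(txq->qp, addr, length, lkey,
					send_flags);
	}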

diff --git a/config/common_base b/config/common_base
index 2c22a9a..f2d34c8 100644
--- a/config/common_base
+++ b/config/common_base
@@ -207,7 +207,6 @@ CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS=1
 #
 CONFIG_RTE_LIBRTE_MLX5_PMD=n
 CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
-CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE=0
 CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8

 #
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 84c35a0..77fa957 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -114,16 +114,6 @@ These options can be modified in the ``.config`` file.
   adds additional run-time checks and debugging messages at the cost of
   lower performance.

-- ``CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE`` (default **0**)
-
-  Amount of data to be inlined during TX operations. Improves latency.
-  Can improve PPS performance when PCI backpressure is detected and may be
-  useful for scenarios involving heavy traffic on many queues.
-
-  Since the additional software logic necessary to handle this mode can
-  lower performance when there is no backpressure, it is not enabled by
-  default.
-
 - ``CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE`` (default **8**)

   Maximum number of cached memory pools (MPs) per TX queue. Each MP from
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index eca2ec3..406caa5 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -85,10 +85,6 @@ else
 CFLAGS += -DNDEBUG -UPEDANTIC
 endif

-ifdef CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE
-CFLAGS += -DMLX5_PMD_MAX_INLINE=$(CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE)
-endif
-
 ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE
 CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE)
 endif
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index da1c90e..9a19835 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -54,11 +54,6 @@
 /* RSS Indirection table size. */
 #define RSS_INDIRECTION_TABLE_SIZE 256

-/* Maximum size for inline data. */
-#ifndef MLX5_PMD_MAX_INLINE
-#define MLX5_PMD_MAX_INLINE 0
-#endif
-
 /*
  * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
  * from which buffers are to be transmitted will have to be mapped by this
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 4db72e9..7480a33 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -329,56 +329,33 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        rte_prefetch0((volatile void *)
                                      (uintptr_t)buf_next_addr);
                }
-               /* Put packet into send queue. */
-#if MLX5_PMD_MAX_INLINE > 0
-               if (length <= txq->max_inline) {
-#ifdef HAVE_VERBS_VLAN_INSERTION
-                       if (insert_vlan)
-                               err = txq->send_pending_inline_vlan
-                                       (txq->qp,
-                                        (void *)addr,
-                                        length,
-                                        send_flags,
-                                        &buf->vlan_tci);
-                       else
-#endif /* HAVE_VERBS_VLAN_INSERTION */
-                               err = txq->send_pending_inline
-                                       (txq->qp,
-                                        (void *)addr,
-                                        length,
-                                        send_flags);
-               } else
-#endif
-               {
-                       /* Retrieve Memory Region key for this
-                        * memory pool. */
-                       lkey = txq_mp2mr(txq, txq_mb2mp(buf));
-                       if (unlikely(lkey == (uint32_t)-1)) {
-                               /* MR does not exist. */
-                               DEBUG("%p: unable to get MP <-> MR"
-                                     " association", (void *)txq);
-                               /* Clean up TX element. */
-                               elt->buf = NULL;
-                               goto stop;
-                       }
+               /* Retrieve Memory Region key for this memory pool. */
+               lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+               if (unlikely(lkey == (uint32_t)-1)) {
+                       /* MR does not exist. */
+                       DEBUG("%p: unable to get MP <-> MR"
+                             " association", (void *)txq);
+                       /* Clean up TX element. */
+                       elt->buf = NULL;
+                       goto stop;
+               }
 #ifdef HAVE_VERBS_VLAN_INSERTION
-                       if (insert_vlan)
-                               err = txq->send_pending_vlan
-                                       (txq->qp,
-                                        addr,
-                                        length,
-                                        lkey,
-                                        send_flags,
-                                        &buf->vlan_tci);
-                       else
+               if (insert_vlan)
+                       err = txq->send_pending_vlan
+                               (txq->qp,
+                                addr,
+                                length,
+                                lkey,
+                                send_flags,
+                                &buf->vlan_tci);
+               else
 #endif /* HAVE_VERBS_VLAN_INSERTION */
-                               err = txq->send_pending
-                                       (txq->qp,
-                                        addr,
-                                        length,
-                                        lkey,
-                                        send_flags);
-               }
+                       err = txq->send_pending
+                               (txq->qp,
+                                addr,
+                                length,
+                                lkey,
+                                send_flags);
                if (unlikely(err))
                        goto stop;
 #ifdef MLX5_PMD_SOFT_COUNTERS
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 2e1f83b..3a353b0 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -240,19 +240,10 @@ struct txq {
 #ifdef HAVE_VERBS_VLAN_INSERTION
        int (*send_pending_vlan)();
 #endif
-#if MLX5_PMD_MAX_INLINE > 0
-       int (*send_pending_inline)();
-#ifdef HAVE_VERBS_VLAN_INSERTION
-       int (*send_pending_inline_vlan)();
-#endif
-#endif
        int (*send_flush)(struct ibv_qp *qp);
        struct ibv_cq *cq; /* Completion Queue. */
        struct ibv_qp *qp; /* Queue Pair. */
        struct txq_elt (*elts)[]; /* TX elements. */
-#if MLX5_PMD_MAX_INLINE > 0
-       uint32_t max_inline; /* Max inline send size <= MLX5_PMD_MAX_INLINE. */
-#endif
        unsigned int elts_n; /* (*elts)[] length. */
        unsigned int elts_head; /* Current index in (*elts)[]. */
        unsigned int elts_tail; /* First element awaiting completion. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 59974c5..75da65b 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -173,9 +173,6 @@ txq_cleanup(struct txq *txq)
        DEBUG("cleaning up %p", (void *)txq);
        txq_free_elts(txq);
        txq->poll_cnt = NULL;
-#if MLX5_PMD_MAX_INLINE > 0
-       txq->send_pending_inline = NULL;
-#endif
        txq->send_flush = NULL;
        if (txq->if_qp != NULL) {
                assert(txq->priv != NULL);
@@ -305,9 +302,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                                        desc),
                        /* Max number of scatter/gather elements in a WR. */
                        .max_send_sge = 1,
-#if MLX5_PMD_MAX_INLINE > 0
-                       .max_inline_data = MLX5_PMD_MAX_INLINE,
-#endif
                },
                .qp_type = IBV_QPT_RAW_PACKET,
                /* Do *NOT* enable this, completions events are managed per
@@ -325,10 +319,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                      (void *)dev, strerror(ret));
                goto error;
        }
-#if MLX5_PMD_MAX_INLINE > 0
-       /* ibv_create_qp() updates this value. */
-       tmpl.max_inline = attr.init.cap.max_inline_data;
-#endif
        attr.mod = (struct ibv_exp_qp_attr){
                /* Move the QP to this state. */
                .qp_state = IBV_QPS_INIT,
@@ -403,12 +393,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
        txq_cleanup(txq);
        *txq = tmpl;
        txq->poll_cnt = txq->if_cq->poll_cnt;
-#if MLX5_PMD_MAX_INLINE > 0
-       txq->send_pending_inline = txq->if_qp->send_pending_inline;
-#ifdef HAVE_VERBS_VLAN_INSERTION
-       txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan;
-#endif
-#endif
        txq->send_pending = txq->if_qp->send_pending;
 #ifdef HAVE_VERBS_VLAN_INSERTION
        txq->send_pending_vlan = txq->if_qp->send_pending_vlan;
-- 
2.1.4
