The common "simple Tx" function - in some ways a scalar version of the
vector Tx functions - can be used by the idpf driver as well as i40e and
ice, so add support for it to the driver.
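For context, the snippet below is a rough, standalone sketch of the idea behind
such a "simple" Tx burst: every packet is a single segment with no offloads, so
each one maps to exactly one data descriptor written straight into the ring.
The toy types and names are illustrative only and are not the shared
ci_xmit_pkts_simple code or the real rte_mbuf/descriptor layouts.

/* Toy, self-contained sketch of a "simple Tx" burst (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct toy_mbuf {                /* stand-in for struct rte_mbuf */
	uint64_t buf_iova;       /* bus address of the packet data */
	uint16_t data_len;       /* single segment, no offloads assumed */
};

struct toy_tx_desc {             /* stand-in for a HW Tx data descriptor */
	uint64_t addr;
	uint64_t cmd_len;        /* length plus end-of-packet command bits */
};

struct toy_txq {
	struct toy_tx_desc ring[RING_SIZE];
	uint16_t tail;           /* next free descriptor slot */
	uint16_t nb_free;        /* free slots (descriptor cleanup omitted) */
};

/* One descriptor per packet, written back to back, no context descriptors. */
static uint16_t
toy_xmit_pkts_simple(struct toy_txq *txq, struct toy_mbuf **pkts, uint16_t nb)
{
	uint16_t i;

	if (nb > txq->nb_free)
		nb = txq->nb_free;

	for (i = 0; i < nb; i++) {
		struct toy_tx_desc *d = &txq->ring[txq->tail];

		d->addr = pkts[i]->buf_iova;
		d->cmd_len = pkts[i]->data_len; /* real drivers also set EOP/RS bits */
		txq->tail = (uint16_t)((txq->tail + 1) % RING_SIZE);
	}
	txq->nb_free = (uint16_t)(txq->nb_free - nb);
	return nb;
}

int
main(void)
{
	struct toy_txq txq = { .nb_free = RING_SIZE };
	struct toy_mbuf m0 = { 0x1000, 64 }, m1 = { 0x2000, 128 };
	struct toy_mbuf *burst[] = { &m0, &m1 };

	printf("sent %u packets\n", (unsigned)toy_xmit_pkts_simple(&txq, burst, 2));
	return 0;
}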

Signed-off-by: Bruce Richardson <[email protected]>
---
 drivers/net/intel/idpf/idpf_common_device.h |  2 ++
 drivers/net/intel/idpf/idpf_common_rxtx.c   | 19 +++++++++++++++
 drivers/net/intel/idpf/idpf_common_rxtx.h   |  3 +++
 drivers/net/intel/idpf/idpf_rxtx.c          | 26 ++++++++++++++++++++-
 4 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/drivers/net/intel/idpf/idpf_common_device.h b/drivers/net/intel/idpf/idpf_common_device.h
index 31915a03d4..527aa9b3dc 100644
--- a/drivers/net/intel/idpf/idpf_common_device.h
+++ b/drivers/net/intel/idpf/idpf_common_device.h
@@ -78,6 +78,7 @@ enum idpf_rx_func_type {
 enum idpf_tx_func_type {
        IDPF_TX_DEFAULT,
        IDPF_TX_SINGLEQ,
+       IDPF_TX_SINGLEQ_SIMPLE,
        IDPF_TX_SINGLEQ_AVX2,
        IDPF_TX_AVX512,
        IDPF_TX_SINGLEQ_AVX512,
@@ -100,6 +101,7 @@ struct idpf_adapter {
 
        bool is_tx_singleq; /* true - single queue model, false - split queue model */
        bool is_rx_singleq; /* true - single queue model, false - split queue model */
+       bool tx_simple_allowed; /* true if all queues support simple TX */
 
        /* For timestamp */
        uint64_t time_hw;
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.c b/drivers/net/intel/idpf/idpf_common_rxtx.c
index 781310e564..bf2e9363d4 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.c
@@ -1348,6 +1348,15 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        idpf_set_tso_ctx, NULL, NULL);
 }
 
+RTE_EXPORT_INTERNAL_SYMBOL(idpf_dp_singleq_xmit_pkts_simple)
+uint16_t
+idpf_dp_singleq_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                  uint16_t nb_pkts)
+{
+       return ci_xmit_pkts_simple(tx_queue, tx_pkts, nb_pkts);
+}
+
+
 /* TX prep functions */
 RTE_EXPORT_INTERNAL_SYMBOL(idpf_dp_prep_pkts)
 uint16_t
@@ -1533,6 +1542,16 @@ const struct ci_tx_path_info idpf_tx_path_infos[] = {
                        .single_queue = true
                }
        },
+       [IDPF_TX_SINGLEQ_SIMPLE] = {
+               .pkt_burst = idpf_dp_singleq_xmit_pkts_simple,
+               .info = "Single Queue Scalar Simple",
+               .features = {
+                       .tx_offloads = IDPF_TX_VECTOR_OFFLOADS,
+                       .single_queue = true,
+                       .simple_tx = true,
+               }
+       },
+
 #ifdef RTE_ARCH_X86
        [IDPF_TX_SINGLEQ_AVX2] = {
                .pkt_burst = idpf_dp_singleq_xmit_pkts_avx2,
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.h b/drivers/net/intel/idpf/idpf_common_rxtx.h
index fe7094d434..914cab0f25 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.h
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.h
@@ -221,6 +221,9 @@ __rte_internal
 uint16_t idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                   uint16_t nb_pkts);
 __rte_internal
+uint16_t idpf_dp_singleq_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                  uint16_t nb_pkts);
+__rte_internal
 uint16_t idpf_dp_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts);
 __rte_internal
diff --git a/drivers/net/intel/idpf/idpf_rxtx.c b/drivers/net/intel/idpf/idpf_rxtx.c
index 5c2516f556..a2bb4b766d 100644
--- a/drivers/net/intel/idpf/idpf_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_rxtx.c
@@ -497,6 +497,22 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        txq->q_set = true;
        dev->data->tx_queues[queue_idx] = txq;
 
+       /* Set tx_simple_allowed flag based on queue configuration.
+        * For queue 0: explicitly set the flag based on its configuration.
+        * For other queues: only set to false if this queue cannot use simple_tx.
+        */
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+               goto out;
+
+       /* for first queue, default to true, disable later if any queue can't meet conditions */
+       if (queue_idx == 0)
+               adapter->tx_simple_allowed = true;
+
+       if ((txq->offloads != (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)) ||
+                       txq->tx_rs_thresh < IDPF_VPMD_TX_MAX_BURST)
+               adapter->tx_simple_allowed = false;
+
+out:
        return 0;
 
 err_complq_setup:
@@ -639,6 +655,7 @@ int
 idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct idpf_vport *vport = dev->data->dev_private;
+       struct idpf_adapter *ad = vport->adapter;
        struct ci_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        int err = 0;
 
@@ -655,6 +672,12 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
                return err;
        }
 
+       /* Record what kind of descriptor cleanup we need on teardown.
+        * For single queue mode, vector or simple tx paths use vec entry format.
+        */
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+               txq->use_vec_entry = ad->tx_simple_allowed;
+
        /* Ready to switch the queue on */
        err = idpf_vc_queue_switch(vport, tx_queue_id, false, true,
                                                        VIRTCHNL2_QUEUE_TYPE_TX);
@@ -835,7 +858,8 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
        struct ci_tx_path_features req_features = {
                .tx_offloads = dev->data->dev_conf.txmode.offloads,
                .simd_width = RTE_VECT_SIMD_DISABLED,
-               .single_queue = (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+               .single_queue = (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE),
+               .simple_tx = ad->tx_simple_allowed
        };
 
        /* The primary process selects the tx path for all processes. */
-- 
2.51.0
