From: Jun Yang <[email protected]>

The maximum number of push-mode queues is a bus-level setting that
affects all DPAA devices. Move its configuration from the PMD to the
bus driver.

Signed-off-by: Jun Yang <[email protected]>
---
 drivers/bus/dpaa/bus_dpaa_driver.h |  6 ++++
 drivers/bus/dpaa/dpaa_bus.c        | 51 ++++++++++++++++++++++++++++++
 drivers/net/dpaa/dpaa_ethdev.c     | 51 ++++++++----------------------
 3 files changed, 70 insertions(+), 38 deletions(-)
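
Note for reviewers: below is a minimal sketch of how a PMD is expected
to consume the two new bus APIs. It is not part of the patch; the helper
name and the pull-mode fallback policy are illustrative only.

	/* Try to reserve one of the bus-wide push-mode slots.
	 * dpaa_push_queue_num_update() atomically increments the bus-level
	 * counter and returns true on success; once dpaa_push_queue_max_num()
	 * queues are already in push mode it returns false, and the caller
	 * should fall back to pull (poll) mode for this queue.
	 */
	static int example_try_push_mode(struct qman_fq *rxq)
	{
		if (!rxq->is_static && dpaa_push_queue_num_update())
			return true;	/* configure this queue in push mode */
		return false;		/* fall back to pull mode */
	}

The DPAA_PUSH_QUEUES_NUMBER environment variable keeps its existing
meaning; it is simply read during bus probe now instead of PMD probe.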

diff --git a/drivers/bus/dpaa/bus_dpaa_driver.h b/drivers/bus/dpaa/bus_dpaa_driver.h
index 976f356699..cca0543432 100644
--- a/drivers/bus/dpaa/bus_dpaa_driver.h
+++ b/drivers/bus/dpaa/bus_dpaa_driver.h
@@ -249,6 +249,12 @@ struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);
 __rte_internal
 uint32_t dpaa_soc_ver(void);
 
+__rte_internal
+int dpaa_push_queue_num_update(void);
+
+__rte_internal
+uint16_t dpaa_push_queue_max_num(void);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 1a35aa52df..d9830b68ca 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -50,6 +50,13 @@
 #define DPAA_SVR_MASK 0xffff0000
 #define RTE_PRIORITY_102 102
 
+#define DPAA_PUSH_RXQ_NUM_ARG "dpaa_push_rxq_num"
+/* By default we allow only up to 4 push-mode queues, as each such queue
+ * needs a dedicated portal and we are short of portals.
+ */
+#define DPAA_MAX_PUSH_MODE_QUEUE 8
+#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4
+
 struct rte_dpaa_bus {
        struct rte_bus bus;
        TAILQ_HEAD(, rte_dpaa_device) device_list;
@@ -57,6 +64,8 @@ struct rte_dpaa_bus {
        int device_count;
        int detected;
        uint32_t svr_ver;
+       uint16_t max_push_rxq_num;
+       RTE_ATOMIC(uint16_t) push_rxq_num;
 };
 
 static struct rte_dpaa_bus rte_dpaa_bus;
@@ -91,6 +100,34 @@ dpaa_get_eth_port_cfg(int dev_id)
        return &dpaa_netcfg->port_cfg[dev_id];
 }
 
+RTE_EXPORT_INTERNAL_SYMBOL(dpaa_push_queue_num_update)
+int
+dpaa_push_queue_num_update(void)
+{
+       int ret = false;
+       uint16_t current, new_val;
+
+       current = rte_atomic_load_explicit(&rte_dpaa_bus.push_rxq_num,
+                                          rte_memory_order_acquire);
+       if (current < rte_dpaa_bus.max_push_rxq_num) {
+               new_val = current + 1;
+               if (rte_atomic_compare_exchange_strong_explicit(&rte_dpaa_bus.push_rxq_num,
+                               &current, new_val,
+                               rte_memory_order_release,
+                               rte_memory_order_acquire))
+                       ret = true;
+       }
+
+       return ret;
+}
+
+RTE_EXPORT_INTERNAL_SYMBOL(dpaa_push_queue_max_num)
+uint16_t
+dpaa_push_queue_max_num(void)
+{
+       return rte_dpaa_bus.max_push_rxq_num;
+}
+
 static int
 compare_dpaa_devices(struct rte_dpaa_device *dev1,
                     struct rte_dpaa_device *dev2)
@@ -681,6 +718,7 @@ rte_dpaa_bus_probe(void)
        uint32_t svr_ver;
        int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST;
        static int process_once;
+       char *penv;
 
        /* If DPAA bus is not present nothing needs to be done */
        if (!rte_dpaa_bus.detected)
@@ -709,6 +747,18 @@ rte_dpaa_bus_probe(void)
                        rte_dpaa_bus.svr_ver);
        }
 
+       /* Disable push mode by default on LS1043A; the environment
+        * variable below can still re-enable it, as before.
+        */
+       if (rte_dpaa_bus.svr_ver == SVR_LS1043A_FAMILY)
+               rte_dpaa_bus.max_push_rxq_num = 0;
+
+       penv = getenv("DPAA_PUSH_QUEUES_NUMBER");
+       if (penv)
+               rte_dpaa_bus.max_push_rxq_num = atoi(penv);
+       if (rte_dpaa_bus.max_push_rxq_num > DPAA_MAX_PUSH_MODE_QUEUE)
+               rte_dpaa_bus.max_push_rxq_num = DPAA_MAX_PUSH_MODE_QUEUE;
+
        /* Device list creation is only done once */
        if (!process_once) {
                rte_dpaa_bus_dev_build();
@@ -947,6 +997,7 @@ static struct rte_dpaa_bus rte_dpaa_bus = {
                .dev_iterate = dpaa_bus_dev_iterate,
                .cleanup = dpaa_bus_cleanup,
        },
+       .max_push_rxq_num = DPAA_DEFAULT_PUSH_MODE_QUEUE,
        .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
        .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
        .device_count = 0,
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 43aab98339..0baf5c03fa 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -89,16 +89,6 @@ static int fmc_q = 1;        /* Indicates the use of static fmc for distribution */
 static int default_q;  /* use default queue - FMC is not executed*/
 bool dpaa_enable_recv_err_pkts; /* Enable main queue to receive error packets */
 
-/* At present we only allow up to 4 push mode queues as default - as each of
- * this queue need dedicated portal and we are short of portals.
- */
-#define DPAA_MAX_PUSH_MODE_QUEUE       8
-#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4
-
-static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
-static int dpaa_push_queue_idx; /* Queue index which are in push mode*/
-
-
 /* Per RX FQ Taildrop in frame count */
 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
 
@@ -1113,7 +1103,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
        struct qm_mcc_initfq opts = {0};
        u32 ch_id, flags = 0;
-       int ret;
+       int ret, set_push_rxq = false;
        u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
        uint32_t max_rx_pktlen;
 
@@ -1214,12 +1204,12 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
                               fman_if_get_sg_enable(fif), max_rx_pktlen);
        /* checking if push mode only, no error check for now */
-       if (!rxq->is_static &&
-           dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+       if (!rxq->is_static)
+               set_push_rxq = dpaa_push_queue_num_update();
+       if (set_push_rxq) {
                struct qman_portal *qp;
                int q_fd;
 
-               dpaa_push_queue_idx++;
                opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
                opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
                                   QM_FQCTRL_CTXASTASHING |
@@ -1269,7 +1259,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                qp = fsl_qman_fq_portal_create(&q_fd);
                if (!qp) {
                        DPAA_PMD_ERR("Unable to alloc fq portal");
-                       return -1;
+                       return -EIO;
                }
                rxq->qp = qp;
 
@@ -1279,19 +1269,19 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        struct rte_device *rdev = dev->device;
 
                        dpaa_dev = container_of(rdev, struct rte_dpaa_device,
-                                               device);
+                               device);
                        dev->intr_handle = dpaa_dev->intr_handle;
                        if (rte_intr_vec_list_alloc(dev->intr_handle,
-                                       NULL, dpaa_push_mode_max_queue)) {
+                                       NULL, dpaa_push_queue_max_num())) {
                                DPAA_PMD_ERR("intr_vec alloc failed");
                                return -ENOMEM;
                        }
                        if (rte_intr_nb_efd_set(dev->intr_handle,
-                                       dpaa_push_mode_max_queue))
+                                       dpaa_push_queue_max_num()))
                                return -rte_errno;
 
                        if (rte_intr_max_intr_set(dev->intr_handle,
-                                       dpaa_push_mode_max_queue))
+                                       dpaa_push_queue_max_num()))
                                return -rte_errno;
                }
 
@@ -1339,9 +1329,8 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 RTE_EXPORT_INTERNAL_SYMBOL(dpaa_eth_eventq_attach)
 int
 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
-               int eth_rx_queue_id,
-               u16 ch_id,
-               const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+       int eth_rx_queue_id, u16 ch_id,
+       const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
        int ret;
        u32 flags = 0;
@@ -1349,10 +1338,10 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
        struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
        struct qm_mcc_initfq opts = {0};
 
-       if (dpaa_push_mode_max_queue) {
+       if (dpaa_push_queue_max_num() > 0) {
                DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible");
                DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.",
-                             dpaa_push_mode_max_queue);
+                       dpaa_push_queue_max_num());
                DPAA_PMD_WARN("To disable set DPAA_PUSH_QUEUES_NUMBER to 0");
        }
 
@@ -2581,20 +2570,6 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
                        }
                }
 
-               /* disabling the default push mode for LS1043 */
-               if (dpaa_soc_ver() == SVR_LS1043A_FAMILY)
-                       dpaa_push_mode_max_queue = 0;
-
-               /* if push mode queues to be enabled. Currently we are allowing
-                * only one queue per thread.
-                */
-               if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
-                       dpaa_push_mode_max_queue =
-                                       atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
-                       if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
-                           dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
-               }
-
                is_global_init = 1;
        }
 
-- 
2.25.1
