From: Long Wu <long...@corigine.com>

The bonding PMD hardcoded the dedicated hardware Rx/Tx queue sizes
as 128/512 descriptors. This causes the bonding port to fail to
start if a NIC requires more Rx/Tx descriptors than the hardcoded
numbers.

Therefore, use the minimum hardware queue size of the member port
to initialize the dedicated hardware Rx/Tx queues. If the PMD does
not report a minimum queue size, fall back to the default queue size.
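
As background (not part of this patch), a minimal sketch of how a
port's descriptor limits are queried through the ethdev API; the
helper name get_slow_queue_sizes() and the fallback macros are
hypothetical, chosen only for illustration:

    #include <rte_ethdev.h>

    /* Illustrative fallbacks; the patch itself uses the
     * SLOW_RX/TX_QUEUE_HW_DEFAULT_SIZE macros instead.
     */
    #define FALLBACK_RX_DESC 512
    #define FALLBACK_TX_DESC 512

    static int
    get_slow_queue_sizes(uint16_t port_id, uint16_t *nb_rx, uint16_t *nb_tx)
    {
            struct rte_eth_dev_info info;
            int ret;

            ret = rte_eth_dev_info_get(port_id, &info);
            if (ret != 0)
                    return ret;

            /* nb_min == 0 means the PMD does not report a lower bound. */
            *nb_rx = info.rx_desc_lim.nb_min != 0 ?
                            info.rx_desc_lim.nb_min : FALLBACK_RX_DESC;
            *nb_tx = info.tx_desc_lim.nb_min != 0 ?
                            info.tx_desc_lim.nb_min : FALLBACK_TX_DESC;
            return 0;
    }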

Fixes: 112891cd27e5 ("net/bonding: add dedicated HW queues for LACP control")
Cc: tomaszx.kula...@intel.com
Cc: sta...@dpdk.org

Signed-off-by: Long Wu <long...@corigine.com>
Reviewed-by: Chaoyong He <chaoyong...@corigine.com>

---
v3:
* Get the minimal supported queue size from the PMD, rather than
  adding a new API.
v2:
* Adjust some logs following the reviewer's request.
---
 drivers/net/bonding/rte_eth_bond_8023ad.h |  3 +++
 drivers/net/bonding/rte_eth_bond_pmd.c    | 25 ++++++++++++++++++++---
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h
index 5432eafcfe..f827229671 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -35,6 +35,9 @@ extern "C" {
 #define MARKER_TLV_TYPE_INFO                0x01
 #define MARKER_TLV_TYPE_RESP                0x02
 
+#define SLOW_TX_QUEUE_HW_DEFAULT_SIZE       512
+#define SLOW_RX_QUEUE_HW_DEFAULT_SIZE       512
+
 typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint16_t member_id,
                                                  struct rte_mbuf *lacp_pkt);
 
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 91bf2c2345..9d72140b82 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1684,10 +1684,26 @@ member_configure_slow_queue(struct rte_eth_dev *bonding_eth_dev,
        }
 
        if (internals->mode4.dedicated_queues.enabled == 1) {
-               /* Configure slow Rx queue */
+               struct rte_eth_dev_info member_info = {};
+               uint16_t nb_rx_desc = SLOW_RX_QUEUE_HW_DEFAULT_SIZE;
+               uint16_t nb_tx_desc = SLOW_TX_QUEUE_HW_DEFAULT_SIZE;
+
+               errval = rte_eth_dev_info_get(member_eth_dev->data->port_id,
+                               &member_info);
+               if (errval != 0) {
+                       RTE_BOND_LOG(ERR,
+                                       "rte_eth_dev_info_get: port=%d, err (%d)",
+                                       member_eth_dev->data->port_id,
+                                       errval);
+                       return errval;
+               }
 
+               if (member_info.rx_desc_lim.nb_min != 0)
+                       nb_rx_desc = member_info.rx_desc_lim.nb_min;
+
+               /* Configure slow Rx queue */
                errval = rte_eth_rx_queue_setup(member_eth_dev->data->port_id,
-                               internals->mode4.dedicated_queues.rx_qid, 128,
+                               internals->mode4.dedicated_queues.rx_qid, nb_rx_desc,
                                rte_eth_dev_socket_id(member_eth_dev->data->port_id),
                                NULL, port->slow_pool);
                if (errval != 0) {
@@ -1699,8 +1715,11 @@ member_configure_slow_queue(struct rte_eth_dev *bonding_eth_dev,
                        return errval;
                }
 
+               if (member_info.tx_desc_lim.nb_min != 0)
+                       nb_tx_desc = member_info.tx_desc_lim.nb_min;
+
                errval = rte_eth_tx_queue_setup(member_eth_dev->data->port_id,
-                               internals->mode4.dedicated_queues.tx_qid, 512,
+                               internals->mode4.dedicated_queues.tx_qid, nb_tx_desc,
                                rte_eth_dev_socket_id(member_eth_dev->data->port_id),
                                NULL);
                if (errval != 0) {
-- 
2.43.5
