Testpmd exposes the "--rxq-share=[N]" parameter, which controls
sharing of Rx queues. Before this patch the logic was that either:

- all queues were assigned to the same share group
  (when N was not passed),
- or ports were grouped in subsets of N ports,
  each subset got different share group index.

The second option did not work well with dynamic representor probing,
where newly probed representors would be assigned to a new share group.

This patch changes the logic in testpmd to dynamically
assign the share group index. Each unique combination of switch
and Rx domain will get a different share group.

Signed-off-by: Dariusz Sosnowski <[email protected]>
---
 app/test-pmd/parameters.c             | 12 ++-----
 app/test-pmd/testpmd.c                | 49 +++++++++++++++++++++++++--
 app/test-pmd/testpmd.h                |  2 +-
 doc/guides/testpmd_app_ug/run_app.rst | 10 +++---
 4 files changed, 54 insertions(+), 19 deletions(-)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 3617860830..5d9a5f2501 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -507,7 +507,7 @@ usage(char* progname)
        printf("  --tx-ip=src,dst: IP addresses in Tx-only mode\n");
        printf("  --tx-udp=src[,dst]: UDP ports in Tx-only mode\n");
        printf("  --eth-link-speed: force link speed.\n");
-       printf("  --rxq-share=X: number of ports per shared Rx queue groups, 
defaults to UINT32_MAX (1 group)\n");
+       printf("  --rxq-share: enable Rx queue sharing per switch and Rx 
domain\n");
        printf("  --disable-link-check: disable check on link status when "
               "starting/stopping ports.\n");
        printf("  --disable-device-start: do not automatically start port\n");
@@ -1579,15 +1579,7 @@ launch_args_parse(int argc, char** argv)
                                rte_exit(EXIT_FAILURE, "txonly-flows must be >= 
1 and <= 64\n");
                        break;
                case TESTPMD_OPT_RXQ_SHARE_NUM:
-                       if (optarg == NULL) {
-                               rxq_share = UINT32_MAX;
-                       } else {
-                               n = atoi(optarg);
-                               if (n >= 0)
-                                       rxq_share = (uint32_t)n;
-                               else
-                                       rte_exit(EXIT_FAILURE, "rxq-share must 
be >= 0\n");
-                       }
+                       rxq_share = 1;
                        break;
                case TESTPMD_OPT_NO_FLUSH_RX_NUM:
                        no_flush_rx = 1;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index aad880aa34..be8e8299e3 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -545,9 +545,17 @@ uint8_t record_core_cycles;
 uint8_t record_burst_stats;
 
 /*
- * Number of ports per shared Rx queue group, 0 disable.
+ * Enable Rx queue sharing between ports in the same switch and Rx domain.
  */
-uint32_t rxq_share;
+uint8_t rxq_share;
+
+struct share_group_slot {
+       uint16_t domain_id;
+       uint16_t rx_domain;
+       uint16_t share_group;
+};
+
+struct share_group_slot share_group_slots[RTE_MAX_ETHPORTS];
 
 unsigned int num_sockets = 0;
 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
@@ -586,6 +594,41 @@ int proc_id;
  */
 unsigned int num_procs = 1;
 
+static uint16_t
+assign_share_group(struct rte_eth_dev_info *dev_info)
+{
+       unsigned int first_free = RTE_DIM(share_group_slots);
+       bool found = false;
+       unsigned int i;
+
+       for (i = 0; i < RTE_DIM(share_group_slots); i++) {
+               if (share_group_slots[i].share_group > 0) {
+                       if (dev_info->switch_info.domain_id == 
share_group_slots[i].domain_id &&
+                           dev_info->switch_info.rx_domain == 
share_group_slots[i].rx_domain) {
+                               found = true;
+                               break;
+                       }
+               } else if (first_free == RTE_DIM(share_group_slots)) {
+                       first_free = i;
+               }
+       }
+
+       if (found)
+               return share_group_slots[i].share_group;
+
+       /*
+        * testpmd assigns all queues on a given port to single share group.
+        * There are RTE_MAX_ETHPORTS share group slots,
+        * so at least one should always be available.
+        */
+       RTE_ASSERT(first_free < RTE_DIM(share_group_slots));
+
+       share_group_slots[first_free].domain_id = 
dev_info->switch_info.domain_id;
+       share_group_slots[first_free].rx_domain = 
dev_info->switch_info.rx_domain;
+       share_group_slots[first_free].share_group = first_free + 1;
+       return share_group_slots[first_free].share_group;
+}
+
 static void
 eth_rx_metadata_negotiate_mp(uint16_t port_id)
 {
@@ -4097,7 +4140,7 @@ rxtx_port_config(portid_t pid)
                if (rxq_share > 0 &&
                    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
                        /* Non-zero share group to enable RxQ share. */
-                       port->rxq[qid].conf.share_group = pid / rxq_share + 1;
+                       port->rxq[qid].conf.share_group = 
assign_share_group(&port->dev_info);
                        port->rxq[qid].conf.share_qid = qid; /* Equal mapping. 
*/
                }
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index af185540c3..9b60ebd7fc 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -675,7 +675,7 @@ extern enum tx_pkt_split tx_pkt_split;
 extern uint8_t txonly_multi_flow;
 extern uint16_t txonly_flows;
 
-extern uint32_t rxq_share;
+extern uint8_t rxq_share;
 
 extern uint16_t nb_pkt_per_burst;
 extern uint16_t nb_pkt_flowgen_clones;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst 
b/doc/guides/testpmd_app_ug/run_app.rst
index ae3ef8cdf8..f4a30e5da9 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -393,13 +393,13 @@ The command line options are:
     Valid range is 1 to 64. Default is 64.
     Reducing this value limits the number of unique UDP source ports generated.
 
-*   ``--rxq-share=[X]``
+*   ``--rxq-share``
 
     Create queues in shared Rx queue mode if device supports.
-    Shared Rx queues are grouped per X ports. X defaults to UINT32_MAX,
-    implies all ports join share group 1. Forwarding engine "shared-rxq"
-    should be used for shared Rx queues. This engine does Rx only and
-    update stream statistics accordingly.
+    Testpmd will assign a unique share group index to each
+    unique combination of switch and Rx domain.
+    Forwarding engine "shared-rxq" should be used for shared Rx queues.
+    This engine does Rx only and updates stream statistics accordingly.
 
 *   ``--eth-link-speed``
 
-- 
2.47.3

Reply via email to