Add rte_pmd_mlx5_pp_rate_table_query() to report the HW packet
pacing rate table size and how many entries are currently in use.

The total comes from the HCA QoS capability
packet_pacing_rate_table_size. The used count is derived by
collecting unique non-zero PP indices across all TX queues.

Signed-off-by: Vincent Jardin <[email protected]>
---
 doc/guides/rel_notes/release_26_03.rst |  5 ++
 drivers/net/mlx5/mlx5_tx.c             | 65 ++++++++++++++++++++++++++
 drivers/net/mlx5/rte_pmd_mlx5.h        | 32 +++++++++++++
 3 files changed, 102 insertions(+)

diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index 5afb2fd6d9..44ff897b65 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -86,6 +86,11 @@ New Features
 
   * Added out-of-place support for CN20K SoC.
 
+* **Updated NVIDIA mlx5 net driver.**
+
+  * Added per-queue Tx rate limiting using hardware packet pacing.
+  * Added PMD-specific API to query per-queue rate limit and rate table capacity.
+
 * **Updated ZTE zxdh ethernet driver.**
 
   * Added support for modifying queue depth.
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index fa57d3ef98..cf3f93e635 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -19,6 +19,7 @@
 
 #include <mlx5_prm.h>
 #include <mlx5_common.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
@@ -886,3 +887,67 @@ int rte_pmd_mlx5_txq_rate_limit_query(uint16_t port_id, uint16_t queue_id,
                                     packet_pacing_rate_limit_index);
        return 0;
 }
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_pmd_mlx5_pp_rate_table_query, 26.03)
+int rte_pmd_mlx5_pp_rate_table_query(uint16_t port_id,
+                                    struct rte_pmd_mlx5_pp_rate_table_info *info)
+{
+       struct rte_eth_dev *dev;
+       struct mlx5_priv *priv;
+       uint16_t used = 0;
+       uint16_t *seen;
+       unsigned int i;
+
+       if (info == NULL)
+               return -EINVAL;
+       if (!rte_eth_dev_is_valid_port(port_id))
+               return -ENODEV;
+       dev = &rte_eth_devices[port_id];
+       priv = dev->data->dev_private;
+       if (!priv->sh->cdev->config.hca_attr.qos.packet_pacing) {
+               rte_errno = ENOTSUP;
+               return -ENOTSUP;
+       }
+       info->total = priv->sh->cdev->config.hca_attr.qos
+                       .packet_pacing_rate_table_size;
+       if (priv->txqs == NULL || priv->txqs_n == 0) {
+               info->used = 0;
+               return 0;
+       }
+       seen = mlx5_malloc(MLX5_MEM_ZERO, priv->txqs_n * sizeof(*seen),
+                          0, SOCKET_ID_ANY);
+       if (seen == NULL)
+               return -ENOMEM;
+       /*
+        * Count unique non-zero PP indices across this port's TX queues.
+        * Note: the count reflects only queues on this port; other ports
+        * sharing the same device may also consume rate table entries.
+        */
+       for (i = 0; i < priv->txqs_n; i++) {
+               struct mlx5_txq_data *txq_data;
+               struct mlx5_txq_ctrl *txq_ctrl;
+               uint16_t pp_id;
+               uint16_t j;
+               bool dup;
+
+               if ((*priv->txqs)[i] == NULL)
+                       continue;
+               txq_data = (*priv->txqs)[i];
+               txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
+               pp_id = txq_ctrl->rl.pp_id;
+               if (pp_id == 0)
+                       continue;
+               dup = false;
+               for (j = 0; j < used; j++) {
+                       if (seen[j] == pp_id) {
+                               dup = true;
+                               break;
+                       }
+               }
+               if (!dup)
+                       seen[used++] = pp_id;
+       }
+       mlx5_free(seen);
+       info->used = used;
+       return 0;
+}
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index 698d7d2032..f7970dd7fb 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -450,6 +450,38 @@ int
 rte_pmd_mlx5_txq_rate_limit_query(uint16_t port_id, uint16_t queue_id,
                                  struct rte_pmd_mlx5_txq_rate_limit_info *info);
 
+/**
+ * Packet pacing rate table capacity information, filled in by
+ * rte_pmd_mlx5_pp_rate_table_query().
+ */
+struct rte_pmd_mlx5_pp_rate_table_info {
+       uint16_t total;         /**< Total HW rate table entries (capacity). */
+       uint16_t used;          /**< Entries consumed by this port's Tx queues. */
+};
+
+/**
+ * Query the HW packet pacing rate table capacity and current usage.
+ *
+ * The ``used`` count reflects only the queried port's TX queues.
+ * Other ports sharing the same physical device may also consume
+ * rate table entries that are not included in this count.
+ *
+ * @param[in] port_id
+ *   Port ID.
+ * @param[out] info
+ *   Rate table capacity information; filled on success.
+ *
+ * @return
+ *   0 on success, negative errno on failure:
+ *   - -ENODEV: invalid port_id.
+ *   - -EINVAL: info is NULL.
+ *   - -ENOTSUP: packet pacing not supported.
+ *   - -ENOMEM: allocation failure.
+ */
+__rte_experimental
+int
+rte_pmd_mlx5_pp_rate_table_query(uint16_t port_id,
+                                struct rte_pmd_mlx5_pp_rate_table_info *info);
+
 /** Type of mlx5 driver event for which custom callback is called. */
 enum rte_pmd_mlx5_driver_event_cb_type {
        /** Called after HW Rx queue is created. */
-- 
2.43.0

Reply via email to