On 7/7/2021 7:35 PM, Gaëtan Rivet wrote:

On Wed, Jul 7, 2021, at 17:05, Eli Britstein wrote:
Port numbers are usually small. Maintain an array of netdev handles indexed
by port number. This speeds up the netdev lookup performed for
netdev_hw_miss_packet_recover().

Reported-by: Cian Ferriter <cian.ferri...@intel.com>
Signed-off-by: Eli Britstein <el...@nvidia.com>
Reviewed-by: Gaetan Rivet <gaet...@nvidia.com>
---
  lib/dpif-netdev.c | 41 +++++++++++++++++++++++++++++++++++++----
  1 file changed, 37 insertions(+), 4 deletions(-)
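
For reference, here is a minimal, self-contained sketch of the scheme the
commit message describes: a fixed-size array indexed directly by the port
number serves as the fast path, with a fallback lookup for ports that do not
fit in the array. The names (netdev_cache, slow_lookup, netdev_lookup) and
the simplified struct netdev are illustrative stand-ins, not the actual
dpif-netdev symbols.

#include <stdint.h>
#include <stdio.h>

#define NETDEV_CACHE_SIZE 1024

struct netdev {
    const char *name;
};

/* Fast path: one slot per "small" port number; NULL when the port is not
 * cached by this thread. */
static struct netdev *netdev_cache[NETDEV_CACHE_SIZE];

/* Stand-in for the slower hash-map based lookup used for port numbers that
 * exceed the array size. */
static struct netdev *
slow_lookup(uint32_t port_no)
{
    (void) port_no;
    return NULL;              /* Pretend the port is unknown. */
}

static struct netdev *
netdev_lookup(uint32_t port_no)
{
    if (port_no < NETDEV_CACHE_SIZE) {
        /* O(1) array access instead of hashing plus bucket walk. */
        return netdev_cache[port_no];
    }
    return slow_lookup(port_no);
}

int
main(void)
{
    static struct netdev eth0 = { "eth0" };

    netdev_cache[3] = &eth0;  /* Filled in when cached ports are (re)loaded. */
    struct netdev *dev = netdev_lookup(3);
    printf("port 3 -> %s\n", dev ? dev->name : "(none)");

    netdev_cache[3] = NULL;   /* Cleared when cached ports are freed. */
    return 0;
}

With DP_PMD_NETDEV_CACHE_SIZE at 1024, the cost is one pointer per slot,
i.e. 8 KiB per PMD thread on a 64-bit build, in exchange for removing the
hash lookup from the per-packet recovery path.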

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 2e654426e..accb23a1a 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -650,6 +650,9 @@ struct dp_netdev_pmd_thread_ctx {
      uint32_t emc_insert_min;
  };

+/* Size of the per-PMD netdev cache, indexed by port number. */
+#define DP_PMD_NETDEV_CACHE_SIZE 1024
+
  /* PMD: Poll modes drivers.  PMD accesses devices via polling to eliminate
   * the performance overhead of interrupt processing.  Therefore netdev can
   * not implement rx-wait for these devices.  dpif-netdev needs to poll
@@ -786,6 +789,7 @@ struct dp_netdev_pmd_thread {
       * other instance will only be accessed by its own pmd thread. */
      struct hmap tnl_port_cache;
      struct hmap send_port_cache;
+    struct netdev *send_netdev_cache[DP_PMD_NETDEV_CACHE_SIZE];

      /* Keep track of detailed PMD performance statistics. */
      struct pmd_perf_stats perf_stats;
@@ -5910,6 +5914,10 @@ pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
          free(tx_port_cached);
      }
      HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) {
+        if (tx_port_cached->port->port_no <
+            ARRAY_SIZE(pmd->send_netdev_cache)) {
+            pmd->send_netdev_cache[tx_port_cached->port->port_no] = NULL;
+        }
          free(tx_port_cached);
      }
  }
@@ -5939,6 +5947,11 @@ pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
              tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
              hmap_insert(&pmd->send_port_cache, &tx_port_cached->node,
                          hash_port_no(tx_port_cached->port->port_no));
+            if (tx_port_cached->port->port_no <
+                ARRAY_SIZE(pmd->send_netdev_cache)) {
+                pmd->send_netdev_cache[tx_port_cached->port->port_no] =
+                    tx_port_cached->port->netdev;
+            }
          }
      }
  }
@@ -6585,6 +6598,7 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
      hmap_init(&pmd->tx_ports);
      hmap_init(&pmd->tnl_port_cache);
      hmap_init(&pmd->send_port_cache);
+    memset(pmd->send_netdev_cache, 0, sizeof pmd->send_netdev_cache);
      cmap_init(&pmd->tx_bonds);
      /* init the 'flow_cache' since there is no
       * actual thread created for NON_PMD_CORE_ID. */
@@ -6603,6 +6617,7 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
      struct dpcls *cls;

      dp_netdev_pmd_flow_flush(pmd);
+    memset(pmd->send_netdev_cache, 0, sizeof pmd->send_netdev_cache);
      hmap_destroy(&pmd->send_port_cache);
      hmap_destroy(&pmd->tnl_port_cache);
      hmap_destroy(&pmd->tx_ports);
@@ -7090,20 +7105,38 @@ smc_lookup_batch(struct dp_netdev_pmd_thread *pmd,
  static struct tx_port * pmd_send_port_cache_lookup(
      const struct dp_netdev_pmd_thread *pmd, odp_port_t port_no);

+OVS_UNUSED
+static inline struct netdev *
+pmd_netdev_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
+                        odp_port_t port_no)
+{
+    struct tx_port *p;
+
+    if (port_no < ARRAY_SIZE(pmd->send_netdev_cache)) {
+        return pmd->send_netdev_cache[port_no];
+    }
+
+    p = pmd_send_port_cache_lookup(pmd, port_no);
+    if (p) {
+        return p->port->netdev;
+    }
+    return NULL;
+}
+
  static inline int
  dp_netdev_hw_flow(const struct dp_netdev_pmd_thread *pmd,
                    odp_port_t port_no OVS_UNUSED,
                    struct dp_packet *packet,
                    struct dp_netdev_flow **flow)
  {
-    struct tx_port *p OVS_UNUSED;
+    struct netdev *netdev OVS_UNUSED;
      uint32_t mark;

  #ifdef ALLOW_EXPERIMENTAL_API /* Packet restoration API required. */
      /* Restore the packet if HW processing was terminated before completion. */
-    p = pmd_send_port_cache_lookup(pmd, port_no);
-    if (OVS_LIKELY(p)) {
-        int err = netdev_hw_miss_packet_recover(p->port->netdev, packet);
+    netdev = pmd_netdev_cache_lookup(pmd, port_no);
+    if (OVS_LIKELY(netdev)) {
+        int err = netdev_hw_miss_packet_recover(netdev, packet);

          if (err && err != EOPNOTSUPP) {
            COVERAGE_INC(datapath_drop_hw_miss_recover);
--
2.28.0.2311.g225365fb51

Hello,

I tested the performance impact of this patch with a partial offload setup.
As reported by pmd-stats-show (average cycles per packet):

Before vxlan-decap: 525 c/p
After vxlan-decap: 542 c/p
After this fix: 530 c/p

Without those fixes, vxlan-decap has a 3.2% negative impact on cycles;
with the fixes, the impact is reduced to 0.95%.
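For reference, these percentages follow directly from the cycle counts
above: (542 - 525) / 525 ≈ 3.2% and (530 - 525) / 525 ≈ 0.95%.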

Since I had to force partial offloads for our hardware, an outside
confirmation on a proper setup would be welcome.

Kind regards,
--
Gaetan Rivet

It has some issues in GitHub Actions. I'll fix and post v2.

_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
