From: Cian Ferriter <[email protected]> Use dp_netdev_pmd_thread->dp_netdev_pmd_thread_ctx to store whether the SMC is enabled. Also implement a single-packet version of SMC lookup.
Signed-off-by: Cian Ferriter <[email protected]> Co-authored-by: Harry van Haaren <[email protected]> Signed-off-by: Harry van Haaren <[email protected]> --- v5: - Move increment of SMC_HITS stat to this patch for compilation fixup - Fixup compile issue on returning 0 instead of NULL for pointer --- lib/dpif-netdev-avx512.c | 19 ++++++++++++++++++- lib/dpif-netdev-private-dfc.h | 8 ++++++++ lib/dpif-netdev-private-thread.h | 2 ++ lib/dpif-netdev.c | 28 ++++++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 1 deletion(-) diff --git a/lib/dpif-netdev-avx512.c b/lib/dpif-netdev-avx512.c index 5bf6cf073..f51155d6e 100644 --- a/lib/dpif-netdev-avx512.c +++ b/lib/dpif-netdev-avx512.c @@ -83,7 +83,10 @@ dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd, /* Check if EMC or SMC are enabled */ struct dfc_cache *cache = &pmd->flow_cache; const uint32_t emc_enabled = pmd->ctx.emc_insert_min != 0; + bool smc_enable_db = pmd->ctx.smc_enable_db; + uint32_t emc_hits = 0; + uint32_t smc_hits = 0; /* a 1 bit in this mask indidcates a hit, so no DPCLS lookup on the pkt. */ uint32_t hwol_emc_smc_hitmask = 0; @@ -113,8 +116,11 @@ dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd, */ key->hash = dpif_netdev_packet_get_rss_hash_orig_pkt(packet, &key->mf); + struct dp_netdev_flow *f = NULL; + if (emc_enabled) { - struct dp_netdev_flow *f = emc_lookup(&cache->emc_cache, key); + f = emc_lookup(&cache->emc_cache, key); + if (f) { rules[i] = &f->cr; emc_hits++; @@ -123,6 +129,16 @@ dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd, } }; + if (smc_enable_db && !f) { + f = smc_lookup_single(pmd, packet, key); + if (f) { + rules[i] = &f->cr; + smc_hits++; + hwol_emc_smc_hitmask |= (1 << i); + continue; + } + } + /* The flow pointer was not found in HWOL/EMC/SMC, so add it to the * dpcls input keys array for batch lookup later. 
*/ @@ -175,6 +191,7 @@ dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd, /* At this point we don't return error anymore, so commit stats here. */ pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_RECV, batch_size); pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_EXACT_HIT, emc_hits); + pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, smc_hits); pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MASKED_HIT, dpcls_key_idx); pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MASKED_LOOKUP, diff --git a/lib/dpif-netdev-private-dfc.h b/lib/dpif-netdev-private-dfc.h index 8f6a4899e..2cee0a38d 100644 --- a/lib/dpif-netdev-private-dfc.h +++ b/lib/dpif-netdev-private-dfc.h @@ -81,6 +81,9 @@ extern "C" { #define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX / \ DEFAULT_EM_FLOW_INSERT_INV_PROB) +/* Forward declaration for SMC function prototype. */ +struct dp_netdev_pmd_thread; + struct emc_entry { struct dp_netdev_flow *flow; struct netdev_flow_key key; /* key.hash used for emc hash value. */ @@ -237,6 +240,11 @@ emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key) return NULL; } +struct dp_netdev_flow * +smc_lookup_single(struct dp_netdev_pmd_thread *pmd, + struct dp_packet *packet, + struct netdev_flow_key *key); + #ifdef __cplusplus } #endif diff --git a/lib/dpif-netdev-private-thread.h b/lib/dpif-netdev-private-thread.h index 18387b81d..c47fe92dc 100644 --- a/lib/dpif-netdev-private-thread.h +++ b/lib/dpif-netdev-private-thread.h @@ -45,6 +45,8 @@ struct dp_netdev_pmd_thread_ctx { struct dp_netdev_rxq *last_rxq; /* EMC insertion probability context for the current processing cycle. 
*/ uint32_t emc_insert_min; + /* Enable the SMC cache from ovsdb config */ + bool smc_enable_db; }; /* Forward declaration for typedef */ diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index e8d47e539..a8e656069 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -5183,6 +5183,8 @@ dpif_netdev_run(struct dpif *dpif) non_pmd->ctx.emc_insert_min = 0; } + non_pmd->ctx.smc_enable_db = dp->smc_enable_db; + for (i = 0; i < port->n_rxq; i++) { if (!netdev_rxq_enabled(port->rxqs[i].rx)) { @@ -5454,6 +5456,8 @@ reload: pmd->ctx.emc_insert_min = 0; } + pmd->ctx.smc_enable_db = pmd->dp->smc_enable_db; + process_packets = dp_netdev_process_rxq_port(pmd, poll_list[i].rxq, poll_list[i].port_no); @@ -6542,6 +6546,30 @@ smc_lookup_batch(struct dp_netdev_pmd_thread *pmd, pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, n_smc_hit); } +struct dp_netdev_flow * +smc_lookup_single(struct dp_netdev_pmd_thread *pmd, + struct dp_packet *packet, + struct netdev_flow_key *key) +{ + const struct cmap_node *flow_node = smc_entry_get(pmd, key->hash); + + if (OVS_LIKELY(flow_node != NULL)) { + struct dp_netdev_flow *flow = NULL; + + CMAP_NODE_FOR_EACH (flow, node, flow_node) { + /* Since we dont have per-port megaflow to check the port + * number, we need to verify that the input ports match. */ + if (OVS_LIKELY(dpcls_rule_matches_key(&flow->cr, key) && + flow->flow.in_port.odp_port == packet->md.in_port.odp_port)) { + + return (void *) flow; + } + } + } + + return NULL; +} + /* Try to process all ('cnt') the 'packets' using only the datapath flow cache * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the * miniflow is copied into 'keys' and the packet pointer is moved at the -- 2.25.1 _______________________________________________ dev mailing list [email protected] https://mail.openvswitch.org/mailman/listinfo/ovs-dev
