The 'pmd-stats-show' command now reports the number of entries in the EMC, along with how full the cache is as a percentage. For example, with 2048 entries the EMC is reported as 25% full, since its maximum capacity is 8192 entries.
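For illustration, 'ovs-appctl dpif-netdev/pmd-stats-show' output would then look
roughly as follows (a sketch with made-up counter values, following the format
string added by this patch):

  pmd thread numa_id 0 core_id 2:
          emc entries:2048 (25.00% full)
          emc hits:4395834
          megaflow hits:912479
          avg. subtable lookups per hit:1.57
          miss:321
          lost:0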
Signed-off-by: Ciara Loftus <[email protected]>
Signed-off-by: Georg Schmuecking <[email protected]>
Co-authored-by: Georg Schmuecking <[email protected]>
---
 lib/dpif-netdev.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 546a1e9..0b0ca54 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -152,6 +152,7 @@ struct emc_entry {
 struct emc_cache {
     struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
     int sweep_idx;                /* For emc_cache_slow_sweep(). */
+    int n_entries;                /* Number of EMC entries. */
 };
 
 /* Iterate in the exact match cache through every entry that might contain a
@@ -601,7 +602,7 @@ static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                                       struct tx_port *tx, long long now);
 
 static inline bool emc_entry_alive(struct emc_entry *ce);
-static void emc_clear_entry(struct emc_entry *ce);
+static void emc_clear_entry(struct emc_cache *cache, struct emc_entry *ce);
 
 static void
 emc_cache_init(struct emc_cache *flow_cache)
@@ -609,6 +610,7 @@ emc_cache_init(struct emc_cache *flow_cache)
     int i;
 
     flow_cache->sweep_idx = 0;
+    flow_cache->n_entries = 0;
     for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
         flow_cache->entries[i].flow = NULL;
         flow_cache->entries[i].key.hash = 0;
@@ -623,8 +625,9 @@ emc_cache_uninit(struct emc_cache *flow_cache)
     int i;
 
     for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
-        emc_clear_entry(&flow_cache->entries[i]);
+        emc_clear_entry(flow_cache, &flow_cache->entries[i]);
     }
+    flow_cache->n_entries = 0;
 }
 
 /* Check and clear dead flow references slowly (one entry at each
@@ -635,7 +638,7 @@ emc_cache_slow_sweep(struct emc_cache *flow_cache)
     struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];
 
     if (!emc_entry_alive(entry)) {
-        emc_clear_entry(entry);
+        emc_clear_entry(flow_cache, entry);
     }
     flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
 }
@@ -717,9 +720,13 @@ pmd_info_show_stats(struct ds *reply,
     ds_put_cstr(reply, ":\n");
 
     ds_put_format(reply,
-                  "\temc hits:%llu\n\tmegaflow hits:%llu\n"
+                  "\temc entries:%i (%.2f%% full)\n\temc hits:%llu\n"
+                  "\tmegaflow hits:%llu\n"
                   "\tavg. subtable lookups per hit:%.2f\n"
                   "\tmiss:%llu\n\tlost:%llu\n",
+                  pmd->flow_cache.n_entries,
+                  ((double)pmd->flow_cache.n_entries /
+                   (double)EM_FLOW_HASH_ENTRIES) * 100,
                   stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT],
                   stats[DP_STAT_MASKED_HIT] > 0
                   ? (1.0*stats[DP_STAT_LOOKUP_HIT])/stats[DP_STAT_MASKED_HIT]
@@ -1937,21 +1944,25 @@ emc_entry_alive(struct emc_entry *ce)
 }
 
 static void
-emc_clear_entry(struct emc_entry *ce)
+emc_clear_entry(struct emc_cache *flow_cache, struct emc_entry *ce)
 {
     if (ce->flow) {
         dp_netdev_flow_unref(ce->flow);
         ce->flow = NULL;
+        flow_cache->n_entries--;
     }
 }
 
 static inline void
-emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
+emc_change_entry(struct emc_cache *flow_cache, struct emc_entry *ce,
+                 struct dp_netdev_flow *flow,
                  const struct netdev_flow_key *key)
 {
     if (ce->flow != flow) {
         if (ce->flow) {
             dp_netdev_flow_unref(ce->flow);
+        } else {
+            flow_cache->n_entries++;
         }
 
         if (dp_netdev_flow_ref(flow)) {
@@ -1975,7 +1986,7 @@ emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
     EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
         if (netdev_flow_key_equal(&current_entry->key, key)) {
             /* We found the entry with the 'mf' miniflow */
-            emc_change_entry(current_entry, flow, NULL);
+            emc_change_entry(cache, current_entry, flow, NULL);
             return;
         }
 
@@ -1991,7 +2002,7 @@ emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
 
     /* We didn't find the miniflow in the cache.
      * The 'to_be_replaced' entry is where the new flow will be stored */
-    emc_change_entry(to_be_replaced, flow, key);
+    emc_change_entry(cache, to_be_replaced, flow, key);
 }
 
 static inline struct dp_netdev_flow *
-- 
2.4.3
