This patch adds CD (cuckoo distributor) hit and miss counters to dp_stat_type, so that the PMD statistics output shows the total CD hit and miss counts. This patch depends on the first patch in this series.
CC: Darrell Ball <dball at vmware.com> CC: Jan Scheurich <jan.scheurich at ericsson.com> Signed-off-by: Yipeng Wang <yipeng1.wang at intel.com> Signed-off-by: Antonio Fischetti <antonio.fischetti at intel.com> Co-authored-by: Antonio Fischetti <antonio.fischetti at intel.com> --- lib/dpif-netdev.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 78219ba..5245cb5 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -278,7 +278,7 @@ static void dpcls_remove(struct dpcls *, struct dpcls_rule *); static bool dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[], struct dpcls_rule **rules, size_t cnt, - int *num_lookups_p); + int *num_lookups_p, int *cd_hit); static inline struct dpcls_subtable * dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask); @@ -405,6 +405,8 @@ enum dp_stat_type { DP_STAT_LOST, /* Packets not passed up to the client. */ DP_STAT_LOOKUP_HIT, /* Number of subtable lookups for flow table hits */ + DP_STAT_CD_HIT, /* Packets that hit CD. */ + DP_STAT_CD_MISS, /* Packets that miss CD. 
*/ DP_N_STATS }; @@ -938,6 +940,10 @@ pmd_info_show_stats(struct ds *reply, : 0, stats[DP_STAT_MISS], stats[DP_STAT_LOST]); + ds_put_format(reply, + "\tCD hits:%llu\n\tCD miss:%llu\n", + stats[DP_STAT_CD_HIT], stats[DP_STAT_CD_MISS]); + if (total_cycles == 0) { return; } @@ -2576,7 +2582,7 @@ find_index_in_sub_ptrs(struct dpcls *cls, static struct dp_netdev_flow * dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd, const struct netdev_flow_key *key, - int *lookup_num_p) + int *lookup_num_p, int *cd_hit) { struct dpcls *cls; struct dpcls_rule *rule; @@ -2585,7 +2591,7 @@ dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd, cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); if (OVS_LIKELY(cls)) { - dpcls_lookup(cls, key, &rule, 1, lookup_num_p); + dpcls_lookup(cls, key, &rule, 1, lookup_num_p, cd_hit); netdev_flow = dp_netdev_flow_cast(rule); } return netdev_flow; @@ -2932,7 +2938,7 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd, } ovs_mutex_lock(&pmd->flow_mutex); - netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL); + netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL, NULL); if (!netdev_flow) { if (put->flags & DPIF_FP_CREATE) { if (cmap_count(&pmd->flow_table) < MAX_FLOWS) { @@ -5412,7 +5418,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd, * to be locking everyone out of making flow installs. If we * move to a per-core classifier, it would be reasonable. 
*/ ovs_mutex_lock(&pmd->flow_mutex); - netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL); + netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL, NULL); if (OVS_LIKELY(!netdev_flow)) { netdev_flow = dp_netdev_flow_add(pmd, key, &match, &ufid, add_actions->data, @@ -5444,6 +5450,7 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp = pmd->dp; int miss_cnt = 0, lost_cnt = 0; int lookup_cnt = 0, add_lookup_cnt; + int cd_hit = 0, add_cd_hit; bool any_miss; size_t i; @@ -5454,7 +5461,7 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, /* Get the classifier for the in_port */ cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); if (OVS_LIKELY(cls)) { - any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt); + any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt, &cd_hit); } else { any_miss = true; memset(rules, 0, sizeof(rules)); @@ -5477,9 +5484,10 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, * a rule covering this flow. In this case, it's a lot cheaper * to catch it here than execute a miss. */ netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i], - &add_lookup_cnt); + &add_lookup_cnt, &add_cd_hit); if (netdev_flow) { lookup_cnt += add_lookup_cnt; + cd_hit += add_cd_hit; rules[i] = &netdev_flow->cr; continue; } @@ -5519,6 +5527,9 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, dp_netdev_count_packet(pmd, DP_STAT_LOOKUP_HIT, lookup_cnt); dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt); dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt); + dp_netdev_count_packet(pmd, DP_STAT_CD_HIT, cd_hit); + dp_netdev_count_packet(pmd, DP_STAT_CD_MISS, cnt - cd_hit); + } /* Packets enter the datapath from a port (or from recirculation) here. 
@@ -6484,7 +6495,7 @@ dpcls_rule_matches_key(const struct dpcls_rule *rule, static bool dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[], struct dpcls_rule **rules, const size_t cnt, - int *num_lookups_p) + int *num_lookups_p, int *cd_hit) { /* The received 'cnt' miniflows are the search-keys that will be processed * to find a matching entry into the available subtables. @@ -6511,8 +6522,8 @@ dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[], * The cuckoo distributor lookup pass begin first before go to megaflow * cache. CD hit will return a subtable index to the subtable to lookup. */ - int i; + int cd_match = 0; int data[MAP_BITS]; cd_lookup_bulk_pipe(cls, keys, cnt, &found_map, data); @@ -6528,6 +6539,7 @@ dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[], if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) { rules[i] = rule; lookups_match += 1; + cd_match += 1; goto scnext; } } @@ -6556,6 +6568,9 @@ dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[], if (num_lookups_p) { *num_lookups_p = lookups_match; } + if (cd_hit) { + *cd_hit = cd_match; + } return true; /* All found in CD. */ } -- 2.7.4 _______________________________________________ dev mailing list d...@openvswitch.org https://mail.openvswitch.org/mailman/listinfo/ovs-dev