On Fri, Jun 6, 2014 at 2:37 PM, Andy Zhou <az...@nicira.com> wrote:
> Simplify flow mask cache replacement without using expensive atomic
> memory access to the mask pointers.
>
> Signed-off-by: Andy Zhou <az...@nicira.com>
> ---
>  datapath/flow_table.c | 52 ++++++++++++++++++++++++++++-----------------------
>  1 file changed, 29 insertions(+), 23 deletions(-)
>
> diff --git a/datapath/flow_table.c b/datapath/flow_table.c
> index 58a25c7..fa0f37f 100644
> --- a/datapath/flow_table.c
> +++ b/datapath/flow_table.c
> @@ -554,8 +554,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>  {
>         struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
>         struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
> -       struct mask_cache_entry  *entries, *ce, *del;
> +       struct mask_cache_entry  *entries, *ce;
>         struct sw_flow *flow;
> +       struct sw_flow_mask *cache;
>         u32 hash = skb_hash;
>         int seg;
>
> @@ -566,42 +567,47 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>                 return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
>         }
>
> -       del = NULL;
> +       ce = NULL;
> +       cache = NULL;
>         entries = this_cpu_ptr(tbl->mask_cache);
>
> +       /* Find the cache entry 'ce' to operate on. */
>         for (seg = 0; seg < MC_HASH_SEGS; seg++) {
> -               int index;
> +               int index = hash & (MC_HASH_ENTRIES - 1);
> +               struct mask_cache_entry *e;
>
> -               index = hash & (MC_HASH_ENTRIES - 1);
> -               ce = &entries[index];
> +               e = &entries[index];
> +               if (e->skb_hash == skb_hash) {
> +                       cache = rcu_dereference_ovsl(ma->masks[e->mask_index]);
> +                       if (cache)
> +                               if (tbl_mask_array_find_idx(ma, cache) < 0)
> +                                       cache = NULL;
>
New function tbl_mask_array_find_idx() is called here, but it is not defined.
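For concreteness, here is a rough sketch of what I assume the helper is supposed
to do: linearly scan ma->masks and return the index of the matching mask
pointer, or -1 if the mask is no longer in the array (the ma->max field name is
my guess; please adjust to whatever the real helper uses):

static int tbl_mask_array_find_idx(struct mask_array *ma,
                                   const struct sw_flow_mask *mask)
{
        int i;

        /* Linear scan for a pointer match; -1 means 'mask' has already
         * been removed from the mask array. */
        for (i = 0; i < ma->max; i++) {
                struct sw_flow_mask *t = rcu_dereference_ovsl(ma->masks[i]);

                if (t == mask)
                        return i;
        }

        return -1;
}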

> -               if (ce->skb_hash == skb_hash) {
> -                       struct sw_flow_mask *mask;
> +                       if (!cache)
> +                               e->skb_hash = 0; /* Not a valid cache entry. */
>
> -                       mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
> -                       if (mask) {
> -                               flow = masked_flow_lookup(ti, key, mask,
> -                                                         n_mask_hit);
> -                               if (flow)  /* Found */
> -                                       return flow;
> -
> -                       }
> -                       del = ce;
> +                       ce = e;  /* The best cache replacement candidate. */
>                         break;
>                 }
>
> -               if (!del || (del->skb_hash && !ce->skb_hash) ||
> -                   (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
> -                   !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
> -                       del = ce;
> -               }
> +               if (!ce || e->skb_hash > ce->skb_hash)
> +                       ce = e;  /* A better replacement cache candidate. */
>
>                 hash >>= MC_HASH_SHIFT;
>         }
>
> -       flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);
> +       /* Try cached mask first if a cache entry is found.  */
> +       if (cache) {
> +               flow = masked_flow_lookup(ti, key, cache, n_mask_hit);
> +               if (flow)
> +                       /* Cache hit. */
> +                       return flow;
> +       }
A masked lookup is required for every cache entry hit, so we need to keep this
call inside the loop.
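Roughly, I would expect the loop body to keep the shape it has today, reusing
this patch's variable names (untested sketch, just to show what I mean):

                if (e->skb_hash == skb_hash) {
                        cache = rcu_dereference_ovsl(ma->masks[e->mask_index]);
                        if (cache) {
                                flow = masked_flow_lookup(ti, key, cache,
                                                          n_mask_hit);
                                if (flow)       /* Cache hit. */
                                        return flow;
                        } else {
                                e->skb_hash = 0; /* Not a valid cache entry. */
                        }

                        ce = e; /* Still the best replacement candidate. */
                        break;
                }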

> +
> +       /* Cache miss, do full lookup. */
> +       flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
>         if (flow)
> -               del->skb_hash = skb_hash;
> +               ce->skb_hash = skb_hash;
>
>         return flow;
>  }
> --
> 1.9.1
>