Just confirming: I no longer observe the crash with this patch applied.

On 10 January 2014 16:24, Andy Zhou <[email protected]> wrote:

> Both the mega flow mask's reference counter and the per flow table mask
> list should only be accessed while holding ovs_mutex. However, this is
> not the case in ovs_flow_table_flush(). This patch fixes the bug.
>
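Inline note for anyone following along: the invariant being fixed is that
mask->ref_count and the table's mask_list are both ovs_mutex-protected
state, so any writer has to follow roughly the pattern sketched below. This
is a hypothetical caller, not code from the patch; ovs_lock()/ovs_unlock()
are the existing ovs_mutex wrappers, and the flush call stands in for any
path that can drop the last reference to a mask.

    ovs_lock();                      /* take ovs_mutex */
    ovs_flow_tbl_flush(&dp->table);  /* may drop mask refs and unlink
                                      * masks from the mask list */
    ovs_unlock();                    /* release ovs_mutex */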
> Signed-off-by: Andy Zhou <[email protected]>
> ---
>    v1->v2:
>         Atomic ops can protect the mask's reference counter, but not the
>         mask list. Reworked the ovs_flow_table_flush() implementation
>         instead.
> ---
>  datapath/flow_table.c |   61 ++++++++++++++++++++++++-------------------------
>  1 file changed, 30 insertions(+), 31 deletions(-)
>
> diff --git a/datapath/flow_table.c b/datapath/flow_table.c
> index 4232b82..f26c43f 100644
> --- a/datapath/flow_table.c
> +++ b/datapath/flow_table.c
> @@ -162,29 +162,26 @@ static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
>         kfree(mask);
>  }
>
> -static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
> -{
> -       if (!mask)
> -               return;
> -
> -       BUG_ON(!mask->ref_count);
> -       mask->ref_count--;
> -
> -       if (!mask->ref_count) {
> -               list_del_rcu(&mask->list);
> -               if (deferred)
> -                       call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
> -               else
> -                       kfree(mask);
> -       }
> -}
> -
>  void ovs_flow_free(struct sw_flow *flow, bool deferred)
>  {
>         if (!flow)
>                 return;
>
> -       flow_mask_del_ref(flow->mask, deferred);
> +       if (flow->mask) {
> +               struct sw_flow_mask *mask = flow->mask;
> +
> +               BUG_ON(!mask->ref_count);
> +               mask->ref_count--;
> +
> +               if (!mask->ref_count) {
> +                       ASSERT_OVSL();
> +                       list_del_rcu(&mask->list);
> +                       if (deferred)
> +                               call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
> +                       else
> +                               kfree(mask);
> +               }
> +       }
>
>         if (deferred)
>                 call_rcu(&flow->rcu, rcu_free_flow_callback);
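The ASSERT_OVSL() added here is what will catch regressions: any path that
drops the last mask reference without holding ovs_mutex now triggers a
warning instead of silently corrupting the mask list. For reference, the
assertion is (roughly) this lockdep-backed check from datapath.h:

    /* Roughly as defined in datapath.h: a no-op without lockdep enabled,
     * but it documents and enforces that ovs_mutex is held on this path. */
    #define ASSERT_OVSL()   WARN_ON(unlikely(!lockdep_ovsl_is_held()))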
> @@ -197,12 +194,12 @@ static void free_buckets(struct flex_array *buckets)
>         flex_array_free(buckets);
>  }
>
> -static void __table_instance_destroy(struct table_instance *ti)
> +static void table_instance_free_flows(struct table_instance *ti, bool deferred)
>  {
>         int i;
>
>         if (ti->keep_flows)
> -               goto skip_flows;
> +               return;
>
>         for (i = 0; i < ti->n_buckets; i++) {
>                 struct sw_flow *flow;
> @@ -211,12 +208,14 @@ static void __table_instance_destroy(struct table_instance *ti)
>                 int ver = ti->node_ver;
>
>                 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
> -                       hlist_del(&flow->hash_node[ver]);
> -                       ovs_flow_free(flow, false);
> +                       hlist_del_rcu(&flow->hash_node[ver]);
> +                       ovs_flow_free(flow, deferred);
>                 }
>         }
> +}
>
> -skip_flows:
> +static void __table_instance_destroy(struct table_instance *ti)
> +{
>         free_buckets(ti->buckets);
>         kfree(ti);
>  }
> @@ -262,7 +261,8 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
>  {
>         struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
>
> -       __table_instance_destroy(ti);
> +       free_buckets(ti->buckets);
> +       kfree(ti);
>  }
>
>  static void table_instance_destroy(struct table_instance *ti, bool deferred)
> @@ -270,6 +270,8 @@ static void table_instance_destroy(struct table_instance *ti, bool deferred)
>         if (!ti)
>                 return;
>
> +       table_instance_free_flows(ti, deferred);
> +
>         if (deferred)
>                 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
>         else
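Splitting the flow freeing out of __table_instance_destroy() also switches
the unlink to hlist_del_rcu() with a deferred ovs_flow_free(), which is
what concurrent readers need. A hypothetical reader, modeled loosely on the
existing lookup path, shows why: it walks a bucket under rcu_read_lock(),
so a flow must stay valid for a full grace period after being unlinked.

    rcu_read_lock();
    hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
            /* flow cannot be freed underneath us here, because writers
             * use hlist_del_rcu() plus call_rcu() for the actual free. */
            if (flow->hash == hash)
                    break;
    }
    rcu_read_unlock();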
> @@ -513,16 +515,11 @@ static struct sw_flow_mask *mask_alloc(void)
>
>         mask = kmalloc(sizeof(*mask), GFP_KERNEL);
>         if (mask)
> -               mask->ref_count = 0;
> +               mask->ref_count = 1;
>
>         return mask;
>  }
>
> -static void mask_add_ref(struct sw_flow_mask *mask)
> -{
> -       mask->ref_count++;
> -}
> -
>  static bool mask_equal(const struct sw_flow_mask *a,
>                        const struct sw_flow_mask *b)
>  {
> @@ -563,9 +560,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
>                 mask->key = new->key;
>                 mask->range = new->range;
>                 list_add_rcu(&mask->list, &tbl->mask_list);
> +       } else {
> +               BUG_ON(!mask->ref_count);
> +               mask->ref_count++;
>         }
>
> -       mask_add_ref(mask);
>         flow->mask = mask;
>         return 0;
>  }
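With mask_alloc() now starting at ref_count = 1 and the increment moved
into the reuse branch, the counter always equals the number of flows
pointing at the mask, and every transition happens under ovs_mutex. A
hypothetical mask lifetime, as a trace:

    /* mask = mask_alloc();                ref_count == 1 (flow A's ref)
     * flow_mask_insert() finds a match:   ref_count == 2 (flow B reuses it)
     * ovs_flow_free(flow_a, deferred):    ref_count == 1
     * ovs_flow_free(flow_b, deferred):    ref_count == 0
     *         -> list_del_rcu() + deferred kfree() of the mask
     */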
> --
> 1.7.9.5
>
_______________________________________________
dev mailing list
[email protected]
http://openvswitch.org/mailman/listinfo/dev
