From: Vlad Buslov <vla...@mellanox.com>

The action API was changed to work with actions and action_idr in a
concurrency-safe manner. However, tcf_del_walker() still iterates over
actions without first taking a reference or idrinfo->lock, and deletes
them directly, disregarding a possible concurrent delete.

Change tcf_del_walker() to take idrinfo->lock while iterating over actions
and use new tcf_idr_release_unsafe() to release them while holding the
lock.

Also, fl_hw_destroy_tmplt() is a blocking function that can be called
while putting a filter chain, so defer it to a work queue.

Signed-off-by: Vlad Buslov <vla...@mellanox.com>
[xiyou.wangc...@gmail.com: heavily modify the code and changelog]
Signed-off-by: Cong Wang <xiyou.wangc...@gmail.com>
---
 net/sched/act_api.c    | 20 +++++++++++++++++++-
 net/sched/cls_flower.c | 13 +++++++++++--
 2 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 6f118d62c731..fac8c769454f 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -246,6 +246,20 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
        goto done;
 }
 
+static int tcf_idr_release_unsafe(struct tc_action *p)
+{
+       if (atomic_read(&p->tcfa_bindcnt) > 0)
+               return -EPERM;
+
+       if (refcount_dec_and_test(&p->tcfa_refcnt)) {
+               idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
+               tcf_action_cleanup(p);
+               return ACT_P_DELETED;
+       }
+
+       return 0;
+}
+
 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
                          const struct tc_action_ops *ops)
 {
@@ -262,15 +276,19 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
        if (nla_put_string(skb, TCA_KIND, ops->kind))
                goto nla_put_failure;
 
+       spin_lock(&idrinfo->lock);
        idr_for_each_entry_ul(idr, p, id) {
-               ret = __tcf_idr_release(p, false, true);
+               ret = tcf_idr_release_unsafe(p);
                if (ret == ACT_P_DELETED) {
                        module_put(ops->owner);
                        n_i++;
                } else if (ret < 0) {
+                       spin_unlock(&idrinfo->lock);
                        goto nla_put_failure;
                }
        }
+       spin_unlock(&idrinfo->lock);
+
        if (nla_put_u32(skb, TCA_FCNT, n_i))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 4b8dd37dd4f8..0ed6630a5049 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -79,6 +79,7 @@ struct fl_flow_tmplt {
        struct fl_flow_key mask;
        struct flow_dissector dissector;
        struct tcf_chain *chain;
+       struct rcu_work rwork;
 };
 
 struct cls_fl_head {
@@ -1437,14 +1438,22 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
        return ERR_PTR(err);
 }
 
-static void fl_tmplt_destroy(void *tmplt_priv)
+static void fl_tmplt_destroy_work(struct work_struct *work)
 {
-       struct fl_flow_tmplt *tmplt = tmplt_priv;
+       struct fl_flow_tmplt *tmplt = container_of(to_rcu_work(work),
+                                                struct fl_flow_tmplt, rwork);
 
        fl_hw_destroy_tmplt(tmplt->chain, tmplt);
        kfree(tmplt);
 }
 
+static void fl_tmplt_destroy(void *tmplt_priv)
+{
+       struct fl_flow_tmplt *tmplt = tmplt_priv;
+
+       tcf_queue_work(&tmplt->rwork, fl_tmplt_destroy_work);
+}
+
 static int fl_dump_key_val(struct sk_buff *skb,
                           void *val, int val_type,
                           void *mask, int mask_type, int len)
-- 
2.14.4

Reply via email to