Implement get/put functions for blocks that only take/release a reference
and perform deallocation. These functions are intended to be used by the
unlocked rules update path to always hold a reference to a block while
working with it. They use the new fine-grained locking mechanisms
introduced in previous patches in this set, instead of relying on the
global protection provided by the rtnl lock.
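
For illustration, a minimal sketch of how an unlocked update path might
pin a block with these helpers (tcf_block_refcnt_get()/tcf_block_refcnt_put()
are added by this patch; the surrounding update function is hypothetical):

	/* Hypothetical caller: hold a reference to the block for the
	 * duration of the update instead of relying on rtnl lock to
	 * keep it alive.
	 */
	static int example_rule_update(struct net *net, u32 block_index)
	{
		struct tcf_block *block;

		block = tcf_block_refcnt_get(net, block_index);
		if (!block)
			return -ENOENT;

		/* ... update chains/filters; block cannot be freed here ... */

		tcf_block_refcnt_put(block);
		return 0;
	}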

Extract code that is common with tcf_block_put_ext() into the common
function __tcf_block_put().

Extend tcf_block with an rcu_head to allow safe deallocation when it is
accessed concurrently.
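
As a minimal sketch of the idiom this enables (the reader side below
mirrors tcf_block_refcnt_get() from this patch; only the comments are
added here to spell out the reasoning):

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index); /* may race with last put */
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL; /* refcount already dropped to zero */
	rcu_read_unlock();
	/* Because the block is freed with kfree_rcu(), its memory stays
	 * valid until after rcu_read_unlock(), so reading the refcount
	 * above never touches freed memory.
	 */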

Signed-off-by: Vlad Buslov <vla...@mellanox.com>
Acked-by: Jiri Pirko <j...@mellanox.com>
---
 include/net/sch_generic.h |  1 +
 net/sched/cls_api.c       | 74 ++++++++++++++++++++++++++++++++---------------
 2 files changed, 51 insertions(+), 24 deletions(-)

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 45fee65468d0..931fcdadf64a 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -357,6 +357,7 @@ struct tcf_block {
                struct tcf_chain *chain;
                struct list_head filter_chain_list;
        } chain0;
+       struct rcu_head rcu;
 };
 
 static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 924723fb74f6..0a7a3ace2da9 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -241,7 +241,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain)
                block->chain0.chain = NULL;
        kfree(chain);
        if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt))
-               kfree(block);
+               kfree_rcu(block, rcu);
 }
 
 static void tcf_chain_hold(struct tcf_chain *chain)
@@ -537,6 +537,19 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
        return idr_find(&tn->idr, block_index);
 }
 
+static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
+{
+       struct tcf_block *block;
+
+       rcu_read_lock();
+       block = tcf_block_lookup(net, block_index);
+       if (block && !refcount_inc_not_zero(&block->refcnt))
+               block = NULL;
+       rcu_read_unlock();
+
+       return block;
+}
+
 static void tcf_qdisc_put(struct Qdisc *q, bool rtnl_held)
 {
        if (!q)
@@ -573,6 +586,40 @@ static void tcf_block_put_all_chains(struct tcf_block *block)
        }
 }
 
+static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
+                           struct tcf_block_ext_info *ei)
+{
+       if (refcount_dec_and_test(&block->refcnt)) {
+               /* Flushing/putting all chains will cause the block to be
+                * deallocated when last chain is freed. However, if chain_list
+                * is empty, block has to be manually deallocated. After block
+                * reference counter reached 0, it is no longer possible to
+                * increment it or add new chains to block.
+                */
+               bool free_block = list_empty(&block->chain_list);
+
+               if (tcf_block_shared(block))
+                       tcf_block_remove(block, block->net);
+               if (!free_block)
+                       tcf_block_flush_all_chains(block);
+
+               if (q)
+                       tcf_block_offload_unbind(block, q, ei);
+
+               if (free_block)
+                       kfree_rcu(block, rcu);
+               else
+                       tcf_block_put_all_chains(block);
+       } else if (q) {
+               tcf_block_offload_unbind(block, q, ei);
+       }
+}
+
+static void tcf_block_refcnt_put(struct tcf_block *block)
+{
+       __tcf_block_put(block, NULL, NULL);
+}
+
 /* Find tcf block.
  * Set q, parent, cl when appropriate.
  */
@@ -795,7 +842,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                if (tcf_block_shared(block))
                        tcf_block_remove(block, net);
 err_block_insert:
-               kfree(block);
+               kfree_rcu(block, rcu);
        } else {
                refcount_dec(&block->refcnt);
        }
@@ -835,28 +882,7 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
        tcf_chain0_head_change_cb_del(block, ei);
        tcf_block_owner_del(block, q, ei->binder_type);
 
-       if (refcount_dec_and_test(&block->refcnt)) {
-               /* Flushing/putting all chains will cause the block to be
-                * deallocated when last chain is freed. However, if chain_list
-                * is empty, block has to be manually deallocated. After block
-                * reference counter reached 0, it is no longer possible to
-                * increment it or add new chains to block.
-                */
-               bool free_block = list_empty(&block->chain_list);
-
-               if (tcf_block_shared(block))
-                       tcf_block_remove(block, block->net);
-               if (!free_block)
-                       tcf_block_flush_all_chains(block);
-               tcf_block_offload_unbind(block, q, ei);
-
-               if (free_block)
-                       kfree(block);
-               else
-                       tcf_block_put_all_chains(block);
-       } else {
-               tcf_block_offload_unbind(block, q, ei);
-       }
+       __tcf_block_put(block, q, ei);
 }
 EXPORT_SYMBOL(tcf_block_put_ext);
 
-- 
2.7.5
