From: Jiri Pirko <j...@mellanox.com>

During block bind, we need to check the tc offload feature. If it is
disabled yet the block still contains offloaded filters, forbid the
bind. Also forbid registering a callback for a block that already
contains offloaded filters, as playback is not supported now. To keep
track of offloaded filters, a new counter is introduced, along with a
couple of helpers called from the cls_* code. These helpers set and
clear the TCA_CLS_FLAGS_IN_HW flag.
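
As an illustration of the invariant the new helpers maintain, here is
a minimal standalone model (plain userspace C, not kernel code; the
flag value is assumed to mirror TCA_CLS_FLAGS_IN_HW from uapi
pkt_cls.h, and the names only echo the kernel helpers added below):

#include <stdio.h>

#define FLAG_IN_HW (1 << 2)	/* assumed value of TCA_CLS_FLAGS_IN_HW */

struct block_model {
	unsigned int offloadcnt;	/* number of offloaded filters */
};

/* Idempotent: the flag guarantees each filter is counted at most once,
 * no matter how many times a successful offload is reported.
 */
static void offload_inc(struct block_model *b, unsigned int *flags)
{
	if (*flags & FLAG_IN_HW)
		return;
	*flags |= FLAG_IN_HW;
	b->offloadcnt++;
}

static void offload_dec(struct block_model *b, unsigned int *flags)
{
	if (!(*flags & FLAG_IN_HW))
		return;
	*flags &= ~FLAG_IN_HW;
	b->offloadcnt--;
}

int main(void)
{
	struct block_model b = { 0 };
	unsigned int filter_flags = 0;

	offload_inc(&b, &filter_flags);
	offload_inc(&b, &filter_flags);		/* no-op, already in HW */
	printf("offloadcnt=%u\n", b.offloadcnt);	/* prints 1 */
	offload_dec(&b, &filter_flags);
	printf("offloadcnt=%u\n", b.offloadcnt);	/* prints 0 */
	return 0;
}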

Signed-off-by: Jiri Pirko <j...@mellanox.com>
---
v4->v5:
- add tracking of bound devs that are unable to offload and check that
  count before calling the block cbs
v3->v4:
- propagate netdev_ops->ndo_setup_tc error up to tcf_block_offload_bind
  caller
v2->v3:
- new patch
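
Note for driver authors: __tcf_block_cb_register() now returns an
ERR_PTR instead of NULL and tcf_block_cb_register() returns an error
code, so bind handlers should propagate the failure. A rough
driver-side sketch under the new contract (the foo_* names are
hypothetical, not part of this patch):

#include <net/pkt_cls.h>

struct foo_priv {
	struct net_device *dev;		/* hypothetical driver state */
};

static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	return -EOPNOTSUPP;	/* stub: dispatch to flower/u32/... here */
}

static int foo_setup_tc_block(struct foo_priv *priv,
			      struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		/* May now fail with -EOPNOTSUPP when the block already
		 * carries offloaded filters (no playback support yet).
		 */
		return tcf_block_cb_register(f->block, foo_setup_tc_block_cb,
					     priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, foo_setup_tc_block_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}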
---
 include/net/sch_generic.h | 18 +++++++++++
 net/sched/cls_api.c       | 80 ++++++++++++++++++++++++++++++++++++++---------
 net/sched/cls_bpf.c       |  5 ++-
 net/sched/cls_flower.c    |  3 +-
 net/sched/cls_matchall.c  |  3 +-
 net/sched/cls_u32.c       | 13 ++++----
 6 files changed, 99 insertions(+), 23 deletions(-)

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index dba2214..ab86b64 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -289,8 +289,26 @@ struct tcf_block {
        struct list_head cb_list;
        struct list_head owner_list;
        bool keep_dst;
+       unsigned int offloadcnt; /* Number of offloaded filters */
+       unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
 };
 
+static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+{
+       if (*flags & TCA_CLS_FLAGS_IN_HW)
+               return;
+       *flags |= TCA_CLS_FLAGS_IN_HW;
+       block->offloadcnt++;
+}
+
+static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+{
+       if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+               return;
+       *flags &= ~TCA_CLS_FLAGS_IN_HW;
+       block->offloadcnt--;
+}
+
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
        struct qdisc_skb_cb *qcb;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8a130e2..0ff8ae9 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -265,31 +265,66 @@ void tcf_chain_put(struct tcf_chain *chain)
 }
 EXPORT_SYMBOL(tcf_chain_put);
 
-static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
-                                 struct tcf_block_ext_info *ei,
-                                 enum tc_block_command command)
+static bool tcf_block_offload_in_use(struct tcf_block *block)
+{
+       return block->offloadcnt;
+}
+
+static int tcf_block_offload_cmd(struct tcf_block *block,
+                                struct net_device *dev,
+                                struct tcf_block_ext_info *ei,
+                                enum tc_block_command command)
 {
-       struct net_device *dev = q->dev_queue->dev;
        struct tc_block_offload bo = {};
 
-       if (!dev->netdev_ops->ndo_setup_tc)
-               return;
        bo.command = command;
        bo.binder_type = ei->binder_type;
        bo.block = block;
-       dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+       return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
 }
 
-static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-                                  struct tcf_block_ext_info *ei)
+static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
+                                 struct tcf_block_ext_info *ei)
 {
-       tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
+       struct net_device *dev = q->dev_queue->dev;
+       int err;
+
+       if (!dev->netdev_ops->ndo_setup_tc)
+               goto no_offload_dev_inc;
+
+       /* If the tc offload feature is disabled and the block we try to bind
+        * to already has some offloaded filters, forbid the bind.
+        */
+       if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+               return -EOPNOTSUPP;
+
+       err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+       if (err == -EOPNOTSUPP)
+               goto no_offload_dev_inc;
+       return err;
+
+no_offload_dev_inc:
+       if (tcf_block_offload_in_use(block))
+               return -EOPNOTSUPP;
+       block->nooffloaddevcnt++;
+       return 0;
 }
 
 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
 {
-       tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
+       struct net_device *dev = q->dev_queue->dev;
+       int err;
+
+       if (!dev->netdev_ops->ndo_setup_tc)
+               goto no_offload_dev_dec;
+       err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+       if (err == -EOPNOTSUPP)
+               goto no_offload_dev_dec;
+       return;
+
+no_offload_dev_dec:
+       WARN_ON(block->nooffloaddevcnt-- == 0);
 }
 
 static int
@@ -502,10 +537,16 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                                           ei, extack);
        if (err)
                goto err_chain_head_change_cb_add;
-       tcf_block_offload_bind(block, q, ei);
+
+       err = tcf_block_offload_bind(block, q, ei);
+       if (err)
+               goto err_block_offload_bind;
+
        *p_block = block;
        return 0;
 
+err_block_offload_bind:
+       tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
 err_chain_head_change_cb_add:
        tcf_block_owner_del(block, q, ei->binder_type);
 err_block_owner_add:
@@ -637,9 +678,16 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 {
        struct tcf_block_cb *block_cb;
 
+       /* At this point, playback of previous block cb calls is not supported,
+        * so forbid registering to a block which already has some offloaded
+        * filters present.
+        */
+       if (tcf_block_offload_in_use(block))
+               return ERR_PTR(-EOPNOTSUPP);
+
        block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
        if (!block_cb)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        block_cb->cb = cb;
        block_cb->cb_ident = cb_ident;
        block_cb->cb_priv = cb_priv;
@@ -655,7 +703,7 @@ int tcf_block_cb_register(struct tcf_block *block,
        struct tcf_block_cb *block_cb;
 
        block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
-       return block_cb ? 0 : -ENOMEM;
+       return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
 }
 EXPORT_SYMBOL(tcf_block_cb_register);
 
@@ -685,6 +733,10 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
        int ok_count = 0;
        int err;
 
+       /* Make sure all netdevs sharing this block are offload-capable. */
+       if (block->nooffloaddevcnt && err_stop)
+               return -EOPNOTSUPP;
+
        list_for_each_entry(block_cb, &block->cb_list, list) {
                err = block_cb->cb(type, type_data, block_cb->cb_priv);
                if (err) {
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index d79cc50..cf72aef 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -167,13 +167,16 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
        cls_bpf.exts_integrated = obj->exts_integrated;
        cls_bpf.gen_flags = obj->gen_flags;
 
+       if (oldprog)
+               tcf_block_offload_dec(block, &oldprog->gen_flags);
+
        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
        if (prog) {
                if (err < 0) {
                        cls_bpf_offload_cmd(tp, oldprog, prog);
                        return err;
                } else if (err > 0) {
-                       prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
+                       tcf_block_offload_inc(block, &prog->gen_flags);
                }
        }
 
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 6132a73..f61df19 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -229,6 +229,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
 
        tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
                         &cls_flower, false);
+       tcf_block_offload_dec(block, &f->flags);
 }
 
 static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -256,7 +257,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
                fl_hw_destroy_filter(tp, f);
                return err;
        } else if (err > 0) {
-               f->flags |= TCA_CLS_FLAGS_IN_HW;
+               tcf_block_offload_inc(block, &f->flags);
        }
 
        if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 66d4e00..d0e57c8 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -81,6 +81,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
        cls_mall.cookie = cookie;
 
        tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+       tcf_block_offload_dec(block, &head->flags);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
@@ -103,7 +104,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
                mall_destroy_hw_filter(tp, head, cookie);
                return err;
        } else if (err > 0) {
-               head->flags |= TCA_CLS_FLAGS_IN_HW;
+               tcf_block_offload_inc(block, &head->flags);
        }
 
        if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 507859c..020d328 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -529,16 +529,17 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
        return 0;
 }
 
-static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
+static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
 {
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};
 
        tc_cls_common_offload_init(&cls_u32.common, tp);
        cls_u32.command = TC_CLSU32_DELETE_KNODE;
-       cls_u32.knode.handle = handle;
+       cls_u32.knode.handle = n->handle;
 
        tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+       tcf_block_offload_dec(block, &n->flags);
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
@@ -567,10 +568,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 
        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
        if (err < 0) {
-               u32_remove_hw_knode(tp, n->handle);
+               u32_remove_hw_knode(tp, n);
                return err;
        } else if (err > 0) {
-               n->flags |= TCA_CLS_FLAGS_IN_HW;
+               tcf_block_offload_inc(block, &n->flags);
        }
 
        if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
@@ -589,7 +590,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
                        RCU_INIT_POINTER(ht->ht[h],
                                         rtnl_dereference(n->next));
                        tcf_unbind_filter(tp, &n->res);
-                       u32_remove_hw_knode(tp, n->handle);
+                       u32_remove_hw_knode(tp, n);
                        idr_remove_ext(&ht->handle_idr, n->handle);
                        if (tcf_exts_get_net(&n->exts))
                                call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
@@ -682,7 +683,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
                goto out;
 
        if (TC_U32_KEY(ht->handle)) {
-               u32_remove_hw_knode(tp, ht->handle);
+               u32_remove_hw_knode(tp, (struct tc_u_knode *)ht);
                ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
                goto out;
        }
-- 
2.9.5
