From: Maor Gottlieb <ma...@mellanox.com>

Introducing the following objects:

mlx5_flow_root_namespace: represents the root of a specific flow table
type tree (e.g. NIC receive, FDB, etc.).

mlx5_flow_group: defines the mask of the flow specification.

fs_fte (flow steering flow table entry): defines the value of the
flow specification.

The following describes the relationships between the tree objects:
root_namespace --> priorities --> namespaces -->
priorities --> flow-tables --> flow-groups -->
flow-entries --> destinations

When we create a new object (flow table/flow group/flow table entry), we
call the FW command and then add the related SW object to the tree, as
sketched below.
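
For illustration only, a minimal sketch of the intended call flow (the
helpers are still static in this patch, so this is not a consumer-facing
API yet; the names ns, flow_group_in, match_c, match_v, action, flow_tag
and dest are placeholders, and buffer setup and error handling are
omitted):

	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_rule *rule;

	/* each call issues the FW create command and then links the
	 * new SW node under its parent in the tree
	 */
	ft = mlx5_create_flow_table(ns, 0 /* prio */, 4 /* max_fte */);
	fg = mlx5_create_flow_group(ft, flow_group_in);
	rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_c,
				  match_v, action, flow_tag, &dest);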

When we destroy an object, e.g. by calling mlx5_destroy_flow_table, we
use the tree node destructor to destroy the FW object and remove the
node from the tree.
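
Teardown mirrors creation: each destroy call drops the node's reference
and, once it reaches zero, the tree node destructor issues the FW
destroy command and unlinks the node. A sketch, continuing the example
above:

	mlx5_del_flow_rule(rule);
	mlx5_destroy_flow_group(fg);
	mlx5_destroy_flow_table(ft);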

Signed-off-by: Maor Gottlieb <ma...@mellanox.com>
Signed-off-by: Moni Shoua <mo...@mellanox.com>
Signed-off-by: Matan Barak <mat...@mellanox.com>
Signed-off-by: Saeed Mahameed <sae...@mellanox.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |  464 +++++++++++++++++++++
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h |   23 +
 2 files changed, 487 insertions(+), 0 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index cac0d15..1828351 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -35,6 +35,12 @@
 
 #include "mlx5_core.h"
 #include "fs_core.h"
+#include "fs_cmd.h"
+
+static void del_rule(struct fs_node *node);
+static void del_flow_table(struct fs_node *node);
+static void del_flow_group(struct fs_node *node);
+static void del_fte(struct fs_node *node);
 
 static void tree_init_node(struct fs_node *node,
                           unsigned int refcount,
@@ -207,3 +213,461 @@ static bool compare_match_criteria(u8 match_criteria_enable1,
        return match_criteria_enable1 == match_criteria_enable2 &&
                !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
 }
+
+static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
+{
+       struct fs_node *root;
+       struct mlx5_flow_namespace *ns;
+
+       root = node->root;
+
+       if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
+               pr_warn("mlx5: flow steering node is not in tree or 
garbaged\n");
+               return NULL;
+       }
+
+       ns = container_of(root, struct mlx5_flow_namespace, node);
+       return container_of(ns, struct mlx5_flow_root_namespace, ns);
+}
+
+static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
+{
+       struct mlx5_flow_root_namespace *root = find_root(node);
+
+       if (root)
+               return root->dev;
+       return NULL;
+}
+
+static void del_flow_table(struct fs_node *node)
+{
+       struct mlx5_flow_table *ft;
+       struct mlx5_core_dev *dev;
+       struct fs_prio *prio;
+       int err;
+
+       fs_get_obj(ft, node);
+       dev = get_dev(&ft->node);
+
+       err = mlx5_cmd_destroy_flow_table(dev, ft);
+       if (err)
+               pr_warn("flow steering can't destroy ft\n");
+       fs_get_obj(prio, ft->node.parent);
+       prio->num_ft--;
+}
+
+static void del_rule(struct fs_node *node)
+{
+       struct mlx5_flow_rule *rule;
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_group *fg;
+       struct fs_fte *fte;
+       u32     *match_value;
+       struct mlx5_core_dev *dev = get_dev(node);
+       int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
+       int err;
+
+       match_value = mlx5_vzalloc(match_len);
+       if (!match_value) {
+               pr_warn("failed to allocate inbox\n");
+               return;
+       }
+
+       fs_get_obj(rule, node);
+       fs_get_obj(fte, rule->node.parent);
+       fs_get_obj(fg, fte->node.parent);
+       memcpy(match_value, fte->val, sizeof(fte->val));
+       fs_get_obj(ft, fg->node.parent);
+       list_del(&rule->node.list);
+       fte->dests_size--;
+       if (fte->dests_size) {
+               err = mlx5_cmd_update_fte(dev, ft,
+                                         fg->id, fte);
+               if (err)
+                       pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
+                               __func__, fg->id, fte->index);
+       }
+       kvfree(match_value);
+}
+
+static void del_fte(struct fs_node *node)
+{
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_group *fg;
+       struct mlx5_core_dev *dev;
+       struct fs_fte *fte;
+       int err;
+
+       fs_get_obj(fte, node);
+       fs_get_obj(fg, fte->node.parent);
+       fs_get_obj(ft, fg->node.parent);
+
+       dev = get_dev(&ft->node);
+       err = mlx5_cmd_delete_fte(dev, ft,
+                                 fte->index);
+       if (err)
+               pr_warn("flow steering can't delete fte in index %d of flow 
group id %d\n",
+                       fte->index, fg->id);
+
+       fte->status = 0;
+       fg->num_ftes--;
+}
+
+static void del_flow_group(struct fs_node *node)
+{
+       struct mlx5_flow_group *fg;
+       struct mlx5_flow_table *ft;
+       struct mlx5_core_dev *dev;
+
+       fs_get_obj(fg, node);
+       fs_get_obj(ft, fg->node.parent);
+       dev = get_dev(&ft->node);
+
+       if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+               pr_warn("flow steering can't destroy fg %d of ft %d\n",
+                       fg->id, ft->id);
+}
+
+static struct fs_fte *alloc_fte(u8 action,
+                               u32 flow_tag,
+                               u32 *match_value,
+                               unsigned int index)
+{
+       struct fs_fte *fte;
+
+       fte = kzalloc(sizeof(*fte), GFP_KERNEL);
+       if (!fte)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(fte->val, match_value, sizeof(fte->val));
+       fte->node.type =  FS_TYPE_FLOW_ENTRY;
+       fte->flow_tag = flow_tag;
+       fte->index = index;
+       fte->action = action;
+
+       return fte;
+}
+
+static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
+{
+       struct mlx5_flow_group *fg;
+       void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+                                           create_fg_in, match_criteria);
+       u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
+                                           create_fg_in,
+                                           match_criteria_enable);
+       fg = kzalloc(sizeof(*fg), GFP_KERNEL);
+       if (!fg)
+               return ERR_PTR(-ENOMEM);
+
+       fg->mask.match_criteria_enable = match_criteria_enable;
+       memcpy(&fg->mask.match_criteria, match_criteria,
+              sizeof(fg->mask.match_criteria));
+       fg->node.type =  FS_TYPE_FLOW_GROUP;
+       fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
+                                  start_flow_index);
+       fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
+                               end_flow_index) - fg->start_index + 1;
+       return fg;
+}
+
+static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+                                               enum fs_flow_table_type table_type)
+{
+       struct mlx5_flow_table *ft;
+
+       ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
+       if (!ft)
+               return NULL;
+
+       ft->level = level;
+       ft->node.type = FS_TYPE_FLOW_TABLE;
+       ft->type = table_type;
+       ft->max_fte = max_fte;
+
+       return ft;
+}
+
+static struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+                                                     int prio,
+                                                     int max_fte)
+{
+       struct mlx5_flow_table *ft;
+       int err;
+       int log_table_sz;
+       struct mlx5_flow_root_namespace *root =
+               find_root(&ns->node);
+       struct fs_prio *fs_prio = NULL;
+
+       if (!root) {
+               pr_err("mlx5: flow steering failed to find root of 
namespace\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       fs_prio = find_prio(ns, prio);
+       if (!fs_prio)
+               return ERR_PTR(-EINVAL);
+
+       lock_ref_node(&fs_prio->node);
+       if (fs_prio->num_ft == fs_prio->max_ft) {
+               err = -ENOSPC;
+               goto unlock_prio;
+       }
+
+       ft = alloc_flow_table(find_next_free_level(fs_prio),
+                             roundup_pow_of_two(max_fte),
+                             root->table_type);
+       if (!ft) {
+               err = -ENOMEM;
+               goto unlock_prio;
+       }
+
+       tree_init_node(&ft->node, 1, del_flow_table);
+       log_table_sz = ilog2(ft->max_fte);
+       err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+                                        log_table_sz, &ft->id);
+       if (err)
+               goto free_ft;
+
+       tree_add_node(&ft->node, &fs_prio->node);
+       list_add_tail(&ft->node.list, &fs_prio->node.children);
+       fs_prio->num_ft++;
+       unlock_ref_node(&fs_prio->node);
+
+       return ft;
+
+free_ft:
+       kfree(ft);
+unlock_prio:
+       unlock_ref_node(&fs_prio->node);
+       return ERR_PTR(err);
+}
+
+static struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
+                                                     u32 *fg_in)
+{
+       struct mlx5_flow_group *fg;
+       struct mlx5_core_dev *dev = get_dev(&ft->node);
+       int err;
+
+       if (!dev)
+               return ERR_PTR(-ENODEV);
+
+       fg = alloc_flow_group(fg_in);
+       if (IS_ERR(fg))
+               return fg;
+
+       lock_ref_node(&ft->node);
+       err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+       if (err) {
+               kfree(fg);
+               unlock_ref_node(&ft->node);
+               return ERR_PTR(err);
+       }
+       /* Add node to tree */
+       tree_init_node(&fg->node, 1, del_flow_group);
+       tree_add_node(&fg->node, &ft->node);
+       /* Add node to group list */
+       list_add(&fg->node.list, ft->node.children.prev);
+       unlock_ref_node(&ft->node);
+
+       return fg;
+}
+
+static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_rule *rule;
+
+       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+       if (!rule)
+               return NULL;
+
+       rule->node.type = FS_TYPE_FLOW_DEST;
+       memcpy(&rule->dest_attr, dest, sizeof(*dest));
+
+       return rule;
+}
+
+/* fte should not be deleted while calling this function */
+static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
+                                          struct mlx5_flow_group *fg,
+                                          struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_rule *rule;
+       int err;
+
+       rule = alloc_rule(dest);
+       if (!rule)
+               return ERR_PTR(-ENOMEM);
+
+       fs_get_obj(ft, fg->node.parent);
+       /* Add dest to dests list- added as first element after the head */
+       tree_init_node(&rule->node, 1, del_rule);
+       list_add_tail(&rule->node.list, &fte->node.children);
+       fte->dests_size++;
+       if (fte->dests_size == 1)
+               err = mlx5_cmd_create_fte(get_dev(&ft->node),
+                                         ft, fg->id, fte);
+       else
+               err = mlx5_cmd_update_fte(get_dev(&ft->node),
+                                         ft, fg->id, fte);
+       if (err)
+               goto free_rule;
+
+       fte->status |= FS_FTE_STATUS_EXISTING;
+
+       return rule;
+
+free_rule:
+       list_del(&rule->node.list);
+       kfree(rule);
+       fte->dests_size--;
+       return ERR_PTR(err);
+}
+
+/* Assumed fg is locked */
+static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
+                                      struct list_head **prev)
+{
+       struct fs_fte *fte;
+       unsigned int start = fg->start_index;
+
+       if (prev)
+               *prev = &fg->node.children;
+
+       /* assumed list is sorted by index */
+       fs_for_each_fte(fte, fg) {
+               if (fte->index != start)
+                       return start;
+               start++;
+               if (prev)
+                       *prev = &fte->node.list;
+       }
+
+       return start;
+}
+
+/* prev is output, prev->next = new_fte */
+static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
+                                u32 *match_value,
+                                u8 action,
+                                u32 flow_tag,
+                                struct list_head **prev)
+{
+       struct fs_fte *fte;
+       int index;
+
+       index = get_free_fte_index(fg, prev);
+       fte = alloc_fte(action, flow_tag, match_value, index);
+       if (IS_ERR(fte))
+               return fte;
+
+       return fte;
+}
+
+/* Assuming parent fg(flow table) is locked */
+static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+                                         u32 *match_value,
+                                         u8 action,
+                                         u32 flow_tag,
+                                         struct mlx5_flow_destination *dest)
+{
+       struct fs_fte *fte;
+       struct mlx5_flow_rule *rule;
+       struct mlx5_flow_table *ft;
+       struct list_head *prev;
+
+       lock_ref_node(&fg->node);
+       fs_for_each_fte(fte, fg) {
+               nested_lock_ref_node(&fte->node);
+               if (compare_match_value(&fg->mask, match_value, &fte->val) &&
+                   action == fte->action && flow_tag == fte->flow_tag) {
+                       rule = add_rule_fte(fte, fg, dest);
+                       unlock_ref_node(&fte->node);
+                       if (IS_ERR(rule))
+                               goto unlock_fg;
+                       else
+                               goto add_rule;
+               }
+               unlock_ref_node(&fte->node);
+       }
+       fs_get_obj(ft, fg->node.parent);
+       if (fg->num_ftes >= fg->max_ftes) {
+               rule = ERR_PTR(-ENOSPC);
+               goto unlock_fg;
+       }
+
+       fte = create_fte(fg, match_value, action, flow_tag, &prev);
+       if (IS_ERR(fte)) {
+               rule = (void *)fte;
+               goto unlock_fg;
+       }
+       tree_init_node(&fte->node, 0, del_fte);
+       rule = add_rule_fte(fte, fg, dest);
+       if (IS_ERR(rule)) {
+               kfree(fte);
+               goto unlock_fg;
+       }
+
+       fg->num_ftes++;
+
+       tree_add_node(&fte->node, &fg->node);
+       list_add(&fte->node.list, prev);
+add_rule:
+       tree_add_node(&rule->node, &fte->node);
+unlock_fg:
+       unlock_ref_node(&fg->node);
+       return rule;
+}
+
+static struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+                  u8 match_criteria_enable,
+                  u32 *match_criteria,
+                  u32 *match_value,
+                  u32 action,
+                  u32 flow_tag,
+                  struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_group *g;
+       struct mlx5_flow_rule *rule = ERR_PTR(-EINVAL);
+
+       tree_get_node(&ft->node);
+       lock_ref_node(&ft->node);
+       fs_for_each_fg(g, ft)
+               if (compare_match_criteria(g->mask.match_criteria_enable,
+                                          match_criteria_enable,
+                                          g->mask.match_criteria,
+                                          match_criteria)) {
+                       unlock_ref_node(&ft->node);
+                       rule = add_rule_fg(g, match_value,
+                                          action, flow_tag, dest);
+                       goto put;
+               }
+       unlock_ref_node(&ft->node);
+put:
+       tree_put_node(&ft->node);
+       return rule;
+}
+
+static void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
+{
+       tree_remove_node(&rule->node);
+}
+
+static int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
+{
+       if (tree_remove_node(&ft->node))
+               mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't 
destroyed, refcount > 1\n",
+                              ft->id);
+
+       return 0;
+}
+
+static void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
+{
+       if (tree_remove_node(&fg->node))
+               mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't 
destroyed, refcount > 1\n",
+                              fg->id);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b033714..6c27b8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -69,13 +69,16 @@ struct mlx5_flow_rule {
        struct mlx5_flow_destination            dest_attr;
 };
 
+/* Type of children is mlx5_flow_group */
 struct mlx5_flow_table {
        struct fs_node                  node;
        u32                             id;
+       unsigned int                    max_fte;
        unsigned int                    level;
        enum fs_flow_table_type         type;
 };
 
+/* Type of children is mlx5_flow_rule */
 struct fs_fte {
        struct fs_node                  node;
        u32                             val[MLX5_ST_SZ_DW(fte_match_param)];
@@ -83,15 +86,19 @@ struct fs_fte {
        u32                             flow_tag;
        u32                             index;
        u32                             action;
+       enum fs_fte_status              status;
 };
 
+/* Type of children is mlx5_flow_table/namespace */
 struct fs_prio {
        struct fs_node                  node;
        unsigned int                    max_ft;
        unsigned int                    start_level;
        unsigned int                    prio;
+       unsigned int                    num_ft;
 };
 
+/* Type of children is fs_prio */
 struct mlx5_flow_namespace {
        /* parent == NULL => root ns */
        struct  fs_node                 node;
@@ -102,6 +109,22 @@ struct mlx5_flow_group_mask {
        u32     match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
 };
 
+/* Type of children is fs_fte */
+struct mlx5_flow_group {
+       struct fs_node                  node;
+       struct mlx5_flow_group_mask     mask;
+       u32                             start_index;
+       u32                             max_ftes;
+       u32                             num_ftes;
+       u32                             id;
+};
+
+struct mlx5_flow_root_namespace {
+       struct mlx5_flow_namespace      ns;
+       enum   fs_flow_table_type       table_type;
+       struct mlx5_core_dev            *dev;
+};
+
 #define fs_get_obj(v, _node)  {v = container_of((_node), typeof(*v), node); }
 
 #define fs_list_for_each_entry(pos, root)              \
-- 
1.7.1
