From: Eli Cohen <e...@nvidia.com>

Add rules to forward packets to the net device's TIR only if the
destination MAC is equal to the configured MAC. This is required to
prevent the netdevice from receiving traffic that is not destined for
its configured MAC.

Signed-off-by: Eli Cohen <e...@nvidia.com>
Reviewed-by: Parav Pandit <pa...@nvidia.com>
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 76 +++++++++++++++++++++++--------
 1 file changed, 58 insertions(+), 18 deletions(-)

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 61f17ce0892e..e15c7f639490 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -158,7 +158,8 @@ struct mlx5_vdpa_net {
        struct mutex reslock;
        struct mlx5_flow_table *rxft;
        struct mlx5_fc *rx_counter;
-       struct mlx5_flow_handle *rx_rule;
+       struct mlx5_flow_handle *rx_rule_ucast;
+       struct mlx5_flow_handle *rx_rule_mcast;
        bool setup;
        u16 mtu;
        u32 cur_num_vqs;
@@ -1382,21 +1383,33 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_namespace *ns;
+       struct mlx5_flow_spec *spec;
+       void *headers_c;
+       void *headers_v;
+       u8 *dmac_c;
+       u8 *dmac_v;
        int err;
 
-       /* for now, one entry, match all, forward to tir */
-       ft_attr.max_fte = 1;
-       ft_attr.autogroup.max_num_groups = 1;
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+
+       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       ft_attr.max_fte = 2;
+       ft_attr.autogroup.max_num_groups = 2;
 
        ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
        if (!ns) {
-               mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
-               return -EOPNOTSUPP;
+               mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+               err = -EOPNOTSUPP;
+               goto err_ns;
        }
 
        ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
-       if (IS_ERR(ndev->rxft))
-               return PTR_ERR(ndev->rxft);
+       if (IS_ERR(ndev->rxft)) {
+               err = PTR_ERR(ndev->rxft);
+               goto err_ns;
+       }
 
        ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
        if (IS_ERR(ndev->rx_counter)) {
@@ -1404,37 +1417,64 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
                goto err_fc;
        }
 
+       headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+       dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
+       memset(dmac_c, 0xff, ETH_ALEN);
+       headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+       dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
+       ether_addr_copy(dmac_v, ndev->config.mac);
+
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest[0].tir_num = ndev->res.tirn;
        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
-       ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
-       if (IS_ERR(ndev->rx_rule)) {
-               err = PTR_ERR(ndev->rx_rule);
-               ndev->rx_rule = NULL;
-               goto err_rule;
+       ndev->rx_rule_ucast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 2);
+
+       if (IS_ERR(ndev->rx_rule_ucast)) {
+               err = PTR_ERR(ndev->rx_rule_ucast);
+               ndev->rx_rule_ucast = NULL;
+               goto err_rule_ucast;
+       }
+
+       memset(dmac_c, 0, ETH_ALEN);
+       memset(dmac_v, 0, ETH_ALEN);
+       dmac_c[0] = 1;
+       dmac_v[0] = 1;
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       ndev->rx_rule_mcast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 1);
+       if (IS_ERR(ndev->rx_rule_mcast)) {
+               err = PTR_ERR(ndev->rx_rule_mcast);
+               ndev->rx_rule_mcast = NULL;
+               goto err_rule_mcast;
        }
 
+       kvfree(spec);
        return 0;
 
-err_rule:
+err_rule_mcast:
+       mlx5_del_flow_rules(ndev->rx_rule_ucast);
+       ndev->rx_rule_ucast = NULL;
+err_rule_ucast:
        mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
 err_fc:
        mlx5_destroy_flow_table(ndev->rxft);
+err_ns:
+       kvfree(spec);
        return err;
 }
 
 static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
 {
-       if (!ndev->rx_rule)
+       if (!ndev->rx_rule_ucast)
                return;
 
-       mlx5_del_flow_rules(ndev->rx_rule);
+       mlx5_del_flow_rules(ndev->rx_rule_mcast);
+       ndev->rx_rule_mcast = NULL;
+       mlx5_del_flow_rules(ndev->rx_rule_ucast);
+       ndev->rx_rule_ucast = NULL;
        mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
        mlx5_destroy_flow_table(ndev->rxft);
-
-       ndev->rx_rule = NULL;
 }
 
 static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
-- 
2.25.4

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to