BPF programs can use the devmap with the bpf_redirect_map() helper
routine to forward packets to a net device in the map.
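
For example, an XDP program can select an egress device from a devmap
and redirect packets to it. A minimal sketch (map name, sizes, and the
slot used are illustrative, with helper stubs as in samples/bpf):

  #include <linux/bpf.h>
  #include "bpf_helpers.h"

  /* devmap of candidate egress devices; user space stores the
   * ifindex of a device in each slot via bpf_map_update_elem()
   */
  struct bpf_map_def SEC("maps") tx_port = {
          .type = BPF_MAP_TYPE_DEVMAP,
          .key_size = sizeof(int),
          .value_size = sizeof(int),
          .max_entries = 64,
  };

  SEC("xdp_redirect_map")
  int xdp_redirect_map_prog(struct xdp_md *ctx)
  {
          int key = 0; /* always forward via slot 0 */

          /* returns XDP_REDIRECT on success, XDP_ABORTED if
           * flags is non-zero
           */
          return bpf_redirect_map(&tx_port, key, 0);
  }

The actual forwarding happens later in xdp_do_redirect(), which sees
that ri->map is set and resolves the stored key through
__dev_map_lookup_elem().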

Signed-off-by: John Fastabend <john.fastab...@gmail.com>
Acked-by: Daniel Borkmann <dan...@iogearbox.net>
---
 include/linux/bpf.h      |    3 +++
 include/uapi/linux/bpf.h |    8 ++++++-
 kernel/bpf/devmap.c      |   12 ++++++++++
 kernel/bpf/verifier.c    |    4 +++
 net/core/filter.c        |   54 ++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 80 insertions(+), 1 deletion(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5175729..8c2f3e1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -374,4 +374,7 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
 void bpf_user_rnd_init_once(void);
 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
+/* Map specifics */
+struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
+
 #endif /* _LINUX_BPF_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0a48060..b95f46d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -346,6 +346,11 @@ enum bpf_attach_type {
  *     @flags: bit 0 - if set, redirect to ingress instead of egress
  *             other bits - reserved
  *     Return: TC_ACT_REDIRECT
+ * int bpf_redirect_map(map, key, flags)
+ *     redirect to endpoint in map
+ *     @map: pointer to devmap to do lookup in
+ *     @key: index in map to lookup
+ *     @flags: reserved, must be 0
  *
  * u32 bpf_get_route_realm(skb)
  *     retrieve a dst's tclassid
@@ -569,7 +574,8 @@ enum bpf_attach_type {
        FN(probe_read_str),             \
        FN(get_socket_cookie),          \
        FN(get_socket_uid),             \
-       FN(set_hash),
+       FN(set_hash),                   \
+       FN(redirect_map),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 1a87835..36dc13de 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -159,6 +159,18 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
        return 0;
 }
 
+struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
+{
+       struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+       struct bpf_dtab_netdev *dev;
+
+       if (key >= map->max_entries)
+               return NULL;
+
+       dev = READ_ONCE(dtab->netdev_map[key]);
+       return dev ? dev->dev : NULL;
+}
+
 /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
  * update happens in parallel here a dev_put wont happen until after reading the
  * ifindex.
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 06073ba..1d03956 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1330,6 +1330,10 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
                if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
                        goto error;
                break;
+       case BPF_FUNC_redirect_map:
+               if (map->map_type != BPF_MAP_TYPE_DEVMAP)
+                       goto error;
+               break;
        default:
                break;
        }
diff --git a/net/core/filter.c b/net/core/filter.c
index 441abbb..482bda8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1778,6 +1778,7 @@ static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
 struct redirect_info {
        u32 ifindex;
        u32 flags;
+       struct bpf_map *map;
 };
 
 static DEFINE_PER_CPU(struct redirect_info, redirect_info);
@@ -1791,6 +1792,7 @@ struct redirect_info {
 
        ri->ifindex = ifindex;
        ri->flags = flags;
+       ri->map = NULL;
 
        return TC_ACT_REDIRECT;
 }
@@ -1818,6 +1820,29 @@ int skb_do_redirect(struct sk_buff *skb)
        .arg2_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_3(bpf_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags)
+{
+       struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+
+       if (unlikely(flags))
+               return XDP_ABORTED;
+
+       ri->ifindex = ifindex;
+       ri->flags = flags;
+       ri->map = map;
+
+       return XDP_REDIRECT;
+}
+
+static const struct bpf_func_proto bpf_redirect_map_proto = {
+       .func           = bpf_redirect_map,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
 {
        return task_get_classid(skb);
@@ -2309,14 +2334,41 @@ static int __bpf_tx_xdp(struct net_device *dev, struct xdp_buff *xdp)
        return -EOPNOTSUPP;
 }
 
+int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
+                       struct bpf_prog *xdp_prog)
+{
+       struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+       struct bpf_map *map = ri->map;
+       struct net_device *fwd;
+
+       fwd = __dev_map_lookup_elem(map, ri->ifindex);
+       if (!fwd)
+               goto out;
+
+       ri->ifindex = 0;
+       ri->map = NULL;
+
+       trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT);
+
+       return __bpf_tx_xdp(fwd, xdp);
+out:
+       ri->ifindex = 0;
+       ri->map = NULL;
+       return -EINVAL;
+}
+
 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
                    struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
        struct net_device *fwd;
 
+       if (ri->map)
+               return xdp_do_redirect_map(dev, xdp, xdp_prog);
+
        fwd = dev_get_by_index_rcu(dev_net(dev), ri->ifindex);
        ri->ifindex = 0;
+       ri->map = NULL;
        if (unlikely(!fwd)) {
                bpf_warn_invalid_xdp_redirect(ri->ifindex);
                return -EINVAL;
@@ -2868,6 +2920,8 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
                return &bpf_xdp_adjust_head_proto;
        case BPF_FUNC_redirect:
                return &bpf_xdp_redirect_proto;
+       case BPF_FUNC_redirect_map:
+               return &bpf_redirect_map_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
