Uprobe programs that modify regs require different runtime assumptions
than those that do not. Mixing !kprobe_write_ctx progs with
kprobe_write_ctx progs via tail calls could break these assumptions.

To address this, reject the combination of !kprobe_write_ctx progs with
kprobe_write_ctx progs in bpf_map_owner_matches(), which prevents the
tail callee from modifying regs unexpectedly.

Also reject kprobe_write_ctx mismatches at program load time, when the
map owner is checked via bpf_check_tail_call(), to prevent bypassing
the above restriction.

Without this check, the above restriction can be bypassed as follows.

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, 4);
        __uint(value_size, 4);
} jmp_table SEC(".maps");

SEC("?kprobe")
int prog_a(struct pt_regs *regs)
{
        regs->ax = 0;
        bpf_tail_call_static(regs, &jmp_table, 0);
        return 0;
}

SEC("?kprobe")
int prog_b(struct pt_regs *regs)
{
        bpf_tail_call_static(regs, &jmp_table, 0);
        return 0;
}

The jmp_table is shared between prog_a and prog_b.

* Load prog_a.
  At this point, owner->kprobe_write_ctx=true.
* Load prog_b.
  At this point, prog_b passes the compatibility check.
* Add prog_a to jmp_table.
* Attach prog_b to a kernel function.

When the kernel function runs, prog_a will unexpectedly modify regs.

Fixes: 7384893d970e ("bpf: Allow uprobe program to change context registers")
Signed-off-by: Leon Hwang <[email protected]>
---
 include/linux/bpf.h |  7 ++++---
 kernel/bpf/core.c   | 30 +++++++++++++++++++++++++-----
 2 files changed, 29 insertions(+), 8 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 05b34a6355b0..dbafed52b2ba 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -285,9 +285,10 @@ struct bpf_list_node_kern {
  */
 struct bpf_map_owner {
        enum bpf_prog_type type;
-       bool jited;
-       bool xdp_has_frags;
-       bool sleepable;
+       u32 jited:1;
+       u32 xdp_has_frags:1;
+       u32 sleepable:1;
+       u32 kprobe_write_ctx:1;
        u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
        const struct btf_type *attach_func_proto;
        enum bpf_attach_type expected_attach_type;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index b24a613d99f2..121a697d4da5 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2390,6 +2390,7 @@ static void bpf_map_owner_init(struct bpf_map_owner *owner, const struct bpf_pro
        owner->jited = fp->jited;
        owner->xdp_has_frags = aux->xdp_has_frags;
        owner->sleepable = fp->sleepable;
+       owner->kprobe_write_ctx = aux->kprobe_write_ctx;
        owner->expected_attach_type = fp->expected_attach_type;
        owner->attach_func_proto = aux->attach_func_proto;
        for_each_cgroup_storage_type(i)
@@ -2397,8 +2398,14 @@ static void bpf_map_owner_init(struct bpf_map_owner *owner, const struct bpf_pro
                        aux->cgroup_storage[i]->cookie : 0;
 }
 
+enum bpf_map_owner_match_type {
+       BPF_MAP_OWNER_MATCH_FOR_INIT,
+       BPF_MAP_OWNER_MATCH_FOR_UPDATE,
+};
+
 static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_prog *fp,
-                                 enum bpf_prog_type prog_type)
+                                 enum bpf_prog_type prog_type,
+                                 enum bpf_map_owner_match_type match)
 {
        struct bpf_map_owner *owner = map->owner;
        struct bpf_prog_aux *aux = fp->aux;
@@ -2411,6 +2418,18 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
            owner->sleepable != fp->sleepable)
                return false;
 
+       switch (match) {
+       case BPF_MAP_OWNER_MATCH_FOR_INIT:
+               if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
+                       return false;
+               break;
+
+       case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
+               if (!owner->kprobe_write_ctx && aux->kprobe_write_ctx)
+                       return false;
+               break;
+       }
+
        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
            owner->expected_attach_type != fp->expected_attach_type)
                return false;
@@ -2436,7 +2455,8 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
 }
 
 static bool __bpf_prog_map_compatible(struct bpf_map *map,
-                                     const struct bpf_prog *fp)
+                                     const struct bpf_prog *fp,
+                                     enum bpf_map_owner_match_type match)
 {
        enum bpf_prog_type prog_type = resolve_prog_type(fp);
        bool ret = false;
@@ -2453,7 +2473,7 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
                bpf_map_owner_init(map->owner, fp, prog_type);
                ret = true;
        } else {
-               ret = bpf_map_owner_matches(map, fp, prog_type);
+               ret = bpf_map_owner_matches(map, fp, prog_type, match);
        }
 err:
        spin_unlock(&map->owner_lock);
@@ -2470,7 +2490,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
        if (bpf_prog_is_dev_bound(fp->aux))
                return false;
 
-       return __bpf_prog_map_compatible(map, fp);
+       return __bpf_prog_map_compatible(map, fp, BPF_MAP_OWNER_MATCH_FOR_UPDATE);
 }
 
 static int bpf_check_tail_call(const struct bpf_prog *fp)
@@ -2485,7 +2505,7 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
                if (!map_type_contains_progs(map))
                        continue;
 
-               if (!__bpf_prog_map_compatible(map, fp)) {
+               if (!__bpf_prog_map_compatible(map, fp, BPF_MAP_OWNER_MATCH_FOR_INIT)) {
                        ret = -EINVAL;
                        goto out;
                }
-- 
2.52.0


Reply via email to