On Mon, Apr 13, 2026 at 8:29 PM Alexis Lothoré (eBPF Foundation)
<[email protected]> wrote:
>
> Add a basic KASAN test runner that loads and test-runs programs that can
> trigger memory management bugs. The test captures kernel logs and ensures
> that the expected KASAN splat is emitted by searching for the
> corresponding first lines in the report.
>
> This version implements two faulty programs triggering either a
> use-after-free, or an out-of-bounds memory usage. The bugs are
> triggered thanks to some dedicated kfuncs in bpf_testmod.c, but two
> different techniques are used, as some cases can be quite hard to
> trigger in a pure "black box" approach:
> - for reads, we can make the used kfuncs return some faulty pointers
> that ebpf programs will manipulate; as a consequence, they will
> generate legitimate kasan reports
> - applying the same trick for faulty writes is harder, as ebpf programs
> can't write kernel data freely. So ebpf programs can call another
> specific testing kfunc that will alter the shadow memory matching the
> passed memory (e.g. a map). When the program then tries to write to the
> corresponding memory, it will trigger a report as well.
>
> Signed-off-by: Alexis Lothoré (eBPF Foundation) <[email protected]>
> ---
> The way of bringing kasan_poison into bpf_testmod is definitely not
> ideal. But I would like to validate the testing approach (triggering
> real faulty accesses, which is hard on some cases, VS manually poisoning
> BPF-manipulated memory) before eventually making clean bridges between
> KASAN APIs and bpf_testmod.c, if the latter approach is the valid one.
Would it make sense to put these tests into KASAN KUnit tests in
mm/kasan/kasan_test_c.c? I assume there is a kernel API to JIT BPF
programs from the kernel itself?
There, you can just call kasan_poison(), some tests already do this.
And you can also extend the KASAN KUnit test framework to find out
whether the bad access is a read or write, if you want to check this.
> ---
> tools/testing/selftests/bpf/prog_tests/kasan.c | 165
> +++++++++++++++++++++
> tools/testing/selftests/bpf/progs/kasan.c | 146 ++++++++++++++++++
> .../testing/selftests/bpf/test_kmods/bpf_testmod.c | 79 ++++++++++
> 3 files changed, 390 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/kasan.c
> b/tools/testing/selftests/bpf/prog_tests/kasan.c
> new file mode 100644
> index 000000000000..fd628aaa8005
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/kasan.c
> @@ -0,0 +1,165 @@
> +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
> +#include <bpf/bpf.h>
> +#include <fcntl.h>
> +#include <linux/if_ether.h>
> +#include <sys/klog.h>
> +#include <test_progs.h>
> +#include <unpriv_helpers.h>
> +#include "kasan.skel.h"
> +
> +#define SUBTEST_NAME_MAX_LEN 64
> +#define SYSLOG_ACTION_READ_ALL 3
> +#define SYSLOG_ACTION_CLEAR 5
> +
> +#define MAX_LOG_SIZE (8*1024)
> +#define READ_CHUNK_SIZE 128
> +
> +#define KASAN_PATTERN_SLAB_UAF "BUG: KASAN: slab-use-after-free in bpf_prog_"
> +#define KASAN_PATTERN_GLOBAL_OOB "BUG: KASAN: global-out-of-bounds in
> bpf_prog_"
> +
> +static char klog_buffer[MAX_LOG_SIZE];
> +
> +static int read_kernel_logs(char *buf, size_t max_len)
> +{
> + return klogctl(SYSLOG_ACTION_READ_ALL, buf, max_len);
> +}
> +
> +static int clear_kernel_logs(void)
> +{
> + return klogctl(SYSLOG_ACTION_CLEAR, NULL, 0);
> +}
> +
> +static int kernel_logs_have_matching_kasan_report(char *buf, char *pattern,
> + bool is_write, int size)
> +{
> + char *access_desc_start, *access_desc_end, *tmp;
> + char access_log[READ_CHUNK_SIZE];
> + char *kasan_report_start;
> + int hsize, nsize;
> + /* Searched kasan report is valid if
> + * - it contains the expected kasan pattern
> + * - the next line is the description of the faulty access
> + * - faulty access properties match the tested type and size
> + */
> + kasan_report_start = strstr(buf, pattern);
> +
> + if (!kasan_report_start)
> + return 1;
> +
> + /* Find next line */
> + access_desc_start = strchr(kasan_report_start, '\n');
> + if (!access_desc_start)
> + return 1;
> + access_desc_start++;
> +
> + access_desc_end = strchr(access_desc_start, '\n');
> + if (!access_desc_end)
> + return 1;
> +
> + nsize = snprintf(access_log, READ_CHUNK_SIZE, "%s of size %d at addr",
> + is_write ? "Write" : "Read", size);
> +
> + hsize = access_desc_end - access_desc_start;
> + tmp = memmem(access_desc_start, hsize, access_log, nsize);
> +
> + if (!tmp)
> + return 1;
> +
> + return 0;
> +}
> +
> +struct test_spec {
> + char *prog_name;
> + char *expected_report_pattern;
> +};
> +
> +static struct test_spec tests[] = {
> + {
> + .prog_name = "bpf_kasan_uaf",
> + .expected_report_pattern = KASAN_PATTERN_SLAB_UAF
> + },
> + {
> + .prog_name = "bpf_kasan_oob",
> + .expected_report_pattern = KASAN_PATTERN_GLOBAL_OOB
> + }
> +};
> +
> +static void run_test_with_type_and_size(struct kasan *skel,
> + struct test_spec *test, bool is_write,
> + int access_size)
> +{
> + char subtest_name[SUBTEST_NAME_MAX_LEN];
> + struct bpf_program *prog;
> + uint8_t buf[ETH_HLEN];
> + int ret;
> +
> + prog = bpf_object__find_program_by_name(skel->obj, test->prog_name);
> + if (!ASSERT_OK_PTR(prog, "find test prog"))
> + return;
> +
> + snprintf(subtest_name, SUBTEST_NAME_MAX_LEN, "%s_%s_%d",
> + test->prog_name, is_write ? "write" : "read", access_size);
> +
> + if (!test__start_subtest(subtest_name))
> + return;
> +
> + ret = clear_kernel_logs();
> + if (!ASSERT_OK(ret, "reset log buffer"))
> + return;
> +
> + LIBBPF_OPTS(bpf_test_run_opts, topts);
> + topts.sz = sizeof(struct bpf_test_run_opts);
> + topts.data_size_in = ETH_HLEN;
> + topts.data_in = buf;
> + skel->bss->is_write = is_write;
> + skel->bss->access_size = access_size;
> + ret = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
> + if (!ASSERT_OK(ret, "run prog"))
> + return;
> +
> + ret = read_kernel_logs(klog_buffer, MAX_LOG_SIZE);
> + if (ASSERT_GE(ret, 0, "read kernel logs"))
> + ASSERT_OK(kernel_logs_have_matching_kasan_report(
> + klog_buffer, test->expected_report_pattern,
> + is_write, access_size),
> + test->prog_name);
> +}
> +
> +static void run_test_with_type(struct kasan *skel, struct test_spec *test,
> + bool is_write)
> +{
> + run_test_with_type_and_size(skel, test, is_write, 1);
> + run_test_with_type_and_size(skel, test, is_write, 2);
> + run_test_with_type_and_size(skel, test, is_write, 4);
> + run_test_with_type_and_size(skel, test, is_write, 8);
> +}
> +
> +static void run_test(struct kasan *skel, struct test_spec *test)
> +{
> + run_test_with_type(skel, test, false);
> + run_test_with_type(skel, test, true);
> +}
> +
> +void test_kasan(void)
> +{
> + struct test_spec *test;
> + struct kasan *skel;
> + int i;
> +
> + if (!is_jit_enabled() || !get_kasan_jit_enabled()) {
> + test__skip();
> + return;
> + }
> +
> + skel = kasan__open_and_load();
> + if (!ASSERT_OK_PTR(skel, "open and load prog"))
> + return;
> +
> + for (i = 0; i < ARRAY_SIZE(tests); i++) {
> + test = &tests[i];
> +
> + run_test(skel, test);
> + }
> +
> + kasan__destroy(skel);
> +}
> diff --git a/tools/testing/selftests/bpf/progs/kasan.c
> b/tools/testing/selftests/bpf/progs/kasan.c
> new file mode 100644
> index 000000000000..f713c9b7c9ce
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/kasan.c
> @@ -0,0 +1,146 @@
> +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
> +
> +#include <linux/bpf.h>
> +#include <bpf/bpf_helpers.h>
> +#include <bpf/bpf_tracing.h>
> +
> +#define KASAN_SLAB_FREE 0xFB
> +#define KASAN_GLOBAL_REDZONE 0xF9
> +
> +extern __u8 *bpf_kfunc_kasan_uaf_1(void) __ksym;
> +extern __u16 *bpf_kfunc_kasan_uaf_2(void) __ksym;
> +extern __u32 *bpf_kfunc_kasan_uaf_4(void) __ksym;
> +extern __u64 *bpf_kfunc_kasan_uaf_8(void) __ksym;
> +extern __u8 *bpf_kfunc_kasan_oob_1(void) __ksym;
> +extern __u16 *bpf_kfunc_kasan_oob_2(void) __ksym;
> +extern __u32 *bpf_kfunc_kasan_oob_4(void) __ksym;
> +extern __u64 *bpf_kfunc_kasan_oob_8(void) __ksym;
> +extern void bpf_kfunc_kasan_poison(void *mem, __u32 mem__sz, __u8 byte)
> __ksym;
> +
> +int access_size;
> +int is_write;
> +
> +struct kasan_write_val {
> + __u8 data_1;
> + __u16 data_2;
> + __u32 data_4;
> + __u64 data_8;
> +};
> +
> +struct {
> + __uint(type, BPF_MAP_TYPE_ARRAY);
> + __uint(max_entries, 1);
> + __type(key, __u32);
> + __type(value, struct kasan_write_val);
> +} test_map SEC(".maps");
> +
> +static void bpf_kasan_faulty_write(int size, __u8 poison_byte)
> +{
> + struct kasan_write_val *val;
> + __u32 key = 0;
> +
> + val = bpf_map_lookup_elem(&test_map, &key);
> + if (!val)
> + return;
> +
> + bpf_kfunc_kasan_poison(val, sizeof(struct kasan_write_val),
> + poison_byte);
> + switch (size) {
> + case 1:
> + val->data_1 = 0xAA;
> + break;
> + case 2:
> + val->data_2 = 0xAA;
> + break;
> + case 4:
> + val->data_4 = 0xAA;
> + break;
> + case 8:
> + val->data_8 = 0xAA;
> + break;
> + }
> + bpf_kfunc_kasan_poison(val, sizeof(struct kasan_write_val), 0x00);
> +}
> +
> +
> +static int bpf_kasan_uaf_read(int size)
> +{
> + __u8 *result_1;
> + __u16 *result_2;
> + __u32 *result_4;
> + __u64 *result_8;
> + int ret = 0;
> +
> + switch (size) {
> + case 1:
> + result_1 = bpf_kfunc_kasan_uaf_1();
> + ret = result_1[0] ? 1 : 0;
> + break;
> + case 2:
> + result_2 = bpf_kfunc_kasan_uaf_2();
> + ret = result_2[0] ? 1 : 0;
> + break;
> + case 4:
> + result_4 = bpf_kfunc_kasan_uaf_4();
> + ret = result_4[0] ? 1 : 0;
> + break;
> + case 8:
> + result_8 = bpf_kfunc_kasan_uaf_8();
> + ret = result_8[0] ? 1 : 0;
> + break;
> + }
> + return ret;
> +}
> +
> +SEC("tcx/ingress")
> +int bpf_kasan_uaf(struct __sk_buff *skb)
> +{
> + if (is_write) {
> + bpf_kasan_faulty_write(access_size, KASAN_SLAB_FREE);
> + return 0;
> + }
> +
> + return bpf_kasan_uaf_read(access_size);
> +}
> +
> +static int bpf_kasan_oob_read(int size)
> +{
> + __u8 *result_1;
> + __u16 *result_2;
> + __u32 *result_4;
> + __u64 *result_8;
> + int ret = 0;
> +
> + switch (size) {
> + case 1:
> + result_1 = bpf_kfunc_kasan_oob_1();
> + ret = result_1[0] ? 1 : 0;
> + break;
> + case 2:
> + result_2 = bpf_kfunc_kasan_oob_2();
> + ret = result_2[0] ? 1 : 0;
> + break;
> + case 4:
> + result_4 = bpf_kfunc_kasan_oob_4();
> + ret = result_4[0] ? 1 : 0;
> + break;
> + case 8:
> + result_8 = bpf_kfunc_kasan_oob_8();
> + ret = result_8[0] ? 1 : 0;
> + break;
> + }
> + return ret;
> +}
> +
> +SEC("tcx/ingress")
> +int bpf_kasan_oob(struct __sk_buff *skb)
> +{
> + if (is_write) {
> + bpf_kasan_faulty_write(access_size, KASAN_GLOBAL_REDZONE);
> + return 0;
> + }
> +
> + return bpf_kasan_oob_read(access_size);
> +}
> +
> +char LICENSE[] SEC("license") = "GPL";
> diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
> b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
> index d876314a4d67..01554bcbbbb0 100644
> --- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
> +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
> @@ -271,6 +271,76 @@ __bpf_kfunc void
> bpf_kfunc_put_default_trusted_ptr_test(struct prog_test_member
> */
> }
>
> +static void *kasan_uaf(void)
> +{
> + void *p = kmalloc(64, GFP_ATOMIC);
> +
> + if (!p)
> + return NULL;
> + memset(p, 0xAA, 64);
> + kfree(p);
> +
> + return p;
> +}
> +
> +#ifdef CONFIG_KASAN_GENERIC
> +extern void kasan_poison(const void *addr, size_t size, u8 value, bool init);
> +
> +__bpf_kfunc void bpf_kfunc_kasan_poison(void *mem, u32 mem__sz, u8 byte)
> +{
> + kasan_poison(mem, mem__sz, byte, false);
> +}
> +#else
> +__bpf_kfunc void bpf_kfunc_kasan_poison(void *mem, u32 mem__sz, u8 byte) { }
> +#endif
> +
> +__bpf_kfunc u8 *bpf_kfunc_kasan_uaf_1(void)
> +{
> + return kasan_uaf();
> +}
> +
> +__bpf_kfunc u16 *bpf_kfunc_kasan_uaf_2(void)
> +{
> + return kasan_uaf();
> +}
> +
> +__bpf_kfunc u32 *bpf_kfunc_kasan_uaf_4(void)
> +{
> + return kasan_uaf();
> +}
> +
> +__bpf_kfunc u64 *bpf_kfunc_kasan_uaf_8(void)
> +{
> + return kasan_uaf();
> +}
> +
> +static u8 test_oob_buffer[64];
> +
> +static void *bpf_kfunc_kasan_oob(void)
> +{
> + return test_oob_buffer+64;
> +}
> +
> +__bpf_kfunc u8 *bpf_kfunc_kasan_oob_1(void)
> +{
> + return bpf_kfunc_kasan_oob();
> +}
> +
> +__bpf_kfunc u16 *bpf_kfunc_kasan_oob_2(void)
> +{
> + return bpf_kfunc_kasan_oob();
> +}
> +
> +__bpf_kfunc u32 *bpf_kfunc_kasan_oob_4(void)
> +{
> + return bpf_kfunc_kasan_oob();
> +}
> +
> +__bpf_kfunc u64 *bpf_kfunc_kasan_oob_8(void)
> +{
> + return bpf_kfunc_kasan_oob();
> +}
> +
> __bpf_kfunc struct bpf_testmod_ctx *
> bpf_testmod_ctx_create(int *err)
> {
> @@ -740,6 +810,15 @@ BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
> BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
> BTF_ID_FLAGS(func, bpf_kfunc_get_default_trusted_ptr_test);
> BTF_ID_FLAGS(func, bpf_kfunc_put_default_trusted_ptr_test);
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_poison)
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_uaf_1)
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_uaf_2)
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_uaf_4)
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_uaf_8)
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_oob_1)
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_oob_2)
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_oob_4)
> +BTF_ID_FLAGS(func, bpf_kfunc_kasan_oob_8)
> BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
>
> BTF_ID_LIST(bpf_testmod_dtor_ids)
>
> --
> 2.53.0
>