Add a test that attaches a kprobe/kretprobe and verifies that the
ORC stacktrace matches the expected functions.

The test is limited to the ORC unwinder to keep it simple; it is skipped
when CONFIG_UNWINDER_ORC is not set.

Signed-off-by: Jiri Olsa <[email protected]>
---
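A note on what the new subtests verify: check_stacktrace_ips() (presumably
defined earlier in prog_tests/stacktrace_ips.c, it is not part of this diff)
looks up the trace recorded under stack_key and compares the collected IPs
against the expected kernel symbol addresses. A minimal sketch of that
lookup, assuming the usual stackmap value layout of PERF_MAX_STACK_DEPTH
u64 entries (the real helper may differ):

  /* Hypothetical sketch, not the actual helper from this file. */
  #include <linux/types.h>
  #include <linux/perf_event.h>   /* PERF_MAX_STACK_DEPTH */
  #include <bpf/bpf.h>            /* bpf_map_lookup_elem() */

  static int check_ips_sketch(int map_fd, __u32 stack_key, int nr, const __u64 *exp)
  {
          __u64 ips[PERF_MAX_STACK_DEPTH] = {};
          int i, j;

          /* bpf_get_stackid() stored the unwound trace under stack_key */
          if (bpf_map_lookup_elem(map_fd, &stack_key, ips))
                  return -1;

          /* the expected functions must appear, in order, in the trace */
          for (i = 0, j = 0; i < PERF_MAX_STACK_DEPTH && j < nr; i++) {
                  if (ips[i] == exp[j])
                          j++;
          }
          return j == nr ? 0 : -1;
  }
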
 .../selftests/bpf/prog_tests/stacktrace_ips.c | 50 +++++++++++++++++++
 .../selftests/bpf/progs/stacktrace_ips.c      |  7 +++
 2 files changed, 57 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
index c93718dafd9b..852830536109 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
@@ -137,6 +137,52 @@ static void test_stacktrace_ips_raw_tp(void)
        stacktrace_ips__destroy(skel);
 }
 
+static void test_stacktrace_ips_kprobe(bool retprobe)
+{
+       LIBBPF_OPTS(bpf_kprobe_opts, opts,
+               .retprobe = retprobe
+       );
+       LIBBPF_OPTS(bpf_test_run_opts, topts);
+       struct stacktrace_ips *skel;
+
+       skel = stacktrace_ips__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+               return;
+
+       if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+               test__skip();
+               goto cleanup;
+       }
+
+       skel->links.kprobe_test = bpf_program__attach_kprobe_opts(
+                                               skel->progs.kprobe_test,
+                                               "bpf_testmod_stacktrace_test", &opts);
+       if (!ASSERT_OK_PTR(skel->links.kprobe_test, "bpf_program__attach_kprobe_opts"))
+               goto cleanup;
+
+       trigger_module_test_read(1);
+
+       load_kallsyms();
+
+       if (retprobe) {
+               check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+                                    ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+                                    ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+                                    ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+                                    ksym_get_addr("bpf_testmod_test_read"));
+       } else {
+               check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
+                                    ksym_get_addr("bpf_testmod_stacktrace_test"),
+                                    ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+                                    ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+                                    ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+                                    ksym_get_addr("bpf_testmod_test_read"));
+       }
+
+cleanup:
+       stacktrace_ips__destroy(skel);
+}
+
 static void __test_stacktrace_ips(void)
 {
        if (test__start_subtest("kprobe_multi"))
@@ -145,6 +191,10 @@ static void __test_stacktrace_ips(void)
                test_stacktrace_ips_kprobe_multi(true);
        if (test__start_subtest("raw_tp"))
                test_stacktrace_ips_raw_tp();
+       if (test__start_subtest("kprobe"))
+               test_stacktrace_ips_kprobe(false);
+       if (test__start_subtest("kretprobe"))
+               test_stacktrace_ips_kprobe(true);
 }
 #else
 static void __test_stacktrace_ips(void)
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_ips.c b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
index a96c8150d7f5..cae077a4061b 100644
--- a/tools/testing/selftests/bpf/progs/stacktrace_ips.c
+++ b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
@@ -31,6 +31,13 @@ int unused(void)
 
 __u32 stack_key;
 
+SEC("kprobe")
+int kprobe_test(struct pt_regs *ctx)
+{
+       stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+       return 0;
+}
+
 SEC("kprobe.multi")
 int kprobe_multi_test(struct pt_regs *ctx)
 {
-- 
2.52.0
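
The CONFIG_UNWINDER_ORC gate above relies on libbpf's kconfig externs: the
BPF object declares the config symbol as an extern and libbpf resolves it
from the running kernel's config at load time, which is what makes
skel->kconfig->CONFIG_UNWINDER_ORC readable in the test. The BPF-side
declaration presumably looks something like this (sketch only; the actual
line lives in progs/stacktrace_ips.c and is not part of this hunk):

  /* sketch: __kconfig/__weak come from bpf_helpers.h; the value is
   * filled in by libbpf from the running kernel's config at load time.
   */
  extern bool CONFIG_UNWINDER_ORC __kconfig __weak;

The new subtests should then be selectable the usual way, e.g. via
./test_progs -t stacktrace_ips/kprobe and stacktrace_ips/kretprobe.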

