attach_probe already verifies manual single-kprobe attaches with
func_name and optional offset.

This commit adds address-based single-kprobe attach subtests for the
two underlying attach paths, legacy tracefs/debugfs and PMU-based
non-legacy. The new subtests resolve SYS_NANOSLEEP_KPROBE_NAME through
kallsyms, pass the result through bpf_kprobe_opts.addr, and verify that
kprobe and kretprobe are still triggered.

Signed-off-by: Hoyeon Lee <[email protected]>
---
 .../selftests/bpf/prog_tests/attach_probe.c   | 49 +++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 9e77e5da7097..64f2ed75779d 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -123,6 +123,51 @@ static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
        test_attach_probe_manual__destroy(skel);
 }
 
+/* manual attach address-based kprobe/kretprobe testings */
+static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
+{
+       DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+       struct bpf_link *kprobe_link, *kretprobe_link;
+       struct test_attach_probe_manual *skel;
+       unsigned long func_addr;
+
+       if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+               return;
+
+       func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
+       if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
+               return;
+
+       skel = test_attach_probe_manual__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
+               return;
+
+       kprobe_opts.attach_mode = attach_mode;
+       kprobe_opts.retprobe = false;
+       kprobe_opts.addr = func_addr;
+       kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+                                                     NULL, &kprobe_opts);
+       if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_by_addr"))
+               goto cleanup;
+       skel->links.handle_kprobe = kprobe_link;
+
+       kprobe_opts.retprobe = true;
+       kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+                                                        NULL, &kprobe_opts);
+       if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_by_addr"))
+               goto cleanup;
+       skel->links.handle_kretprobe = kretprobe_link;
+
+       /* trigger & validate kprobe && kretprobe */
+       usleep(1);
+
+       ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
+       ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
+
+cleanup:
+       test_attach_probe_manual__destroy(skel);
+}
+
 /* attach uprobe/uretprobe long event name testings */
 static void test_attach_uprobe_long_event_name(void)
 {
@@ -416,6 +461,10 @@ void test_attach_probe(void)
                test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
        if (test__start_subtest("manual-link"))
                test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
+       if (test__start_subtest("kprobe-legacy-by-addr"))
+               test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LEGACY);
+       if (test__start_subtest("kprobe-perf-by-addr"))
+               test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
 
        if (test__start_subtest("auto"))
                test_attach_probe_auto(skel);
-- 
2.52.0


Reply via email to