Add a test that verifies parallel execution of the uprobe and
attach/detach of an optimized uprobe on it work properly.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 .../selftests/bpf/prog_tests/uprobe_syscall.c | 74 +++++++++++++++++++
 1 file changed, 74 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
index d648bf8eca64..5c10cf173e6d 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
@@ -600,6 +600,78 @@ static void test_uprobe_usdt(void)
        uprobe_syscall_executed__destroy(skel);
 }
 
+static volatile bool race_stop; /* set once by test_uprobe_race() to stop all workers */
+
+static void *worker_trigger(void *arg)
+{
+       unsigned long rounds = 0;
+
+       while (!race_stop) {
+               uprobe_test(); /* hit the probed function; races with attach/detach */
+               rounds++;
+       }
+
+       printf("tid %d trigger rounds: %lu\n", gettid(), rounds);
+       return NULL;
+}
+
+/* Repeatedly attach and detach the uprobe while trigger threads keep hitting it. */
+static void *worker_attach(void *arg)
+{
+       struct uprobe_syscall_executed *skel;
+       unsigned long rounds = 0;
+       long offset = get_uprobe_offset(&uprobe_test); /* signed: negative on error */
+
+       if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+               return NULL;
+       skel = uprobe_syscall_executed__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+               return NULL;
+
+       while (!race_stop) {
+               skel->links.test_uprobe = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe,
+                                       0, "/proc/self/exe", offset, NULL);
+               if (!ASSERT_OK_PTR(skel->links.test_uprobe, "bpf_program__attach_uprobe_opts"))
+                       break;
+
+               bpf_link__destroy(skel->links.test_uprobe);
+               skel->links.test_uprobe = NULL;
+               rounds++;
+       }
+
+       printf("tid %d attach rounds: %lu hits: %d\n", gettid(), rounds, skel->bss->executed);
+       uprobe_syscall_executed__destroy(skel);
+       return NULL;
+}
+
+/* Stress attach/detach of the uprobe racing with threads that trigger it. */
+static void test_uprobe_race(void)
+{
+       int err, i, nr_threads;
+       pthread_t *threads;
+
+       nr_threads = libbpf_num_possible_cpus();
+       if (!ASSERT_GT(nr_threads, 0, "libbpf_num_possible_cpus"))
+               return;
+
+       threads = malloc(sizeof(*threads) * nr_threads);
+       if (!ASSERT_OK_PTR(threads, "malloc"))
+               return;
+       /* Odd-numbered threads trigger the uprobe, even-numbered ones attach/detach it. */
+       for (i = 0; i < nr_threads; i++) {
+               err = pthread_create(&threads[i], NULL, i % 2 ? worker_trigger : worker_attach,
+                                    NULL);
+               if (!ASSERT_OK(err, "pthread_create"))
+                       goto cleanup;
+       }
+       sleep(4); /* let the race run for a while */
+cleanup:
+       race_stop = true;
+       for (nr_threads = i, i = 0; i < nr_threads; i++)
+               pthread_join(threads[i], NULL);
+       free(threads);
+}
+
 static void __test_uprobe_syscall(void)
 {
        if (test__start_subtest("uretprobe_regs_equal"))
@@ -618,6 +690,8 @@ static void __test_uprobe_syscall(void)
                test_uprobe_session();
        if (test__start_subtest("uprobe_usdt"))
                test_uprobe_usdt();
+       if (test__start_subtest("uprobe_race"))
+               test_uprobe_race();
 }
 #else
 static void __test_uprobe_syscall(void)
-- 
2.49.0


Reply via email to