Add a testcase for the ring_buffer__consume_n() API.

The test produces multiple samples in a ring buffer, using a
sys_getpgid() fentry prog, and consumes them from user-space in
batches, rather than consuming all of them greedily as
ring_buffer__consume() does.
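
For reference, the batched consumption pattern exercised by the test is
roughly the following sketch (rb, total_samples and batch_sz are
placeholder names, not identifiers from this patch; it assumes a ring
buffer created with ring_buffer__new() and a sample callback already
registered, and relies on ring_buffer__consume_n() returning the number
of records consumed or a negative error):

  int consumed = 0;

  while (consumed < total_samples) {
          /* consume at most batch_sz records per call */
          int n = ring_buffer__consume_n(rb, batch_sz);

          if (n < 0)
                  break;
          consumed += n;
  }

The selftest below does the same with N_TOT_SAMPLES / N_SAMPLES and
asserts that each batch consumes exactly N_SAMPLES records.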

Link: https://lore.kernel.org/lkml/CAEf4BzaR4zqUpDmj44KNLdpJ=tpa97grvzuzvno5nm6b7ow...@mail.gmail.com
Signed-off-by: Andrea Righi <andrea.ri...@canonical.com>
---
 tools/testing/selftests/bpf/Makefile          |  2 +-
 .../selftests/bpf/prog_tests/ringbuf.c        | 65 +++++++++++++++++++
 .../selftests/bpf/progs/test_ringbuf_n.c      | 52 +++++++++++++++
 3 files changed, 118 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/bpf/progs/test_ringbuf_n.c

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index edc73f8f5aef..6332277edeca 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -455,7 +455,7 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h               \
 LSKELS := fentry_test.c fexit_test.c fexit_sleep.c atomics.c           \
        trace_printk.c trace_vprintk.c map_ptr_kern.c                   \
        core_kern.c core_kern_overflow.c test_ringbuf.c                 \
-       test_ringbuf_map_key.c
+       test_ringbuf_n.c test_ringbuf_map_key.c
 
 # Generate both light skeleton and libbpf skeleton for these
 LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index 48c5695b7abf..7e085bfce9b5 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -13,6 +13,7 @@
 #include <linux/perf_event.h>
 #include <linux/ring_buffer.h>
 #include "test_ringbuf.lskel.h"
+#include "test_ringbuf_n.lskel.h"
 #include "test_ringbuf_map_key.lskel.h"
 
 #define EDONE 7777
@@ -60,6 +61,7 @@ static int process_sample(void *ctx, void *data, size_t len)
 }
 
 static struct test_ringbuf_map_key_lskel *skel_map_key;
+static struct test_ringbuf_n_lskel *skel_n;
 static struct test_ringbuf_lskel *skel;
 static struct ring_buffer *ringbuf;
 
@@ -326,6 +328,67 @@ static void ringbuf_subtest(void)
        test_ringbuf_lskel__destroy(skel);
 }
 
+/*
+ * Test ring_buffer__consume_n() by producing N_TOT_SAMPLES samples in the ring
+ * buffer, via getpgid(), and consuming them in chunks of N_SAMPLES.
+ */
+#define N_TOT_SAMPLES  32
+#define N_SAMPLES      4
+
+/* Sample value used to verify that the callback sees valid data */
+#define SAMPLE_VALUE   42L
+
+static int process_n_sample(void *ctx, void *data, size_t len)
+{
+       struct sample *s = data;
+
+       CHECK(s->value != SAMPLE_VALUE,
+             "sample_value", "exp %ld, got %ld\n", SAMPLE_VALUE, s->value);
+
+       return 0;
+}
+
+static void ringbuf_n_subtest(void)
+{
+       int err, i;
+
+       skel_n = test_ringbuf_n_lskel__open();
+       if (!ASSERT_OK_PTR(skel_n, "test_ringbuf_n_lskel__open"))
+               return;
+
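+       /* Cap the ring buffer at a single page and record only samples
+        * produced by this task (the prog filters on pid).
+        */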
+       skel_n->maps.ringbuf.max_entries = getpagesize();
+       skel_n->bss->pid = getpid();
+
+       err = test_ringbuf_n_lskel__load(skel_n);
+       if (!ASSERT_OK(err, "test_ringbuf_n_lskel__load"))
+               goto cleanup;
+
+       ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,
+                                  process_n_sample, NULL, NULL);
+       if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
+               goto cleanup;
+
+       err = test_ringbuf_n_lskel__attach(skel_n);
+       if (!ASSERT_OK(err, "test_ringbuf_n_lskel__attach"))
+               goto cleanup_ringbuf;
+
+       /* Produce N_TOT_SAMPLES samples in the ring buffer by calling getpgid() */
+       skel_n->bss->value = SAMPLE_VALUE;
+       for (i = 0; i < N_TOT_SAMPLES; i++)
+               syscall(__NR_getpgid);
+
+       /* Consume all samples from the ring buffer in batches of N_SAMPLES */
+       for (i = 0; i < N_TOT_SAMPLES; i += err) {
+               err = ring_buffer__consume_n(ringbuf, N_SAMPLES);
+               if (!ASSERT_EQ(err, N_SAMPLES, "rb_consume"))
+                       goto cleanup_ringbuf;
+       }
+
+cleanup_ringbuf:
+       ring_buffer__free(ringbuf);
+cleanup:
+       test_ringbuf_n_lskel__destroy(skel_n);
+}
+
 static int process_map_key_sample(void *ctx, void *data, size_t len)
 {
        struct sample *s;
@@ -384,6 +447,8 @@ void test_ringbuf(void)
 {
        if (test__start_subtest("ringbuf"))
                ringbuf_subtest();
+       if (test__start_subtest("ringbuf_n"))
+               ringbuf_n_subtest();
        if (test__start_subtest("ringbuf_map_key"))
                ringbuf_map_key_subtest();
 }
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_n.c b/tools/testing/selftests/bpf/progs/test_ringbuf_n.c
new file mode 100644
index 000000000000..b98b5bb20699
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_n.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Andrea Righi <andrea.ri...@canonical.com>
+
+#include <linux/bpf.h>
+#include <sched.h>
+#include <unistd.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define TASK_COMM_LEN 16
+
+struct sample {
+       int pid;
+       int seq;
+       long value;
+       char comm[TASK_COMM_LEN];
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+int pid = 0;
+long value = 0;
+
+/* inner state */
+long seq = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_n(void *ctx)
+{
+       int cur_pid = bpf_get_current_pid_tgid() >> 32;
+       struct sample *sample;
+
+       if (cur_pid != pid)
+               return 0;
+
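+       /* Reserve space for one sample; drop the event if the ring buffer is full */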
+       sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
+       if (!sample)
+               return 0;
+
+       sample->pid = pid;
+       sample->seq = seq++;
+       sample->value = value;
+       bpf_get_current_comm(sample->comm, sizeof(sample->comm));
+
+       bpf_ringbuf_submit(sample, 0);
+
+       return 0;
+}
-- 
2.43.0

