test_stacktrace_map is enhanced so that the bpf program also calls
the bpf_get_stack helper to collect the stack trace. The stack
traces retrieved by bpf_get_stack and bpf_get_stackid are then
compared to ensure that, for the same stack (i.e., the same
stackid hash), the ip addresses are identical.

Signed-off-by: Yonghong Song <y...@fb.com>
---
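Note: below is a minimal user-space sketch, not part of the patch, of
what compare_stack_ips() in test_progs.c verifies: for a given stack
id, the ips recorded by bpf_get_stackid() in stackmap must equal the
ips written by bpf_get_stack() into stack_amap. The helper name
dump_one_stack() and the include paths are illustrative assumptions;
only libbpf wrappers that the test itself already uses are called.

#include <stdio.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <bpf/bpf.h>

/* Hypothetical debug helper: print the first few ips stored for one
 * stack id by both collection methods. smap_fd/amap_fd are the fds
 * obtained via bpf_find_map() for "stackmap" and "stack_amap".
 */
static void dump_one_stack(int smap_fd, int amap_fd)
{
	__u64 ips1[PERF_MAX_STACK_DEPTH], ips2[PERF_MAX_STACK_DEPTH];
	__u32 stackid;
	int i;

	/* take the first stack id currently present in stackmap */
	if (bpf_map_get_next_key(smap_fd, NULL, &stackid) != 0)
		return;
	if (bpf_map_lookup_elem(smap_fd, &stackid, ips1) ||
	    bpf_map_lookup_elem(amap_fd, &stackid, ips2))
		return;
	for (i = 0; i < 4; i++)
		printf("depth %d: stackmap 0x%llx stack_amap 0x%llx\n",
		       i, (unsigned long long)ips1[i],
		       (unsigned long long)ips2[i]);
}
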
 tools/testing/selftests/bpf/test_progs.c          | 41 ++++++++++++++++++++++-
 tools/testing/selftests/bpf/test_stacktrace_map.c | 19 ++++++++--
 2 files changed, 56 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index faadbe2..8aa2844 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -865,9 +865,39 @@ static int compare_map_keys(int map1_fd, int map2_fd)
        return 0;
 }
 
+static int compare_stack_ips(int smap_fd, int amap_fd)
+{
+       int max_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
+       __u32 key, next_key, *cur_key_p, *next_key_p;
+       char val_buf1[max_len], val_buf2[max_len];
+       int i, err;
+
+       cur_key_p = NULL;
+       next_key_p = &key;
+       while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
+               err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
+               if (err)
+                       return err;
+               err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
+               if (err)
+                       return err;
+               for (i = 0; i < max_len; i++) {
+                       if (val_buf1[i] != val_buf2[i])
+                               return -1;
+               }
+               key = *next_key_p;
+               cur_key_p = &key;
+               next_key_p = &next_key;
+       }
+       if (errno != ENOENT)
+               return -1;
+
+       return 0;
+}
+
 static void test_stacktrace_map()
 {
-       int control_map_fd, stackid_hmap_fd, stackmap_fd;
+       int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
        const char *file = "./test_stacktrace_map.o";
        int bytes, efd, err, pmu_fd, prog_fd;
        struct perf_event_attr attr = {};
@@ -925,6 +955,10 @@ static void test_stacktrace_map()
        if (stackmap_fd < 0)
                goto disable_pmu;
 
+       stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+       if (stack_amap_fd < 0)
+               goto disable_pmu;
+
        /* give some time for bpf program run */
        sleep(1);
 
@@ -946,6 +980,11 @@ static void test_stacktrace_map()
                  "err %d errno %d\n", err, errno))
                goto disable_pmu_noerr;
 
+       err = compare_stack_ips(stackmap_fd, stack_amap_fd);
+       if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu_noerr;
+
        goto disable_pmu_noerr;
 disable_pmu:
        error_cnt++;
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/test_stacktrace_map.c
index 76d85c5d..f83c7b6 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/test_stacktrace_map.c
@@ -19,14 +19,21 @@ struct bpf_map_def SEC("maps") stackid_hmap = {
        .type = BPF_MAP_TYPE_HASH,
        .key_size = sizeof(__u32),
        .value_size = sizeof(__u32),
-       .max_entries = 10000,
+       .max_entries = 16384,
 };
 
 struct bpf_map_def SEC("maps") stackmap = {
        .type = BPF_MAP_TYPE_STACK_TRACE,
        .key_size = sizeof(__u32),
        .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
-       .max_entries = 10000,
+       .max_entries = 16384,
+};
+
+struct bpf_map_def SEC("maps") stack_amap = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
+       .max_entries = 16384,
 };
 
 /* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
@@ -44,7 +51,9 @@ struct sched_switch_args {
 SEC("tracepoint/sched/sched_switch")
 int oncpu(struct sched_switch_args *ctx)
 {
+       __u32 max_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
        __u32 key = 0, val = 0, *value_p;
+       void *stack_p;
 
        value_p = bpf_map_lookup_elem(&control_map, &key);
        if (value_p && *value_p)
@@ -52,8 +61,12 @@ int oncpu(struct sched_switch_args *ctx)
 
        /* The size of stackmap and stackid_hmap should be the same */
        key = bpf_get_stackid(ctx, &stackmap, 0);
-       if ((int)key >= 0)
+       if ((int)key >= 0) {
                bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+               stack_p = bpf_map_lookup_elem(&stack_amap, &key);
+               if (stack_p)
+                       bpf_get_stack(ctx, stack_p, max_len, 0);
+       }
 
        return 0;
 }
-- 
2.9.5
