Re: [PATCH bpf-next v2 7/9] samples/bpf: add a test for bpf_get_stack helper

2018-04-19 Thread Yonghong Song



On 4/18/18 9:37 PM, Alexei Starovoitov wrote:

On Wed, Apr 18, 2018 at 09:54:42AM -0700, Yonghong Song wrote:

The test attached a kprobe program to kernel function sys_write.
It tested to get stack for user space, kernel space and user
space with build_id request. It also tested to get user
and kernel stack into the same buffer with back-to-back
bpf_get_stack helper calls.

Whenever the kernel stack is available, the user space
application will check to ensure that sys_write/SyS_write
is part of the stack.

Signed-off-by: Yonghong Song <yhs@fb.com>
---
  samples/bpf/Makefile   |   4 +
  samples/bpf/trace_get_stack_kern.c |  86 +
  samples/bpf/trace_get_stack_user.c | 150 +
  3 files changed, 240 insertions(+)


since perf_read is being refactored out of trace_output_user.c in the previous 
patch
please move it to selftests (instead of bpf_load.c) and move
this whole test to selftests as well.


I put it here since I am attaching to a kprobe so that I can compare 
address. I guess I can still do it by attaching to a kernel tracepoint.

Will move the tests to selftests as suggested.


Re: [PATCH bpf-next v2 7/9] samples/bpf: add a test for bpf_get_stack helper

2018-04-18 Thread Alexei Starovoitov
On Wed, Apr 18, 2018 at 09:54:42AM -0700, Yonghong Song wrote:
> The test attached a kprobe program to kernel function sys_write.
> It tested to get stack for user space, kernel space and user
> space with build_id request. It also tested to get user
> and kernel stack into the same buffer with back-to-back
> bpf_get_stack helper calls.
> 
> Whenever the kernel stack is available, the user space
> application will check to ensure that sys_write/SyS_write
> is part of the stack.
> 
> Signed-off-by: Yonghong Song <yhs@fb.com>
> ---
>  samples/bpf/Makefile   |   4 +
>  samples/bpf/trace_get_stack_kern.c |  86 +
>  samples/bpf/trace_get_stack_user.c | 150 
> +
>  3 files changed, 240 insertions(+)

since perf_read is being refactored out of trace_output_user.c in the previous 
patch
please move it to selftests (instead of bpf_load.c) and move
this whole test to selftests as well.

>  create mode 100644 samples/bpf/trace_get_stack_kern.c
>  create mode 100644 samples/bpf/trace_get_stack_user.c
> 
> diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> index 4d6a6ed..94e7b10 100644
> --- a/samples/bpf/Makefile
> +++ b/samples/bpf/Makefile
> @@ -44,6 +44,7 @@ hostprogs-y += xdp_monitor
>  hostprogs-y += xdp_rxq_info
>  hostprogs-y += syscall_tp
>  hostprogs-y += cpustat
> +hostprogs-y += trace_get_stack
>  
>  # Libbpf dependencies
>  LIBBPF := ../../tools/lib/bpf/bpf.o ../../tools/lib/bpf/nlattr.o
> @@ -95,6 +96,7 @@ xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o
>  xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o
>  syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
>  cpustat-objs := bpf_load.o $(LIBBPF) cpustat_user.o
> +trace_get_stack-objs := bpf_load.o $(LIBBPF) trace_get_stack_user.o
>  
>  # Tell kbuild to always build the programs
>  always := $(hostprogs-y)
> @@ -148,6 +150,7 @@ always += xdp_rxq_info_kern.o
>  always += xdp2skb_meta_kern.o
>  always += syscall_tp_kern.o
>  always += cpustat_kern.o
> +always += trace_get_stack_kern.o
>  
>  HOSTCFLAGS += -I$(objtree)/usr/include
>  HOSTCFLAGS += -I$(srctree)/tools/lib/
> @@ -193,6 +196,7 @@ HOSTLOADLIBES_xdp_monitor += -lelf
>  HOSTLOADLIBES_xdp_rxq_info += -lelf
>  HOSTLOADLIBES_syscall_tp += -lelf
>  HOSTLOADLIBES_cpustat += -lelf
> +HOSTLOADLIBES_trace_get_stack += -lelf
>  
>  # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on 
> cmdline:
>  #  make samples/bpf/ LLC=~/git/llvm/build/bin/llc 
> CLANG=~/git/llvm/build/bin/clang
> diff --git a/samples/bpf/trace_get_stack_kern.c 
> b/samples/bpf/trace_get_stack_kern.c
> new file mode 100644
> index 0000000..665e4ad
> --- /dev/null
> +++ b/samples/bpf/trace_get_stack_kern.c
> @@ -0,0 +1,86 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <linux/ptrace.h>
> +#include <linux/version.h>
> +#include <uapi/linux/bpf.h>
> +#include "bpf_helpers.h"
> +
> +/* Permit pretty deep stack traces */
> +#define MAX_STACK 100
> +struct stack_trace_t {
> + int pid;
> + int kern_stack_size;
> + int user_stack_size;
> + int user_stack_buildid_size;
> + u64 kern_stack[MAX_STACK];
> + u64 user_stack[MAX_STACK];
> + struct bpf_stack_build_id user_stack_buildid[MAX_STACK];
> +};
> +
> +struct bpf_map_def SEC("maps") perfmap = {
> + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
> + .key_size = sizeof(int),
> + .value_size = sizeof(u32),
> + .max_entries = 2,
> +};
> +
> +struct bpf_map_def SEC("maps") stackdata_map = {
> + .type = BPF_MAP_TYPE_PERCPU_ARRAY,
> + .key_size = sizeof(u32),
> + .value_size = sizeof(struct stack_trace_t),
> + .max_entries = 1,
> +};
> +
> +struct bpf_map_def SEC("maps") rawdata_map = {
> + .type = BPF_MAP_TYPE_PERCPU_ARRAY,
> + .key_size = sizeof(u32),
> + .value_size = MAX_STACK * sizeof(u64) * 2,
> + .max_entries = 1,
> +};
> +
> +SEC("kprobe/sys_write")
> +int bpf_prog1(struct pt_regs *ctx)
> +{
> + int max_len, max_buildid_len, usize, ksize, total_size;
> + struct stack_trace_t *data;
> + void *raw_data;
> + u32 key = 0;
> +
> + data = bpf_map_lookup_elem(&stackdata_map, &key);
> + if (!data)
> + return 0;
> +
> + max_len = MAX_STACK * sizeof(u64);
> + max_buildid_len = MAX_STACK * sizeof(struct bpf_stack_build_id);
> + data->pid = bpf_get_current_pid_tgid();
> + data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
> +   max_len, 0);
> + data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
> + BPF_F_USER_STACK);
> + data->user_stack_buildid_size = bpf_get_stack(
> + ctx, data->user_stack_buildid, max_buildid_len,
> + BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
> + bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));
> +
> + /* write both kernel and user stacks to the same buffer */
> + raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
> + if (!raw_data)
> +   

[PATCH bpf-next v2 7/9] samples/bpf: add a test for bpf_get_stack helper

2018-04-18 Thread Yonghong Song
The test attached a kprobe program to kernel function sys_write.
It tested to get stack for user space, kernel space and user
space with build_id request. It also tested to get user
and kernel stack into the same buffer with back-to-back
bpf_get_stack helper calls.

Whenever the kernel stack is available, the user space
application will check to ensure that sys_write/SyS_write
is part of the stack.

Signed-off-by: Yonghong Song <yhs@fb.com>
---
 samples/bpf/Makefile   |   4 +
 samples/bpf/trace_get_stack_kern.c |  86 +
 samples/bpf/trace_get_stack_user.c | 150 +
 3 files changed, 240 insertions(+)
 create mode 100644 samples/bpf/trace_get_stack_kern.c
 create mode 100644 samples/bpf/trace_get_stack_user.c

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4d6a6ed..94e7b10 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -44,6 +44,7 @@ hostprogs-y += xdp_monitor
 hostprogs-y += xdp_rxq_info
 hostprogs-y += syscall_tp
 hostprogs-y += cpustat
+hostprogs-y += trace_get_stack
 
 # Libbpf dependencies
 LIBBPF := ../../tools/lib/bpf/bpf.o ../../tools/lib/bpf/nlattr.o
@@ -95,6 +96,7 @@ xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o
 xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o
 syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
 cpustat-objs := bpf_load.o $(LIBBPF) cpustat_user.o
+trace_get_stack-objs := bpf_load.o $(LIBBPF) trace_get_stack_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -148,6 +150,7 @@ always += xdp_rxq_info_kern.o
 always += xdp2skb_meta_kern.o
 always += syscall_tp_kern.o
 always += cpustat_kern.o
+always += trace_get_stack_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 HOSTCFLAGS += -I$(srctree)/tools/lib/
@@ -193,6 +196,7 @@ HOSTLOADLIBES_xdp_monitor += -lelf
 HOSTLOADLIBES_xdp_rxq_info += -lelf
 HOSTLOADLIBES_syscall_tp += -lelf
 HOSTLOADLIBES_cpustat += -lelf
+HOSTLOADLIBES_trace_get_stack += -lelf
 
 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on 
cmdline:
 #  make samples/bpf/ LLC=~/git/llvm/build/bin/llc 
CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/trace_get_stack_kern.c 
b/samples/bpf/trace_get_stack_kern.c
new file mode 100644
index 0000000..665e4ad
--- /dev/null
+++ b/samples/bpf/trace_get_stack_kern.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* Permit pretty deep stack traces */
+#define MAX_STACK 100
+struct stack_trace_t {
+   int pid;
+   int kern_stack_size;
+   int user_stack_size;
+   int user_stack_buildid_size;
+   u64 kern_stack[MAX_STACK];
+   u64 user_stack[MAX_STACK];
+   struct bpf_stack_build_id user_stack_buildid[MAX_STACK];
+};
+
+struct bpf_map_def SEC("maps") perfmap = {
+   .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+   .key_size = sizeof(int),
+   .value_size = sizeof(u32),
+   .max_entries = 2,
+};
+
+struct bpf_map_def SEC("maps") stackdata_map = {
+   .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+   .key_size = sizeof(u32),
+   .value_size = sizeof(struct stack_trace_t),
+   .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") rawdata_map = {
+   .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+   .key_size = sizeof(u32),
+   .value_size = MAX_STACK * sizeof(u64) * 2,
+   .max_entries = 1,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+   int max_len, max_buildid_len, usize, ksize, total_size;
+   struct stack_trace_t *data;
+   void *raw_data;
+   u32 key = 0;
+
+   data = bpf_map_lookup_elem(&stackdata_map, &key);
+   if (!data)
+   return 0;
+
+   max_len = MAX_STACK * sizeof(u64);
+   max_buildid_len = MAX_STACK * sizeof(struct bpf_stack_build_id);
+   data->pid = bpf_get_current_pid_tgid();
+   data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
+ max_len, 0);
+   data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
+   BPF_F_USER_STACK);
+   data->user_stack_buildid_size = bpf_get_stack(
+   ctx, data->user_stack_buildid, max_buildid_len,
+   BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+   bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));
+
+   /* write both kernel and user stacks to the same buffer */
+   raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
+   if (!raw_data)
+   return 0;
+
+   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
+   if (usize < 0)
+   return 0;
+
+   ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
+   if (ksize < 0)
+   return 0;
+
+   total_size = usize + ksize;
+   if (total_size > 0 && total_size <= max_len)
+   bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);
+
+