On Tue, May 15, 2018 at 12:01 PM, Daniel Borkmann wrote:
> On 05/14/2018 07:00 PM, John Fastabend wrote:
>> Sockmap is currently backed by an array and enforces keys to be
>> four bytes. This works well for many use cases and was originally
>> modeled after devmap which also
t to use RST formatting
> bpf, doc: howto use/run the BPF selftests
This initial conversion from .txt to .rst files looks good to me. Ack
for the whole series.
Acked-by: Yonghong Song <y...@fb.com>
>
>
> Documentation/bpf/README.rst| 36 ++
> Documentation/bpf
On Mon, May 14, 2018 at 2:11 PM, Sean Young wrote:
> This implements the grundig-16 IR protocol.
>
> Signed-off-by: Sean Young
> ---
> samples/bpf/Makefile | 4 +
> samples/bpf/bpf_load.c| 9 +-
>
On Mon, May 14, 2018 at 2:11 PM, Sean Young wrote:
> The context provided to a BPF_PROG_RAWIR_DECODER is a struct ir_raw_event;
> ensure user space has a definition.
>
> Signed-off-by: Sean Young
> ---
> include/media/rc-core.h| 19 +--
>
On Mon, May 14, 2018 at 2:10 PM, Sean Young wrote:
> This implements attaching, detaching, querying and execution. The target
> fd has to be the /dev/lircN device.
>
> Signed-off-by: Sean Young
> ---
> drivers/media/rc/ir-bpf-decoder.c | 191
On Mon, May 14, 2018 at 2:10 PM, Sean Young wrote:
> Add support for BPF_PROG_IR_DECODER. This type of BPF program can call
> rc_keydown() to report decoded IR scancodes, or rc_repeat() to report
> that the last key should be repeated.
>
> Signed-off-by: Sean Young
Signed-off-by: Daniel Borkmann <dan...@iogearbox.net>
Acked-by: Yonghong Song <y...@fb.com>
y: Alexei Starovoitov <a...@kernel.org>
Tested and the change looks good.
Acked-by: Yonghong Song <y...@fb.com>
> ---
> tools/testing/selftests/bpf/Makefile | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/tools/testing/selftests/bpf/Makefile
On Sun, May 13, 2018 at 10:33 AM, Alban Crequy wrote:
> From: Alban Crequy
>
> bpf_get_current_cgroup_ino() allows BPF trace programs to get the inode
> of the cgroup where the current process resides.
>
> My use case is to get statistics about syscalls
On Fri, May 11, 2018 at 5:17 PM, Jakub Kicinski
wrote:
> There are many ways users may compile samples, some of them got
> broken by commit 5f9380572b4b ("samples: bpf: compile and link
> against full libbpf"). Improve path resolution and make libbpf
> building a
+# ifdef CONFIG_X86_64
> +# define RETPOLINE_RAX_BPF_JIT_SIZE 2
> +# define RETPOLINE_RAX_BPF_JIT() \
> + EMIT2(0xFF, 0xE0); /* jmp *%rax */
> +# else /* !CONFIG_X86_64 */
> +# define RETPOLINE_EDX_BPF_JIT() \
> + EMIT2(0xFF, 0xE2)/* jmp *%edx */
> +# endif
> #endif
>
> #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
> --
> 2.9.5
>
Acked-by: Yonghong Song <y...@fb.com>
ed_u64 btf;
> + __u32 btf_size;
> + __u32 id;
> } __attribute__((aligned(8)));
>
> /* User bpf_sock_addr struct to access socket fields and sockaddr struct
> passed
> --
> 2.9.5
>
Acked-by: Song Liu <songliubrav...@fb.com>
int test_num)
> goto done;
> }
>
> - user_btf_size = (int)raw_btf_size + test->info_size_delta;
> + user_btf_size = (int)raw_btf_size + test->btf_size_delta;
> expected_nbytes = min(raw_btf_size, user_btf_size);
> if (raw_btf_size > expected_nbytes)
> memset(user_btf + expected_nbytes, 0xff,
> raw_btf_size - expected_nbytes);
>
> - err = bpf_obj_get_info_by_fd(btf_fd, user_btf, _btf_size);
> - if (CHECK(err || user_btf_size != raw_btf_size ||
> - memcmp(raw_btf, user_btf, expected_nbytes),
> - "err:%d(errno:%d) raw_btf_size:%u user_btf_size:%u
> expected_nbytes:%u memcmp:%d",
> - err, errno,
> - raw_btf_size, user_btf_size, expected_nbytes,
> - memcmp(raw_btf, user_btf, expected_nbytes))) {
> + info_len = sizeof(info);
> + info.btf = ptr_to_u64(user_btf);
> + info.btf_size = user_btf_size;
> +
> + ret = 0;
> + err = bpf_obj_get_info_by_fd(btf_fd, , _len);
> + if (CHECK(err || !info.id || info_len != sizeof(info) ||
> + info.btf_size != raw_btf_size ||
> + (ret = memcmp(raw_btf, user_btf, expected_nbytes)),
> + "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu
> raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
> + err, errno, info.id, info_len, sizeof(info),
> + raw_btf_size, info.btf_size, expected_nbytes, ret)) {
> err = -1;
> goto done;
> }
> --
> 2.9.5
>
Acked-by: Song Liu <songliubrav...@fb.com>
f(expected_line),
> + "expected_line is too long")) {
> err = -1;
> - fprintf(stderr, "expected_line is too long\n");
> goto done;
> }
>
> @@ -1535,15 +1524,15 @@ static int test_pprint(void)
> nread = getline(, _len, pin_file);
> } while (++key < test->max_entries && nread > 0);
>
> - if (key < test->max_entries) {
> + if (CHECK(key < test->max_entries,
> + "Unexpected EOF. key:%u test->max_entries:%u",
> + key, test->max_entries)) {
> err = -1;
> - fprintf(stderr, "Unexpected EOF\n");
> goto done;
> }
>
> - if (nread > 0) {
> + if (CHECK(nread > 0, "Unexpected extra pprint output: %s", line)) {
> err = -1;
> - fprintf(stderr, "Unexpected extra pprint output: %s\n", line);
> goto done;
> }
>
> @@ -1551,9 +1540,9 @@ static int test_pprint(void)
>
> done:
> if (!err)
> - fprintf(stderr, "OK\n");
> + fprintf(stderr, "OK");
> if (*btf_log_buf && (err || args.always_log))
> - fprintf(stderr, "%s\n", btf_log_buf);
> + fprintf(stderr, "\n%s", btf_log_buf);
> if (btf_fd != -1)
> close(btf_fd);
> if (map_fd != -1)
> @@ -1634,6 +1623,12 @@ static int parse_args(int argc, char **argv)
> return 0;
> }
>
> +static void print_summary(void)
> +{
> + fprintf(stderr, "PASS:%u SKIP:%u FAIL:%u\n",
> + pass_cnt - skip_cnt, skip_cnt, error_cnt);
> +}
> +
> int main(int argc, char **argv)
> {
> int err = 0;
> @@ -1655,15 +1650,17 @@ int main(int argc, char **argv)
> err |= test_file();
>
> if (args.pprint_test)
> - err |= test_pprint();
> + err |= count_result(test_pprint());
>
> if (args.raw_test || args.get_info_test || args.file_test ||
> args.pprint_test)
> - return err;
> + goto done;
>
> err |= test_raw();
> err |= test_get_info();
> err |= test_file();
>
> +done:
> + print_summary();
> return err;
> }
> --
> 2.9.5
>
Acked-by: Song Liu <songliubrav...@fb.com>
re+0x3b0/0x3b0
> [ 34.870726] ? security_capable+0x54/0x90
> [ 34.871247] __x64_sys_bpf+0x1b2/0x310
> [ 34.871761] ? __ia32_sys_bpf+0x310/0x310
> [ 34.872285] ? bad_area_access_error+0x310/0x310
> [ 34.872894] do_syscall_64+0x95/0x3f0
>
> This patch uses refcou
r);
> +}
> +
> #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
>
> static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
> @@ -2042,7 +2057,7 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr
> *attr,
> err = bpf_map_get_info_by_fd(f.file->private_data, attr,
>uattr);
> else if (f.file->f_op == _fops)
> - err = btf_get_info_by_fd(f.file->private_data, attr, uattr);
> + err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
> else
> err = -EINVAL;
>
> --
> 2.9.5
>
Acked-by: Song Liu <songliubrav...@fb.com>
info.btf_value_id = map->btf_value_id;
> + }
> +
> if (bpf_map_is_dev_bound(map)) {
> err = bpf_map_offload_info_fill(, map);
> if (err)
> @@ -2057,6 +2063,19 @@ static int bpf_btf_load(const union bpf_attr *attr)
> return btf_new_fd(attr);
> }
>
> +#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
> +
> +static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
> +{
> + if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
> + return -EINVAL;
> +
> + if (!capable(CAP_SYS_ADMIN))
> + return -EPERM;
> +
> + return btf_get_fd_by_id(attr->btf_id);
> +}
> +
> SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int,
> size)
> {
> union bpf_attr attr = {};
> @@ -2140,6 +2159,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *,
> uattr, unsigned int, siz
> case BPF_BTF_LOAD:
> err = bpf_btf_load();
> break;
> + case BPF_BTF_GET_FD_BY_ID:
> + err = bpf_btf_get_fd_by_id();
> + break;
> default:
> err = -EINVAL;
> break;
> --
> 2.9.5
>
Acked-by: Song Liu <songliubrav...@fb.com>
> On May 8, 2018, at 11:06 AM, Konstantin Ryabitsev
> <konstan...@linuxfoundation.org> wrote:
>
> On 05/08/18 13:46, Song Liu wrote:
>> We are seeing the following error on multiple different systems while
>> cloning net-next tree.
>>
>> $ git clon
into it?
If you see the same problem, here is a work-around for it:
1. clone a smaller tree, for example net.git;
2. add net-next as a new remote, and fetch it.
Thanks,
Song
riable for the irq_work. As a result, only
one irq_work per cpu is allowed. If the irq_work is in-use, we
fallback to only report ips.
Cc: Alexei Starovoitov <a...@kernel.org>
Cc: Daniel Borkmann <dan...@iogearbox.net>
Cc: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Song Liu <songl
dd static to DEFINE_PER_CPU();
5. Remove pr_info() in stack_map_init().
Song Liu (2):
bpf: enable stackmap with build_id in nmi context
bpf: add selftest for stackmap with build_id in NMI context
init/Kconfig | 1 +
kernel/bpf/stackmap.c
.
urandom_read.c is extended to run configurable cycles so that it can be
caught by the perf event.
Signed-off-by: Song Liu <songliubrav...@fb.com>
---
tools/testing/selftests/bpf/test_progs.c | 134 +
tools/testing/selftests/bpf/urandom_read.c | 10 ++-
2 files c
Thanks Tobin. I will fold these changes in.
> On May 3, 2018, at 12:19 AM, Tobin C. Harding <to...@apporbit.com> wrote:
>
> On Wed, May 02, 2018 at 04:20:30PM -0700, Song Liu wrote:
>> This new test captures stackmap with build_id with hardware event
>> PERF_COUNT_HW
Remove FAST_FEATURE_TESTS")
Suggested-by: Alexei Starovoitov <a...@kernel.org>
Signed-off-by: Yonghong Song <y...@fb.com>
---
Makefile | 1 +
arch/x86/include/asm/cpufeature.h | 5 +
2 files changed, 6 insertions(+)
Changelog:
v2 -> v3:
. Changed macr
.
urandom_read.c is extended to run configurable cycles so that it can be
caught by the perf event.
Signed-off-by: Song Liu <songliubrav...@fb.com>
---
tools/testing/selftests/bpf/test_progs.c | 137 +
tools/testing/selftests/bpf/urandom_read.c | 10 ++-
2 files c
riable for the irq_work. As a result, only
one irq_work per cpu is allowed. If the irq_work is in-use, we
fallback to only report ips.
Cc: Alexei Starovoitov <a...@kernel.org>
Cc: Daniel Borkmann <dan...@iogearbox.net>
Cc: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Song Liu <songl
Changes v1 -> v2:
1. Rename some variables to (hopefully) reduce confusion;
2. Check irq_work status with IRQ_WORK_BUSY (instead of work->sem);
3. In Kconfig, let BPF_SYSCALL select IRQ_WORK;
4. Add static to DEFINE_PER_CPU();
5. Remove pr_info() in stack_map_init().
Song Liu (2):
> On May 2, 2018, at 10:30 AM, Peter Zijlstra <pet...@infradead.org> wrote:
>
> On Wed, May 02, 2018 at 04:48:32PM +, Song Liu wrote:
>>> It's broken though, I've bet you've never actually ran this with lockdep
>>> enabled for example.
>>
>>
> On May 2, 2018, at 2:21 AM, Peter Zijlstra <pet...@infradead.org> wrote:
>
> On Tue, May 01, 2018 at 05:02:19PM -0700, Song Liu wrote:
>> @@ -267,17 +285,27 @@ static void stack_map_get_build_id_offset(struct
>> bpf_stack_build_id *id_offs,
>> {
>>
> On May 1, 2018, at 10:50 PM, Stephen Rothwell <s...@canb.auug.org.au> wrote:
>
> Hi Song,
>
> On Wed, 2 May 2018 04:40:20 + Song Liu <songliubrav...@fb.com> wrote:
>>
>>> - CHECK(build_id_matches < 1, "build id match",
>
ey = key;
> } while (bpf_map_get_next_key(stackmap_fd, _key, ) == 0);
>
> - CHECK(build_id_matches < 1, "build id match",
> - "Didn't find expected build ID from the map\n");
> + if (CHECK(build_id_matches < 1, "build id match",
> -
.
urandom_read.c is extended to run configurable cycles so that it can be
caught by the perf event.
Signed-off-by: Song Liu <songliubrav...@fb.com>
---
tools/testing/selftests/bpf/test_progs.c | 137 +
tools/testing/selftests/bpf/urandom_read.c | 10 ++-
2 files c
riable for the irq_work. As a result, only
one irq_work per cpu is allowed. If the irq_work is in-use, we
fallback to only report ips.
Cc: Alexei Starovoitov <a...@kernel.org>
Cc: Daniel Borkmann <dan...@iogearbox.net>
Cc: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Song Li
1. remove useless parameter list to ./urandom_read
2. add missing "\n" to the end of an error message
Fixes: 81f77fd0deeb ("bpf: add selftest for stackmap with BPF_F_STACK_BUILD_ID")
Cc: Alexei Starovoitov <a...@kernel.org>
Cc: Daniel Borkmann <dan...@iogearbox
On Mon, Apr 30, 2018 at 7:33 AM, Daniel Borkmann <dan...@iogearbox.net> wrote:
> On 04/30/2018 04:26 PM, William Tu wrote:
>> Bring the erspan uapi header file so BPF tunnel helpers can use it.
>>
>> Fixes: 933a741e3b82 ("selftests/bpf: bpf tunnel test.")
>&
Hi, William,
When compiled the selftests/bpf in my centos 7 based system, I have
the following failures,
clang -I. -I./include/uapi -I../../../include/uapi
-Wno-compare-distinct-pointer-types \
-O2 -target bpf -emit-llvm -c test_tunnel_kern.c -o - | \
llc -march=bpf -mcpu=generic
) to __x64_sys_*()")
Signed-off-by: Yonghong Song <y...@fb.com>
---
samples/bpf/bpf_load.c | 34 ++
1 file changed, 26 insertions(+), 8 deletions(-)
Changelogs:
v1 -> v2:
. make change in bpf_load.c instead of each individual bpf programs.
diff --g
On 4/29/18 5:06 PM, Alexei Starovoitov wrote:
On Sun, Apr 29, 2018 at 05:00:23PM -0700, Yonghong Song wrote:
On 4/29/18 4:20 PM, Alexei Starovoitov wrote:
On Sun, Apr 29, 2018 at 03:06:31PM -0700, Yonghong Song wrote:
Commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename
s
On 4/29/18 4:20 PM, Alexei Starovoitov wrote:
On Sun, Apr 29, 2018 at 03:06:31PM -0700, Yonghong Song wrote:
Commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename
struct pt_regs-based sys_*() to __x64_sys_*()") renamed a lot
of syscall function sys_*() to __x64_sys_*().
T
ad of sys_*(), in bpf program SEC annotations if
the target arch is __TARGET_ARCH_x86.
Fixes: d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct pt_regs-based
sys_*() to __x64_sys_*()")
Signed-off-by: Yonghong Song <y...@fb.com>
---
samples/bpf/map_perf_test_ke
-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/Makefile | 4 +-
tools/testing/selftests/bpf/test_get_stack_rawtp.c | 102
tools/testing/selftests/bpf/test_progs.c | 172 +++--
3 files changed, 266 insertions(
com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
include/linux/bpf.h | 1 +
include/linux/filter.h | 3 ++-
include/uapi/linux/bpf.h | 42 --
kernel/bpf/core.c| 5
kernel/bpf/stackmap.c| 67
id's must be the same.
Acked-by: Alexei Starovoitov <a...@kernel.org>
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_progs.c | 70 --
.../selftests/bpf/test_stacktrace_build_id.c | 20 ++-
tools/testing/s
nghong Song <y...@fb.com>
---
kernel/bpf/verifier.c | 11 ++-
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 988400e..6e3f859 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2940,10 +2940,7 @
The test_verifier already has a few ARSH test cases.
This patch adds a new test case which takes advantage of newly
improved verifier behavior for bpf_get_stack and ARSH.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_verifier.
This patch didn't incur functionality change. The function prototype
got changed so that the same function can be reused later.
Signed-off-by: Yonghong Song <y...@fb.com>
---
kernel/bpf/stackmap.c | 13 +
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/kern
The tools header file bpf.h is synced with kernel uapi bpf.h.
The new helper is also added to bpf_helpers.h.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/include/uapi/linux/bpf.h| 42 +--
tools/testing/selftests/bpf/bpf_helpers.h | 2 ++
2
(id=0,umax_value=800,var_off=(0x0; 0x3ff))
R1=inv0 R6=ctx(id=0,off=0,imm=0)
R7=map_value(id=0,off=0,ks=4,vs=1600,imm=0)
R8=inv(id=0,umax_value=800,var_off=(0x0; 0x3ff)) R9=inv800
R10=fp0,call_-1
58: (bf) r2 = r7
59: (0f) r2 += r8
60: (1f) r9 -= r8
61: (bf)
ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
..
Without improving ARSH value range tracking, the register representing
"max_len - usize" will have smin_value equal to S64_MIN and will be
rejected by verifier.
Acked-by: Alexei Starovoitov <a...@kernel.
per memory size argument
. implemented range checking for ARSH in verifier
. moved perf event polling and ksym related functions
from samples/bpf to tools/bpf
. added test to compare build id's between bpf_get_stackid
and bpf_get_stack
v1 -> v2:
. fixed compil
Starovoitov <a...@fb.com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
samples/bpf/Makefile| 11 +-
samples/bpf/bpf_load.c | 63 --
samples/bpf/bpf_load.h | 7 --
samples/bpf/offwaketime_user.c | 1 +
On 4/28/18 12:06 PM, Alexei Starovoitov wrote:
On Sat, Apr 28, 2018 at 11:17:30AM -0700, Y Song wrote:
On Sat, Apr 28, 2018 at 9:56 AM, Alexei Starovoitov
<alexei.starovoi...@gmail.com> wrote:
On Sat, Apr 28, 2018 at 12:02:04AM -0700, Yonghong Song wrote:
The test attached a raw_trac
On Sat, Apr 28, 2018 at 9:56 AM, Alexei Starovoitov
<alexei.starovoi...@gmail.com> wrote:
> On Sat, Apr 28, 2018 at 12:02:04AM -0700, Yonghong Song wrote:
>> The test attached a raw_tracepoint program to sched/sched_switch.
>> It tested to get stack for user space, kernel sp
(id=0,umax_value=800,var_off=(0x0; 0x3ff))
R1=inv0 R6=ctx(id=0,off=0,imm=0)
R7=map_value(id=0,off=0,ks=4,vs=1600,imm=0)
R8=inv(id=0,umax_value=800,var_off=(0x0; 0x3ff)) R9=inv800
R10=fp0,call_-1
58: (bf) r2 = r7
59: (0f) r2 += r8
60: (1f) r9 -= r8
61: (bf)
is available, the user space
application will check to ensure that the kernel function
for raw_tracepoint ___bpf_prog_run is part of the stack.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/Makefile | 4 +-
tools/testing/selftests/bpf/test_get_stack_r
id's must be the same.
Acked-by: Alexei Starovoitov <a...@kernel.org>
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_progs.c | 70 --
.../selftests/bpf/test_stacktrace_build_id.c | 20 ++-
tools/testing/s
ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
..
Without improving ARSH value range tracking, the register representing
"max_len - usize" will have smin_value equal to S64_MIN and will be
rejected by verifier.
Signed-off-by: Yonghong Song <y...@fb.
Starovoitov <a...@fb.com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
samples/bpf/Makefile| 11 +-
samples/bpf/bpf_load.c | 63 --
samples/bpf/bpf_load.h | 7 --
samples/bpf/offwaketime_user.c | 1 +
This patch didn't incur functionality change. The function prototype
got changed so that the same function can be reused later.
Signed-off-by: Yonghong Song <y...@fb.com>
---
kernel/bpf/stackmap.c | 13 +
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/kern
The tools header file bpf.h is synced with kernel uapi bpf.h.
The new helper is also added to bpf_helpers.h.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/include/uapi/linux/bpf.h| 42 +--
tools/testing/selftests/bpf/bpf_helpers.h | 2 ++
2
nghong Song <y...@fb.com>
---
kernel/bpf/verifier.c | 11 ++-
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 988400e..6e3f859 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2940,10 +2940,7 @
The test_verifier already has a few ARSH test cases.
This patch adds a new test case which takes advantage of newly
improved verifier behavior for bpf_get_stack and ARSH.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_verifier.
com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
include/linux/bpf.h | 1 +
include/linux/filter.h | 3 ++-
include/uapi/linux/bpf.h | 42 --
kernel/bpf/core.c| 5
kernel/bpf/stackmap.c| 67
2:
. fixed compilation error when CONFIG_PERF_EVENTS is not enabled
Yonghong Song (10):
bpf: change prototype for stack_map_get_build_id_offset
bpf: add bpf_get_stack helper
bpf/verifier: refine retval R0 state for bpf_get_stack helper
bpf: remove never-hit branches in verifier adjust_sc
On 4/27/18 4:48 PM, Alexei Starovoitov wrote:
On Wed, Apr 25, 2018 at 12:29:05PM -0700, Yonghong Song wrote:
When helpers like bpf_get_stack returns an int value
and later on used for arithmetic computation, the LSH and ARSH
operations are often required to get proper sign extension into
64
The test_verifier already has a few ARSH test cases.
This patch adds a new test case which takes advantage of newly
improved verifier behavior for bpf_get_stack and ARSH.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_verifier.
This patch didn't incur functionality change. The function prototype
got changed so that the same function can be reused later.
Signed-off-by: Yonghong Song <y...@fb.com>
---
kernel/bpf/stackmap.c | 13 +
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/kern
The tools header file bpf.h is synced with kernel uapi bpf.h.
The new helper is also added to bpf_helpers.h.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/include/uapi/linux/bpf.h| 19 +--
tools/testing/selftests/bpf/bpf_helpers.h | 2 ++
2 files chang
is available, the user space
application will check to ensure that the kernel function
for raw_tracepoint ___bpf_prog_run is part of the stack.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/Makefile | 3 +-
tools/testing/selftests/bpf/test_get_stack_r
Starovoitov <a...@fb.com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
samples/bpf/Makefile| 11 +-
samples/bpf/bpf_load.c | 63 --
samples/bpf/bpf_load.h | 7 --
samples/bpf/offwaketime_user.c | 1 +
id's must be the same.
Acked-by: Alexei Starovoitov <a...@kernel.org>
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_progs.c | 70 --
.../selftests/bpf/test_stacktrace_build_id.c | 20 ++-
tools/testing/s
. added test to compare build id's between bpf_get_stackid
and bpf_get_stack
v1 -> v2:
. fixed compilation error when CONFIG_PERF_EVENTS is not enabled
Yonghong Song (10):
bpf: change prototype for stack_map_get_build_id_offset
bpf: add bpf_get_stack helper
bpf/verifier: refine retv
In verifier function adjust_scalar_min_max_vals,
when src_known is false and the opcode is BPF_LSH/BPF_RSH,
early return will happen in the function. So remove
the branch in handling BPF_LSH/BPF_RSH when src_known is false.
Signed-off-by: Yonghong Song <y...@fb.com>
---
kernel/bpf/veri
(id=0,umax_value=800,var_off=(0x0; 0x3ff))
R1=inv0 R6=ctx(id=0,off=0,imm=0)
R7=map_value(id=0,off=0,ks=4,vs=1600,imm=0)
R8=inv(id=0,umax_value=800,var_off=(0x0; 0x3ff)) R9=inv800
R10=fp0,call_-1
58: (bf) r2 = r7
59: (0f) r2 += r8
60: (1f) r9 -= r
ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
..
Without improving ARSH value range tracking, the register representing
"max_len - usize" will have smin_value equal to S64_MIN and will be
rejected by verifier.
Signed-off-by: Yonghong Song <y...@fb.
com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
include/linux/bpf.h | 1 +
include/linux/filter.h | 3 ++-
include/uapi/linux/bpf.h | 19 --
kernel/bpf/core.c| 5
kernel/bpf/stackmap.c| 67
kernel/bp
On 4/25/18 2:00 AM, Daniel Borkmann wrote:
On 04/23/2018 11:27 PM, Yonghong Song wrote:
Currently, stackmap and bpf_get_stackid helper are provided
for bpf program to get the stack trace. This approach has
a limitation though. If two stack traces have the same hash,
only one will get stored
Since test_sock_addr is not supposed to run by itself,
remove it from TEST_GEN_PROGS and add it to
TEST_GEN_PROGS_EXTENDED. This way, run_tests will
not run test_sock_addr. The corresponding test to run
is test_sock_addr.sh.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/sel
Hi, Peter,
I have a question regarding to one of your comments below.
On 3/12/18 3:01 PM, Peter Zijlstra wrote:
On Mon, Mar 12, 2018 at 01:39:56PM -0700, Song Liu wrote:
+static void stack_map_get_build_id_offset(struct bpf_map *map,
+ struct
The tools header file bpf.h is synced with kernel uapi bpf.h.
The new helper is also added to bpf_helpers.h.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/include/uapi/linux/bpf.h| 19 +--
tools/testing/selftests/bpf/bpf_helpers.h | 3 ++-
2 files chang
is available, the user space
application will check to ensure that the kernel function
for raw_tracepoint ___bpf_prog_run is part of the stack.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/Makefile | 3 +-
tools/testing/selftests/bpf/test_get_stack_r
ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
..
Without improving ARSH value range tracking, the register representing
"max_len - usize" will have smin_value equal to S64_MIN and will be
rejected by verifier.
Signed-off-by: Yonghong Song <y...@fb.
The test_verifier already has a few ARSH test cases.
This patch adds a new test case which takes advantage of newly
improved verifier behavior for bpf_get_stack and ARSH.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_verifier.
com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
include/linux/bpf.h | 1 +
include/linux/filter.h | 3 ++-
include/uapi/linux/bpf.h | 19 --
kernel/bpf/core.c| 5
kernel/bpf/stackmap.c| 67
kernel/b
bpf_get_stackid
and bpf_get_stack
v1 -> v2:
. fixed compilation error when CONFIG_PERF_EVENTS is not enabled
Yonghong Song (10):
bpf: change prototype for stack_map_get_build_id_offset
bpf: add bpf_get_stack helper
bpf/verifier: refine retval R0 state for bpf_get_stack helper
bpf
id's must be the same.
Acked-by: Alexei Starovoitov <a...@kernel.org>
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_progs.c | 70 --
.../selftests/bpf/test_stacktrace_build_id.c | 20 ++-
tools/testing/s
Starovoitov <a...@fb.com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
samples/bpf/Makefile| 11 +-
samples/bpf/bpf_load.c | 63 --
samples/bpf/bpf_load.h | 7 --
samples/bpf/offwaketime_user.c | 1 +
In verifier function adjust_scalar_min_max_vals,
when src_known is false and the opcode is BPF_LSH/BPF_RSH,
early return will happen in the function. So remove
the branch in handling BPF_LSH/BPF_RSH when src_known is false.
Signed-off-by: Yonghong Song <y...@fb.com>
---
kernel/bpf/veri
This patch didn't incur functionality change. The function prototype
got changed so that the same function can be reused later.
Signed-off-by: Yonghong Song <y...@fb.com>
---
kernel/bpf/stackmap.c | 13 +
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/kern
(id=0,umax_value=800,var_off=(0x0; 0x3ff))
R1=inv0 R6=ctx(id=0,off=0,imm=0)
R7=map_value(id=0,off=0,ks=4,vs=1600,imm=0)
R8=inv(id=0,umax_value=800,var_off=(0x0; 0x3ff)) R9=inv800
R10=fp0,call_-1
58: (bf) r2 = r7
59: (0f) r2 += r8
60: (1f) r9 -= r
id's must be the same.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_progs.c | 70 --
.../selftests/bpf/test_stacktrace_build_id.c | 20 ++-
tools/testing/selftests/bpf/test_stacktrace_map.c | 20 ++-
3 files c
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/include/uapi/linux/bpf.h| 19 +--
tools/testing/selftests/bpf/bpf_helpers.h | 3 ++-
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux
is available, the user space
application will check to ensure that the kernel function
for raw_tracepoint ___bpf_prog_run is part of the stack.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/Makefile | 3 +-
tools/testing/selftests/bpf/test_get_stack_r
com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
include/linux/bpf.h | 1 +
include/linux/filter.h | 3 ++-
include/uapi/linux/bpf.h | 19 --
kernel/bpf/core.c| 5
kernel/bpf/stackmap.c| 67
kernel/b
ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
..
Without improving ARSH value range tracking, the register representing
"max_len - usize" will have smin_value equal to S64_MIN and will be
rejected by verifier.
Signed-off-by: Yonghong Song <y...@fb.
Starovoitov <a...@fb.com>
Signed-off-by: Yonghong Song <y...@fb.com>
---
samples/bpf/Makefile| 11 +-
samples/bpf/bpf_load.c | 63 --
samples/bpf/bpf_load.h | 7 --
samples/bpf/offwaketime_user.c | 1 +
The test_verifier already has a few ARSH test cases.
This patch adds a new test case which takes advantage of newly
improved verifier behavior for bpf_get_stack and ARSH.
Signed-off-by: Yonghong Song <y...@fb.com>
---
tools/testing/selftests/bpf/test_verifier.
_get_stackid
and bpf_get_stack
v1 -> v2:
. fixed compilation error when CONFIG_PERF_EVENTS is not enabled
Yonghong Song (10):
bpf: change prototype for stack_map_get_build_id_offset
bpf: add bpf_get_stack helper
bpf/verifier: refine retval R0 state for bpf_get_stack helper
bpf: r
nv(id=0,umax_value=800) R1=inv0 R6=ctx(id=0,off=0,imm=0)
R7=map_value(id=0,off=0,ks=4,vs=1600,imm=0)
R8=inv(id=0,umax_value=800,var_off=(0x0; 0x3ff)) R9=inv800
R10=fp0,call_-1
58: (bf) r2 = r7
59: (0f) r2 += r8
60: (1f) r9 -= r8
61: (bf) r1 = r6
Signed-off-by: Yonghon
In verifier function adjust_scalar_min_max_vals,
when src_known is false and the opcode is BPF_LSH/BPF_RSH,
early return will happen in the function. So remove
the branch in handling BPF_LSH/BPF_RSH when src_known is false.
Signed-off-by: Yonghong Song <y...@fb.com>
---
kernel/bpf/veri
501 - 600 of 1050 matches
Mail list logo