If TRACE_SESSION exists, we will use an extra 8 bytes in the stack of the trampoline to store the flags that we need, and these 8 bytes lie after the return value, which means ctx[nr_args + 1]. We will store the flag "is_exit" in the lowest bit of it.
Introduce the kfunc bpf_tracing_is_exit(), which is used to tell if it is fexit currently. Meanwhile, inline it in the verifier. Add the kfunc bpf_fsession_cookie(), which is similar to bpf_session_cookie() and return the address of the session cookie. The address of the session cookie is stored after session flags, which means ctx[nr_args + 2]. Inline this kfunc in the verifier too. Signed-off-by: Menglong Dong <[email protected]> Co-developed-by: Leon Hwang <[email protected]> Signed-off-by: Leon Hwang <[email protected]> --- v3: - merge the bpf_tracing_is_exit and bpf_fsession_cookie into a single patch v2: - store the session flags after return value, instead of before nr_args - inline the bpf_tracing_is_exit, as Jiri suggested --- include/linux/bpf.h | 1 + kernel/bpf/verifier.c | 33 ++++++++++++++++++++-- kernel/trace/bpf_trace.c | 59 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 88 insertions(+), 5 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6b5855c80fa6..ce55d3881c0d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1736,6 +1736,7 @@ struct bpf_prog { enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ call_get_func_ip:1, /* Do we call get_func_ip() */ + call_session_cookie:1, /* Do we call bpf_fsession_cookie() */ tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */ sleepable:1; /* BPF program is sleepable */ enum bpf_prog_type type; /* Type of BPF program */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 818deb6a06e4..6f8aa4718d6f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -12293,6 +12293,8 @@ enum special_kfunc_type { KF___bpf_trap, KF_bpf_task_work_schedule_signal, KF_bpf_task_work_schedule_resume, + KF_bpf_tracing_is_exit, + KF_bpf_fsession_cookie, }; BTF_ID_LIST(special_kfunc_list) @@ -12365,6 +12367,8 @@ BTF_ID(func, 
bpf_res_spin_unlock_irqrestore) BTF_ID(func, __bpf_trap) BTF_ID(func, bpf_task_work_schedule_signal) BTF_ID(func, bpf_task_work_schedule_resume) +BTF_ID(func, bpf_tracing_is_exit) +BTF_ID(func, bpf_fsession_cookie) static bool is_task_work_add_kfunc(u32 func_id) { @@ -12419,7 +12423,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, struct bpf_reg_state *reg = ®s[regno]; bool arg_mem_size = false; - if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) + if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || + meta->func_id == special_kfunc_list[KF_bpf_tracing_is_exit] || + meta->func_id == special_kfunc_list[KF_bpf_fsession_cookie]) return KF_ARG_PTR_TO_CTX; /* In this function, we verify the kfunc's BTF as per the argument type, @@ -13912,7 +13918,8 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } } - if (meta.func_id == special_kfunc_list[KF_bpf_session_cookie]) { + if (meta.func_id == special_kfunc_list[KF_bpf_session_cookie] || + meta.func_id == special_kfunc_list[KF_bpf_fsession_cookie]) { meta.r0_size = sizeof(u64); meta.r0_rdonly = false; } @@ -14193,6 +14200,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return err; } + if (meta.func_id == special_kfunc_list[KF_bpf_fsession_cookie]) + env->prog->call_session_cookie = true; + return 0; } @@ -22012,6 +22022,25 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); *cnt = 1; + } else if (desc->func_id == special_kfunc_list[KF_bpf_tracing_is_exit]) { + /* Load nr_args from ctx - 8 */ + insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); + /* add rax, 1 */ + insn_buf[1] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1); + insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); + insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); + insn_buf[4] = BPF_LDX_MEM(BPF_DW, 
BPF_REG_0, BPF_REG_0, 0); + insn_buf[5] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1); + *cnt = 6; + } else if (desc->func_id == special_kfunc_list[KF_bpf_fsession_cookie]) { + /* Load nr_args from ctx - 8 */ + insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); + /* add rax, 2 */ + insn_buf[1] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2); + insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); + insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); + insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0); + *cnt = 5; } if (env->insn_aux_data[insn_idx].arg_prog) { diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 4f87c16d915a..4a8568bd654d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3356,12 +3356,65 @@ static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = { .filter = bpf_kprobe_multi_filter, }; -static int __init bpf_kprobe_multi_kfuncs_init(void) +__bpf_kfunc_start_defs(); + +__bpf_kfunc bool bpf_tracing_is_exit(void *ctx) { - return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set); + /* This helper call is inlined by verifier. */ + u64 nr_args = ((u64 *)ctx)[-1]; + + /* + * ctx[nr_args + 1] is the session flags, and the last bit is + * is_exit. + */ + return ((u64 *)ctx)[nr_args + 1] & 1; +} + +__bpf_kfunc u64 *bpf_fsession_cookie(void *ctx) +{ + /* This helper call is inlined by verifier. 
*/ + u64 nr_args = ((u64 *)ctx)[-1]; + + /* ctx[nr_args + 2] is the session cookie address */ + return (u64 *)((u64 *)ctx)[nr_args + 2]; +} + +__bpf_kfunc_end_defs(); + +BTF_KFUNCS_START(tracing_kfunc_set_ids) +BTF_ID_FLAGS(func, bpf_tracing_is_exit, KF_FASTCALL) +BTF_ID_FLAGS(func, bpf_fsession_cookie, KF_FASTCALL) +BTF_KFUNCS_END(tracing_kfunc_set_ids) + +static int bpf_tracing_filter(const struct bpf_prog *prog, u32 kfunc_id) +{ + if (!btf_id_set8_contains(&tracing_kfunc_set_ids, kfunc_id)) + return 0; + + if (prog->type != BPF_PROG_TYPE_TRACING || + prog->expected_attach_type != BPF_TRACE_SESSION) + return -EINVAL; + + return 0; +} + +static const struct btf_kfunc_id_set bpf_tracing_kfunc_set = { + .owner = THIS_MODULE, + .set = &tracing_kfunc_set_ids, + .filter = bpf_tracing_filter, +}; + +static int __init bpf_trace_kfuncs_init(void) +{ + int err = 0; + + err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set); + err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_tracing_kfunc_set); + + return err; } -late_initcall(bpf_kprobe_multi_kfuncs_init); +late_initcall(bpf_trace_kfuncs_init); typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk); -- 2.51.1
