If a TRACE_SESSION program is attached, use an extra 8 bytes on the
trampoline stack to store the flags we need. These 8 bytes sit right
after the return value, i.e. at ctx[nr_args + 1], and the "is_exit"
flag is stored in their lowest bit.

Introduce the kfunc bpf_tracing_is_exit(), which tells whether the
program is currently running on function exit (fexit). Inline the call
in the verifier as well.
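
For illustration, a session program could use it roughly as follows;
this is a sketch, and the SEC() name and the __ksym declaration are
assumptions for illustration, not part of this patch:

	extern bool bpf_tracing_is_exit(void *ctx) __ksym;

	SEC("fsession/do_unlinkat")
	int BPF_PROG(handle_unlinkat, int dfd, struct filename *name)
	{
		if (bpf_tracing_is_exit(ctx)) {
			/* fexit path: the return value is valid here */
			return 0;
		}
		/* fentry path */
		return 0;
	}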

Signed-off-by: Menglong Dong <[email protected]>
Co-developed-by: Leon Hwang <[email protected]>
Signed-off-by: Leon Hwang <[email protected]>
---
v2:
- store the session flags after return value, instead of before nr_args
- inline the bpf_tracing_is_exit, as Jiri suggested
---
 kernel/bpf/verifier.c    | 15 +++++++++++-
 kernel/trace/bpf_trace.c | 49 +++++++++++++++++++++++++++++++++++++---
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 3ffdf2143f16..a4d0dd4440fd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12293,6 +12293,7 @@ enum special_kfunc_type {
        KF___bpf_trap,
        KF_bpf_task_work_schedule_signal,
        KF_bpf_task_work_schedule_resume,
+       KF_bpf_tracing_is_exit,
 };
 
 BTF_ID_LIST(special_kfunc_list)
@@ -12365,6 +12366,7 @@ BTF_ID(func, bpf_res_spin_unlock_irqrestore)
 BTF_ID(func, __bpf_trap)
 BTF_ID(func, bpf_task_work_schedule_signal)
 BTF_ID(func, bpf_task_work_schedule_resume)
+BTF_ID(func, bpf_tracing_is_exit)
 
 static bool is_task_work_add_kfunc(u32 func_id)
 {
@@ -12419,7 +12421,8 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
        struct bpf_reg_state *reg = &regs[regno];
        bool arg_mem_size = false;
 
-       if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
+       if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+           meta->func_id == special_kfunc_list[KF_bpf_tracing_is_exit])
                return KF_ARG_PTR_TO_CTX;
 
        /* In this function, we verify the kfunc's BTF as per the argument type,
@@ -21994,6 +21997,16 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
                insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
                *cnt = 1;
+       } else if (desc->func_id == special_kfunc_list[KF_bpf_tracing_is_exit]) {
+               /* Load nr_args from ctx - 8 */
+               insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+               /* R0 = nr_args + 1 */
+               insn_buf[1] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1);
+               insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
+               insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
+               insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
+               insn_buf[5] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1);
+               *cnt = 6;
        }
 
        if (env->insn_aux_data[insn_idx].arg_prog) {
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4f87c16d915a..d0720d850621 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3356,12 +3356,55 @@ static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
        .filter = bpf_kprobe_multi_filter,
 };
 
-static int __init bpf_kprobe_multi_kfuncs_init(void)
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc bool bpf_tracing_is_exit(void *ctx)
+{
+       /* This kfunc call is inlined by the verifier. */
+       u64 nr_args = ((u64 *)ctx)[-1];
+
+       /*
+        * ctx[nr_args + 1] holds the session flags; the lowest bit is
+        * is_exit.
+        */
+       return ((u64 *)ctx)[nr_args + 1] & 1;
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(tracing_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_tracing_is_exit, KF_FASTCALL)
+BTF_KFUNCS_END(tracing_kfunc_set_ids)
+
+static int bpf_tracing_filter(const struct bpf_prog *prog, u32 kfunc_id)
 {
-       return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
+       if (!btf_id_set8_contains(&tracing_kfunc_set_ids, kfunc_id))
+               return 0;
+
+       if (prog->type != BPF_PROG_TYPE_TRACING ||
+           prog->expected_attach_type != BPF_TRACE_SESSION)
+               return -EINVAL;
+
+       return 0;
+}
+
+static const struct btf_kfunc_id_set bpf_tracing_kfunc_set = {
+       .owner = THIS_MODULE,
+       .set = &tracing_kfunc_set_ids,
+       .filter = bpf_tracing_filter,
+};
+
+static int __init bpf_trace_kfuncs_init(void)
+{
+       int err = 0;
+
+       err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
+       err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_tracing_kfunc_set);
+
+       return err;
 }
 
-late_initcall(bpf_kprobe_multi_kfuncs_init);
+late_initcall(bpf_trace_kfuncs_init);
 
 typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk);
 
-- 
2.51.1.dirty

