Introduce bpf_in_nmi(), bpf_in_hardirq(), bpf_in_serving_softirq(), and bpf_in_task() kfuncs to allow BPF programs to query the current execution context.
While BPF programs can sometimes infer context based on the attach point, certain programs (such as those in sched_ext) may be called from multiple contexts. These kfuncs provide a reliable way for logic to branch based on whether the CPU is currently handling an interrupt or executing in task context. For example, this is particularly useful for sched_ext schedulers that need to differentiate between task-to-task wake-ups and interrupt-to-task wake-ups.

As the names imply, these helpers wrap the kernel's internal in_nmi(), in_hardirq(), in_serving_softirq(), and in_task() macros.

Signed-off-by: Changwoo Min <[email protected]>
---
 kernel/bpf/helpers.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 637677815365..cb36bc7a80c6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -4365,6 +4365,46 @@ __bpf_kfunc int bpf_dynptr_file_discard(struct bpf_dynptr *dynptr)
 	return 0;
 }
 
+/**
+ * bpf_in_nmi - Test if the current execution context is in NMI context.
+ *
+ * Return: true if we are in NMI context, false otherwise.
+ */
+__bpf_kfunc bool bpf_in_nmi(void)
+{
+	return in_nmi();
+}
+
+/**
+ * bpf_in_hardirq - Test if the current execution context is in hard IRQ context.
+ *
+ * Return: true if we are in hard IRQ context, false otherwise.
+ */
+__bpf_kfunc bool bpf_in_hardirq(void)
+{
+	return in_hardirq();
+}
+
+/**
+ * bpf_in_serving_softirq - Test if the current execution context is in softirq context.
+ *
+ * Return: true if we are in softirq context, false otherwise.
+ */
+__bpf_kfunc bool bpf_in_serving_softirq(void)
+{
+	return in_serving_softirq();
+}
+
+/**
+ * bpf_in_task - Test if the current execution context is in task context.
+ *
+ * Return: true if we are in task context, false otherwise.
+ */
+__bpf_kfunc bool bpf_in_task(void)
+{
+	return in_task();
+}
+
 __bpf_kfunc_end_defs();
 
 static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
@@ -4546,6 +4586,10 @@ BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_dynptr_from_file)
 BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
+BTF_ID_FLAGS(func, bpf_in_nmi)
+BTF_ID_FLAGS(func, bpf_in_hardirq)
+BTF_ID_FLAGS(func, bpf_in_serving_softirq)
+BTF_ID_FLAGS(func, bpf_in_task)
 BTF_KFUNCS_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
-- 
2.52.0
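
For illustration, a minimal sketch (not part of the patch) of how a tracing BPF program could branch on these kfuncs to separate task-context wake-ups from interrupt-context ones. It assumes the patch above is applied and that the kfuncs are reachable through the common kfunc set registered in helpers.c; the __ksym declarations are spelled out by hand here, though a sufficiently recent vmlinux.h may already carry the prototypes.

// SPDX-License-Identifier: GPL-2.0
/* Sketch: count wake-ups by the context they originate from. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* kfuncs added by this patch; declared manually for illustration */
extern bool bpf_in_hardirq(void) __ksym;
extern bool bpf_in_serving_softirq(void) __ksym;
extern bool bpf_in_task(void) __ksym;

__u64 task_wakeups;	/* wake-ups issued from task context */
__u64 irq_wakeups;	/* wake-ups issued from hard/soft IRQ context */

SEC("fentry/try_to_wake_up")
int BPF_PROG(count_wakeups, struct task_struct *p)
{
	if (bpf_in_task())
		__sync_fetch_and_add(&task_wakeups, 1);
	else if (bpf_in_hardirq() || bpf_in_serving_softirq())
		__sync_fetch_and_add(&irq_wakeups, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

A sched_ext scheduler would make the same calls from its struct_ops callbacks, for example to tell whether an enqueue or wake-up was triggered from interrupt context rather than by another task.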

