Introduce bpf_in_nmi(), bpf_in_hardirq(), bpf_in_serving_softirq(), and
bpf_in_task() inline helpers in bpf_experimental.h. These allow BPF
programs to query the current execution context at a finer granularity
than the existing bpf_in_interrupt() helper.

While BPF programs can often infer their context from attachment points,
subsystems like sched_ext may invoke the same BPF logic from multiple
contexts (e.g., task-to-task wake-ups vs. interrupt-to-task wake-ups).
These helpers give such shared logic a reliable way to branch on the
current CPU execution state, as in the sketch below.
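
For example, shared logic could classify its invocation context and
branch on the result. A minimal sketch, assuming these helpers are
available via bpf_experimental.h; the enum and function names are
illustrative and not part of this patch:

  /* Hypothetical sketch: map the current execution context to an
   * enum so shared BPF logic can switch on it. bpf_in_nmi() is
   * checked first because NMI entry also raises the hardirq count
   * on recent kernels.
   */
  enum exec_ctx { CTX_TASK, CTX_SOFTIRQ, CTX_HARDIRQ, CTX_NMI };

  static __always_inline enum exec_ctx current_exec_ctx(void)
  {
          if (bpf_in_nmi())
                  return CTX_NMI;
          if (bpf_in_hardirq())
                  return CTX_HARDIRQ;
          if (bpf_in_serving_softirq())
                  return CTX_SOFTIRQ;
          return CTX_TASK;
  }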

Implementing these as BPF-native inline helpers wrapping
get_preempt_count() allows the compiler and JIT to inline the logic. The
implementation accounts for differences in preempt_count layout between
standard and PREEMPT_RT kernels.
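
For reference, the relevant bit layout (values as defined for
!PREEMPT_RT kernels in include/linux/preempt.h, repeated here only to
show why the masks differ):

  /*
   * preempt_count layout on a standard (!PREEMPT_RT) kernel:
   *
   *   PREEMPT_MASK: 0x000000ff  (bits  0-7)
   *   SOFTIRQ_MASK: 0x0000ff00  (bits  8-15, SOFTIRQ_OFFSET == 0x100)
   *   HARDIRQ_MASK: 0x000f0000  (bits 16-19)
   *   NMI_MASK:     0x00f00000  (bits 20-23)
   *
   * On PREEMPT_RT the softirq count is tracked in
   * task_struct::softirq_disable_cnt instead, which is why the
   * softirq-related helpers read the task struct in that case.
   */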

Signed-off-by: Changwoo Min <[email protected]>
---
 .../testing/selftests/bpf/bpf_experimental.h  | 38 +++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 68a49b1f77ae..30abdc08366f 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -610,6 +610,8 @@ extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
 #define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
 #define NMI_MASK       (__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)
 
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+
 extern bool CONFIG_PREEMPT_RT __kconfig __weak;
 #ifdef bpf_target_x86
 extern const int __preempt_count __ksym;
@@ -648,4 +650,40 @@ static inline int bpf_in_interrupt(void)
               (tsk->softirq_disable_cnt & SOFTIRQ_MASK);
 }
 
+static inline int bpf_in_nmi(void)
+{
+       return get_preempt_count() & NMI_MASK;
+}
+
+static inline int bpf_in_hardirq(void)
+{
+       return get_preempt_count() & HARDIRQ_MASK;
+}
+
+static inline int bpf_in_serving_softirq(void)
+{
+       struct task_struct___preempt_rt *tsk;
+       int pcnt;
+
+       pcnt = get_preempt_count();
+       if (!CONFIG_PREEMPT_RT)
+               return (pcnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET;
+
+       tsk = (void *) bpf_get_current_task_btf();
+       return (tsk->softirq_disable_cnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET;
+}
+
+static inline int bpf_in_task(void)
+{
+       struct task_struct___preempt_rt *tsk;
+       int pcnt;
+
+       pcnt = get_preempt_count();
+       if (!CONFIG_PREEMPT_RT)
+               return !(pcnt & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+       tsk = (void *) bpf_get_current_task_btf();
+       return !((pcnt & (NMI_MASK | HARDIRQ_MASK)) |
+                ((tsk->softirq_disable_cnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET));
+}
 #endif
-- 
2.52.0

