From: Su Zhidao <[email protected]>

scx_bypass_depth is a global counter that will be moved into struct
scx_sched when multi-scheduler support lands. Add a comment explaining
why READ_ONCE/WRITE_ONCE are used despite bypass_lock serialization:
modifications are serialized by the lock, but the value can be observed
locklessly from softirq context (e.g., in scx_bypass_lb_timerfn()).
Signed-off-by: Su Zhidao <[email protected]>
---
 kernel/sched/ext.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index c269e489902c..b1e5a95682c1 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -33,6 +33,12 @@ static DEFINE_MUTEX(scx_enable_mutex);
 DEFINE_STATIC_KEY_FALSE(__scx_enabled);
 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
+/*
+ * Counts the number of active bypass requests. Protected by bypass_lock
+ * inside scx_bypass(), but read locklessly (e.g., from
+ * scx_bypass_lb_timerfn() in softirq context) using READ_ONCE(). Will
+ * be moved into struct scx_sched when multi-scheduler support lands.
+ */
 static int scx_bypass_depth;
 static cpumask_var_t scx_bypass_lb_donee_cpumask;
 static cpumask_var_t scx_bypass_lb_resched_cpumask;
-- 
2.43.0

