From: Peter Zijlstra <pet...@infradead.org>

Avoid pointless SCHED_SMT code when running on !SMT hardware.
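
For reference, a minimal sketch of the static-branch pattern this patch
builds on (illustrative only; my_feature_present, my_init() and
my_hot_path() are hypothetical names, not part of this patch):

	#include <linux/jump_label.h>

	/* Key defaults to false: the guarded code is patched out. */
	DEFINE_STATIC_KEY_FALSE(my_feature_present);

	/* Hypothetical feature work; stands in for the real body. */
	static void do_feature_work(void) { }

	static void my_init(void)
	{
		/* Flip the key once, e.g. after feature detection. */
		static_branch_enable(&my_feature_present);
	}

	static void my_hot_path(void)
	{
		/*
		 * On architectures with jump-label support, while the
		 * key is false this compiles down to a NOP on the hot
		 * path: no memory load, no conditional branch.
		 */
		if (static_branch_unlikely(&my_feature_present))
			do_feature_work();
	}

sched_smt_present follows the same scheme: it is enabled once in
sched_init_smt() when CPU0 reports SMT siblings, so !SMT machines never
pay for the SMT-only paths below.
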
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 kernel/sched/core.c      | 19 +++++++++++++++++++
 kernel/sched/fair.c      |  8 +++++++-
 kernel/sched/idle_task.c |  2 --
 kernel/sched/sched.h     | 17 +++++++++++++++++
 4 files changed, 43 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e224581..b41059d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7276,6 +7276,22 @@ int sched_cpu_dying(unsigned int cpu)
 }
 #endif
 
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+
+static void sched_init_smt(void)
+{
+	/*
+	 * We've enumerated all CPUs and will assume that if any CPU
+	 * has SMT siblings, CPU0 will too.
+	 */
+	if (cpumask_weight(cpu_smt_mask(0)) > 1)
+		static_branch_enable(&sched_smt_present);
+}
+#else
+static inline void sched_init_smt(void) { }
+#endif
+
 void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
@@ -7305,6 +7321,9 @@ void __init sched_init_smp(void)
 
 	init_sched_rt_class();
 	init_sched_dl_class();
+
+	sched_init_smt();
+
 	sched_smp_initialized = true;
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8335ed5..d048203 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5264,7 +5264,7 @@ static inline bool test_idle_cores(int cpu, bool def)
  * Since SMT siblings share all cache levels, inspecting this limited remote
  * state should be fairly cheap.
  */
-void update_idle_core(struct rq *rq)
+void __update_idle_core(struct rq *rq)
 {
 	int core = cpu_of(rq);
 	int cpu;
@@ -5296,6 +5296,9 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int core, cpu, wrap;
 
+	if (!static_branch_likely(&sched_smt_present))
+		return -1;
+
 	if (!test_idle_cores(target, false))
 		return -1;
 
@@ -5329,6 +5332,9 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 {
 	int cpu;
 
+	if (!static_branch_likely(&sched_smt_present))
+		return -1;
+
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 5baf75c..73c39cb 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -23,8 +23,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 	resched_curr(rq);
 }
 
-extern void update_idle_core(struct rq *rq);
-
 static struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index cdc63d9..df27200 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1810,3 +1810,20 @@ static inline void account_reset_rq(struct rq *rq)
 	rq->prev_steal_time_rq = 0;
 #endif
 }
+
+
+#ifdef CONFIG_SCHED_SMT
+
+extern struct static_key_false sched_smt_present;
+
+extern void __update_idle_core(struct rq *rq);
+
+static inline void update_idle_core(struct rq *rq)
+{
+	if (static_branch_unlikely(&sched_smt_present))
+		__update_idle_core(rq);
+}
+
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
-- 
1.7.9.5