Add a static key that tells whether all CPUs share the last-level
cache, which is common on single-socket x86 machines. Knowing this up
front lets select_task_rq_fair() take a fast path when selecting an
idle sibling.
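
For illustration only (not part of this patch): a minimal sketch of how
the key might be used on the idle-sibling path. The helper name
select_idle_cpu_fast() is hypothetical, and the fallback assumes the
current select_idle_sibling(p, prev, target) form.

	/*
	 * Hypothetical sketch: when the key is on, every CPU shares the
	 * LLC, so any allowed idle CPU is cache-affine and the LLC-domain
	 * scan can be skipped entirely.
	 */
	static int select_idle_cpu_fast(struct task_struct *p, int prev,
					int target)
	{
		int cpu;

		if (!static_branch_likely(&sched_llc_complete))
			return select_idle_sibling(p, prev, target);

		for_each_cpu_and(cpu, cpu_active_mask, &p->cpus_allowed)
			if (idle_cpu(cpu))
				return cpu;

		return target;
	}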

Signed-off-by: Yuyang Du <yuyang...@intel.com>
---
 kernel/sched/core.c  |    9 +++++++++
 kernel/sched/sched.h |    4 ++++
 2 files changed, 13 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0a332ed..3f26fea 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7303,6 +7303,14 @@ static void sched_init_smt(void)
 static inline void sched_init_smt(void) { }
 #endif
 
+DEFINE_STATIC_KEY_TRUE(sched_llc_complete);
+
+static void sched_init_llc_complete(void)
+{
+       if (cpumask_weight(cpu_active_mask) > per_cpu(sd_llc_size, 0))
+               static_branch_disable(&sched_llc_complete);
+}
+
 void __init sched_init_smp(void)
 {
        cpumask_var_t non_isolated_cpus;
@@ -7334,6 +7342,7 @@ void __init sched_init_smp(void)
        init_sched_dl_class();
 
        sched_init_smt();
+       sched_init_llc_complete();
 
        sched_smp_initialized = true;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 654bc65..f11c5dd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1828,3 +1828,7 @@ static inline void update_idle_core(struct rq *rq)
 #else
 static inline void update_idle_core(struct rq *rq) { }
 #endif
+
+#ifdef CONFIG_SCHED_MC
+extern struct static_key_true sched_llc_complete;
+#endif
-- 
1.7.9.5
