From: Peter Zijlstra (Intel) <pet...@infradead.org>

In order to make the next patch more readable, and to quantify the
actual effectiveness of this pass, start by removing it. The pass in
question is select_idle_smt(), the last fallback in
select_idle_sibling(), which scans the target CPU's SMT siblings for
an idle CPU.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
---
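For reference, a minimal userspace sketch (not kernel code) of the fallback
order left in select_idle_sibling() once this pass is gone. The names
pick_idle_sibling(), scan_idle_core(), scan_idle_cpu() and NR_CPUS_DEMO are
made-up stand-ins for illustration only:

#include <stdio.h>

#define NR_CPUS_DEMO 8

/* Stand-in scans: each returns an idle CPU id, or -1 if none was found. */
static int scan_idle_core(int target) { (void)target; return -1; }
static int scan_idle_cpu(int target)  { (void)target; return -1; }

static int pick_idle_sibling(int target)
{
	int i;

	i = scan_idle_core(target);		/* whole-core scan */
	if ((unsigned)i < NR_CPUS_DEMO)
		return i;

	i = scan_idle_cpu(target);		/* per-CPU scan of the LLC */
	if ((unsigned)i < NR_CPUS_DEMO)
		return i;

	/*
	 * The select_idle_smt() scan that used to run here is removed by
	 * this patch; we now fall straight back to the original target.
	 */
	return target;
}

int main(void)
{
	printf("picked CPU %d\n", pick_idle_sibling(3));
	return 0;
}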
 kernel/sched/fair.c | 30 ------------------------------
 1 file changed, 30 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0811e2fe4f19..12e08da90024 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6103,27 +6103,6 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
        return -1;
 }
 
-/*
- * Scan the local SMT mask for idle CPUs.
- */
-static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
-{
-       int cpu;
-
-       if (!static_branch_likely(&sched_smt_present))
-               return -1;
-
-       for_each_cpu(cpu, cpu_smt_mask(target)) {
-               if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
-                   !cpumask_test_cpu(cpu, sched_domain_span(sd)))
-                       continue;
-               if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
-                       return cpu;
-       }
-
-       return -1;
-}
-
 #else /* CONFIG_SCHED_SMT */
 
 #define sched_smt_weight       1
@@ -6133,11 +6112,6 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
        return -1;
 }
 
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
-{
-       return -1;
-}
-
 #endif /* CONFIG_SCHED_SMT */
 
 #define sis_min_cores          2
@@ -6331,10 +6305,6 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
        if ((unsigned)i < nr_cpumask_bits)
                return i;
 
-       i = select_idle_smt(p, sd, target);
-       if ((unsigned)i < nr_cpumask_bits)
-               return i;
-
        return target;
 }
 
-- 
2.26.2
