SIS_AVG_CPU was introduced as a means of avoiding a search when the
average search cost indicated that the search would likely fail. It was
a blunt instrument and was disabled by commit 4c77b18cf8b7 ("sched/fair: Make
select_idle_cpu() more aggressive") and later replaced with a proportional
search depth by commit 1ad3aaf3fcd2 ("sched/core: Implement new approach
to scale select_idle_cpu()").

While there are corner cases where SIS_AVG_CPU is better, it has now been
disabled for almost three years. As the intent of SIS_PROP is to reduce
the time complexity of select_idle_cpu(), let's drop SIS_AVG_CPU and focus
on SIS_PROP as a throttling mechanism.
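
For reference, the throttling that SIS_PROP applies can be sketched as a
standalone C function (illustration only: the helper name is made up,
plain division stands in for div_u64(), and the rq/sched_domain fields
are passed in as arguments rather than read from this_rq() and this_sd):

  #include <stdint.h>

  /*
   * Sketch of the SIS_PROP depth calculation: scan a number of CPUs
   * proportional to the ratio of the runqueue's recent idle time to
   * the domain's average scan cost, but never fewer than 4.
   */
  static int sis_prop_scan_depth(uint64_t rq_avg_idle,
                                 uint64_t avg_scan_cost,
                                 unsigned int span_weight)
  {
          uint64_t avg_idle = rq_avg_idle / 512;  /* large fuzz factor */
          uint64_t avg_cost = avg_scan_cost + 1;  /* avoid div by zero */
          uint64_t span_avg = (uint64_t)span_weight * avg_idle;

          if (span_avg > 4 * avg_cost)
                  return (int)(span_avg / avg_cost);

          return 4;
  }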

Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
Reviewed-by: Vincent Guittot <vincent.guit...@linaro.org>
---
 kernel/sched/fair.c     | 20 +++++++++-----------
 kernel/sched/features.h |  1 -
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 04a3ce20da67..9f5682aeda2e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6145,7 +6145,6 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 {
        struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
        struct sched_domain *this_sd;
-       u64 avg_cost, avg_idle;
        u64 time;
        int this = smp_processor_id();
        int cpu, nr = INT_MAX;
@@ -6154,18 +6153,17 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        if (!this_sd)
                return -1;
 
-       /*
-        * Due to large variance we need a large fuzz factor; hackbench in
-        * particularly is sensitive here.
-        */
-       avg_idle = this_rq()->avg_idle / 512;
-       avg_cost = this_sd->avg_scan_cost + 1;
+       if (sched_feat(SIS_PROP)) {
+               u64 avg_cost, avg_idle, span_avg;
 
-       if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
-               return -1;
+               /*
+                * Due to large variance we need a large fuzz factor;
+                * hackbench in particular is sensitive here.
+                */
+               avg_idle = this_rq()->avg_idle / 512;
+               avg_cost = this_sd->avg_scan_cost + 1;
 
-       if (sched_feat(SIS_PROP)) {
-               u64 span_avg = sd->span_weight * avg_idle;
+               span_avg = sd->span_weight * avg_idle;
                if (span_avg > 4*avg_cost)
                        nr = div_u64(span_avg, avg_cost);
                else
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 68d369cba9e4..e875eabb6600 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -54,7 +54,6 @@ SCHED_FEAT(TTWU_QUEUE, true)
 /*
  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
  */
-SCHED_FEAT(SIS_AVG_CPU, false)
 SCHED_FEAT(SIS_PROP, true)
 
 /*
-- 
2.26.2
