Put upper and lower limits on the CPU search in select_idle_cpu(). The lower
limit is set to the number of CPUs in a core, while the upper limit is derived
from the latency-nice value of the thread. This ensures that on any
architecture the search usually extends beyond a single core. By changing the
latency-nice value, the user can tune the search cost to suit a given workload.
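
For illustration only (not part of the patch), below is a minimal standalone
sketch of the bound computation, compilable in userspace. The function name
sis_search_bound(), the smt_weight parameter and the LATENCY_NICE_MAX value of
100 are assumptions made for the example, standing in for
cpumask_weight(topology_sibling_cpumask(target)), sd->span_weight and the
actual latency-nice range used by this series.

/* Standalone sketch; names and the LATENCY_NICE_MAX value are illustrative. */
#include <stdio.h>

#define LATENCY_NICE_MAX 100            /* assumed top of the latency-nice range */

/*
 * Clamp the number of CPUs to scan: at least one core, at most a share of
 * the LLC proportional to the task's latency-nice value.
 */
static int sis_search_bound(int latency_nice, int llc_span_weight,
                            int smt_weight)
{
        int floor = smt_weight;         /* lower limit: CPUs in one core */
        int nr;

        if (floor < 2)
                floor = 2;              /* always look beyond the target CPU */

        nr = (latency_nice * llc_span_weight) / LATENCY_NICE_MAX;
        if (nr < floor)
                nr = floor;

        return nr;
}

int main(void)
{
        int ln;

        /* e.g. 2-way SMT, 28-CPU LLC domain */
        for (ln = 10; ln <= 100; ln += 30)
                printf("latency_nice=%3d -> scan up to %d CPUs\n",
                       ln, sis_search_bound(ln, 28, 2));
        return 0;
}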

Signed-off-by: subhra mazumdar <subhra.mazum...@oracle.com>
---
 kernel/sched/fair.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b08d00c..c31082d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6188,7 +6188,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        u64 avg_cost, avg_idle;
        u64 time, cost;
        s64 delta;
-       int cpu, nr = INT_MAX;
+       int cpu, floor, nr = INT_MAX;
 
        this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
        if (!this_sd)
@@ -6205,11 +6205,12 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
                return -1;
 
        if (sched_feat(SIS_PROP)) {
-               u64 span_avg = sd->span_weight * avg_idle;
-               if (span_avg > 4*avg_cost)
-                       nr = div_u64(span_avg, avg_cost);
-               else
-                       nr = 4;
+               floor = cpumask_weight(topology_sibling_cpumask(target));
+               if (floor < 2)
+                       floor = 2;
+               nr = (p->latency_nice * sd->span_weight) / LATENCY_NICE_MAX;
+               if (nr < floor)
+                       nr = floor;
        }
 
        time = local_clock();
-- 
2.9.3