select_idle_sibling() must disregard cpus with cpu_power=1 to avoid
placing tasks on cpus that have been disabled by the power scheduler.

This is a quick fix. The select_idle_sibling() algorithm should
eventually be updated to handle cpu_power=1 properly.

Signed-off-by: Morten Rasmussen <morten.rasmus...@arm.com>
CC: Ingo Molnar <mi...@kernel.org>
CC: Peter Zijlstra <pet...@infradead.org>
CC: Catalin Marinas <catalin.mari...@arm.com>
---
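Note (not part of the patch): power_cpu_balance() is introduced by the
power scheduler patches earlier in this series. For reviewers reading
this patch in isolation, a minimal sketch of such a predicate, assuming
the power scheduler flags disabled cpus by dropping their rq->cpu_power
to the sentinel value 1, might look like:

	/*
	 * Sketch only; the real helper comes from the power scheduler
	 * series. Assumes a disabled cpu is marked by cpu_power == 1,
	 * so any cpu with a larger capacity is available for balancing.
	 */
	static inline bool power_cpu_balance(int cpu)
	{
		return cpu_rq(cpu)->cpu_power > 1;
	}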
 kernel/sched/fair.c |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01f1f26..f637ea5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3287,13 +3287,14 @@ static int select_idle_sibling(struct task_struct *p, int target)
        struct sched_group *sg;
        int i = task_cpu(p);
 
-       if (idle_cpu(target))
+       if (idle_cpu(target) && power_cpu_balance(target))
                return target;
 
        /*
         * If the prevous cpu is cache affine and idle, don't be stupid.
         */
-       if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+       if (i != target && cpus_share_cache(i, target) && idle_cpu(i) &&
+               power_cpu_balance(i))
                return i;
 
        /*
@@ -3308,7 +3309,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
                                goto next;
 
                        for_each_cpu(i, sched_group_cpus(sg)) {
-                               if (i == target || !idle_cpu(i))
+                               if (i == target || !idle_cpu(i) ||
+                                       !power_cpu_balance(i))
                                        goto next;
                        }
 
-- 
1.7.9.5

