When trying to migrate a task in task_numa_migrate(), we invoke task_numa_find_cpu() to choose a destination CPU. That function skips any CPU that is not in the task's cpus_allowed mask, but it does not take isolated CPUs into account, so the task may end up running on an isolated CPU (isolcpus).
This patch fixes this issue by checking the load_balance_mask. Signed-off-by: Yi Wang <[email protected]> Reviewed-by: Yi Liu <[email protected]> --- kernel/sched/fair.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 908c9cd..0fa0cee 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1709,6 +1709,7 @@ static void task_numa_compare(struct task_numa_env *env, rcu_read_unlock(); } +static int is_cpu_load_balance(int cpu); static void task_numa_find_cpu(struct task_numa_env *env, long taskimp, long groupimp) { @@ -1731,6 +1732,9 @@ static void task_numa_find_cpu(struct task_numa_env *env, if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed)) continue; + if (!is_cpu_load_balance(cpu)) + continue; + env->dst_cpu = cpu; task_numa_compare(env, taskimp, groupimp, maymove); } @@ -8528,6 +8532,12 @@ static int should_we_balance(struct lb_env *env) return balance_cpu == env->dst_cpu; } +static int is_cpu_load_balance(int cpu) +{ + struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); + + return cpumask_test_cpu(cpu, cpus); +} + /* * Check this_cpu to ensure it is balanced within domain. Attempt to move * tasks if there is an imbalance. -- 1.8.3.1

