Asymmetric systems may not offer the same level of userspace ISA support
across all CPUs, meaning that some applications cannot be executed by
some CPUs. As a concrete example, upcoming arm64 big.LITTLE designs do
not feature support for 32-bit applications on both clusters.

Modify guarantee_online_cpus() to take task_cpu_possible_mask() into
account when trying to find a suitable set of online CPUs for a given
task. This will avoid passing an invalid mask to set_cpus_allowed_ptr()
during ->attach() and will subsequently allow the cpuset hierarchy to be
taken into account when forcefully overriding the affinity mask for a
task which requires migration to a compatible CPU.
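
In outline, the reworked guarantee_online_cpus() clamps the result to
the CPUs the task can actually run on before walking the hierarchy,
along these lines (a simplified sketch of the hunk below; the WARN_ON()
fallback and the NULL-parent early return are elided):

	struct cpuset *cs = task_cs(tsk);

	/* Start from the CPUs that are both online and can run tsk. */
	cpumask_and(pmask, task_cpu_possible_mask(tsk), cpu_online_mask);

	/* Walk towards the root until an ancestor intersects pmask. */
	while (!cpumask_intersects(cs->effective_cpus, pmask))
		cs = parent_cs(cs);

	cpumask_and(pmask, pmask, cs->effective_cpus);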

Cc: Li Zefan <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Johannes Weiner <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
---
 include/linux/cpuset.h |  3 ++-
 kernel/cgroup/cpuset.c | 33 +++++++++++++++++++--------------
 2 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 04c20de66afc..414a8e694413 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -15,6 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/mm.h>
+#include <linux/mmu_context.h>
 #include <linux/jump_label.h>
 
 #ifdef CONFIG_CPUSETS
@@ -184,7 +185,7 @@ static inline void cpuset_read_unlock(void) { }
 static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
 {
-       cpumask_copy(mask, cpu_possible_mask);
+       cpumask_copy(mask, task_cpu_possible_mask(p));
 }
 
 static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index e970737c3ed2..d30febf1f69f 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -372,18 +372,26 @@ static inline bool is_in_v2_mode(void)
 }
 
 /*
- * Return in pmask the portion of a cpusets's cpus_allowed that
- * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.
+ * Return in pmask the portion of a task's cpuset's cpus_allowed that
+ * are online and are capable of running the task.  If none are found,
+ * walk up the cpuset hierarchy until we find one that does have some
+ * appropriate cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
  *
  * Call with callback_lock or cpuset_mutex held.
  */
-static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
+static void guarantee_online_cpus(struct task_struct *tsk,
+                                 struct cpumask *pmask)
 {
-       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
+       struct cpuset *cs = task_cs(tsk);
+       const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+
+       if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
+               cpumask_copy(pmask, cpu_online_mask);
+
+       while (!cpumask_intersects(cs->effective_cpus, pmask)) {
                cs = parent_cs(cs);
                if (unlikely(!cs)) {
                        /*
@@ -393,11 +401,10 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
                         * cpuset's effective_cpus is on its way to be
                         * identical to cpu_online_mask.
                         */
-                       cpumask_copy(pmask, cpu_online_mask);
                        return;
                }
        }
-       cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
+       cpumask_and(pmask, pmask, cs->effective_cpus);
 }
 
 /*
@@ -2176,15 +2183,13 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 
        percpu_down_write(&cpuset_rwsem);
 
-       /* prepare for attach */
-       if (cs == &top_cpuset)
-               cpumask_copy(cpus_attach, cpu_possible_mask);
-       else
-               guarantee_online_cpus(cs, cpus_attach);
-
        guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
        cgroup_taskset_for_each(task, css, tset) {
+               if (cs != &top_cpuset)
+                       guarantee_online_cpus(task, cpus_attach);
+               else
+                       cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
                /*
                 * can_attach beforehand should guarantee that this doesn't
                 * fail.  TODO: have a better way to handle failure here
@@ -3280,7 +3285,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 
        spin_lock_irqsave(&callback_lock, flags);
        rcu_read_lock();
-       guarantee_online_cpus(task_cs(tsk), pmask);
+       guarantee_online_cpus(tsk, pmask);
        rcu_read_unlock();
        spin_unlock_irqrestore(&callback_lock, flags);
 }
-- 
2.29.2.576.ga3fc446d84-goog
