Implement push_task_to_cpu(), which moves the task passed as argument
to the destination CPU's runqueue. It does so only if the destination
CPU is within the task's allowed CPU mask; otherwise it returns -EINVAL.

It does not change the task's allowed CPU mask, and can therefore be
used within applications that rely on owning the sched_setaffinity()
state.

It does not pin the task to the destination CPU, which means that the
scheduler may choose to move the task away from that CPU before the
task executes. Code invoking push_task_to_cpu() must be prepared to
retry in that case.
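
For illustration, a caller could retry along the following lines (a
minimal sketch only; push_task_to_cpu_retry() is a hypothetical helper
and not part of this patch):

	/*
	 * Hypothetical caller sketch: retry until the task is observed
	 * on dest_cpu. Assumes the caller holds a reference on @p.
	 */
	static int push_task_to_cpu_retry(struct task_struct *p,
					  unsigned int dest_cpu)
	{
		int ret;

		do {
			ret = push_task_to_cpu(p, dest_cpu);
			if (ret)
				return ret;	/* e.g. -EINVAL */
			cond_resched();
		} while (task_cpu(p) != dest_cpu);

		return 0;
	}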

Signed-off-by: Mathieu Desnoyers <[email protected]>
CC: "Paul E. McKenney" <[email protected]>
CC: Peter Zijlstra <[email protected]>
CC: Paul Turner <[email protected]>
CC: Thomas Gleixner <[email protected]>
CC: Andrew Hunter <[email protected]>
CC: Andy Lutomirski <[email protected]>
CC: Andi Kleen <[email protected]>
CC: Dave Watson <[email protected]>
CC: Chris Lameter <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: "H. Peter Anvin" <[email protected]>
CC: Ben Maurer <[email protected]>
CC: Steven Rostedt <[email protected]>
CC: Josh Triplett <[email protected]>
CC: Linus Torvalds <[email protected]>
CC: Andrew Morton <[email protected]>
CC: Russell King <[email protected]>
CC: Catalin Marinas <[email protected]>
CC: Will Deacon <[email protected]>
CC: Michael Kerrisk <[email protected]>
CC: Boqun Feng <[email protected]>
CC: [email protected]
---
 kernel/sched/core.c  | 37 +++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h |  9 +++++++++
 2 files changed, 46 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 317136421ac7..4bbe297574b5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1061,6 +1061,43 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                set_curr_task(rq, p);
 }
 
+int push_task_to_cpu(struct task_struct *p, unsigned int dest_cpu)
+{
+       struct rq_flags rf;
+       struct rq *rq;
+       int ret = 0;
+
+       rq = task_rq_lock(p, &rf);
+       update_rq_clock(rq);
+
+       if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (task_cpu(p) == dest_cpu)
+               goto out;
+
+       if (task_running(rq, p) || p->state == TASK_WAKING) {
+               struct migration_arg arg = { p, dest_cpu };
+               /* Need help from migration thread: drop lock and wait. */
+               task_rq_unlock(rq, p, &rf);
+               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+               tlb_migrate_finish(p->mm);
+               return 0;
+       } else if (task_on_rq_queued(p)) {
+               /*
+                * OK, since we're going to drop the lock immediately
+                * afterwards anyway.
+                */
+               rq = move_queued_task(rq, &rf, p, dest_cpu);
+       }
+out:
+       task_rq_unlock(rq, p, &rf);
+
+       return ret;
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b19552a212de..8d262d732d35 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1223,6 +1223,15 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif
 }
 
+#ifdef CONFIG_SMP
+int push_task_to_cpu(struct task_struct *p, unsigned int dest_cpu);
+#else
+static inline int push_task_to_cpu(struct task_struct *p, unsigned int dest_cpu)
+{
+       return 0;
+}
+#endif
+
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
-- 
2.11.0
