Commit-ID:  c080b5a62379f0d26a5f3bc3eb80c93fdc888be4
Gitweb:     http://git.kernel.org/tip/c080b5a62379f0d26a5f3bc3eb80c93fdc888be4
Author:     Peter Zijlstra <[email protected]>
AuthorDate: Thu, 10 Mar 2016 12:54:14 +0100
Committer:  Thomas Gleixner <[email protected]>
CommitDate: Thu, 5 May 2016 13:17:53 +0200

sched/hotplug: Move sync_rcu to be with set_cpu_active(false)

The sync_rcu stuff is specifically for clearing bits in the active
mask, such that everybody will observe the cleared bit and will no
longer consider the cleared CPU for load-balancing etc.
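
To make the ordering concrete, here is a minimal userspace sketch of the
pattern (not the kernel code): an atomic bitmask stands in for
cpu_active_mask, a plain reader counter stands in for the RCU grace
period, and the function names (cpu_usable_for_balance(),
deactivate_cpu()) are made up purely for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

static atomic_ulong active_mask = ~0UL;         /* stands in for cpu_active_mask */
static atomic_int   readers;                    /* stands in for an RCU grace period */

/* Reader side: balance-like code that must not pick an inactive CPU. */
static bool cpu_usable_for_balance(int cpu)
{
        bool active;

        atomic_fetch_add(&readers, 1);                          /* ~ rcu_read_lock() */
        active = atomic_load(&active_mask) & (1UL << cpu);
        atomic_fetch_sub(&readers, 1);                          /* ~ rcu_read_unlock() */

        return active;
}

/* Writer side: mirrors what sched_cpu_deactivate() does after this patch. */
static void deactivate_cpu(int cpu)
{
        atomic_fetch_and(&active_mask, ~(1UL << cpu));          /* set_cpu_active(cpu, false) */

        /*
         * Poor man's synchronize_rcu(): wait until every reader that might
         * still have seen the bit set has left its critical section.  Unlike
         * real RCU this also waits for new readers, which is harmless here
         * because they already observe the cleared bit.
         */
        while (atomic_load(&readers))
                sched_yield();

        /* From here on, nobody will consider @cpu for load balancing. */
}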

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>

---
 kernel/cpu.c        | 15 ---------------
 kernel/sched/core.c | 14 ++++++++++++++
 2 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 15402b7..c134a35 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -703,21 +703,6 @@ static int takedown_cpu(unsigned int cpu)
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int err;
 
-       /*
-        * By now we've cleared cpu_active_mask, wait for all preempt-disabled
-        * and RCU users of this state to go away such that all new such users
-        * will observe it.
-        *
-        * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-        * not imply sync_sched(), so wait for both.
-        *
-        * Do sync before park smpboot threads to take care the rcu boost case.
-        */
-       if (IS_ENABLED(CONFIG_PREEMPT))
-               synchronize_rcu_mult(call_rcu, call_rcu_sched);
-       else
-               synchronize_rcu();
-
        /* Park the smpboot threads */
        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
        smpboot_park_threads(cpu);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 73bcd93..0a31078 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7112,6 +7112,20 @@ int sched_cpu_deactivate(unsigned int cpu)
        int ret;
 
        set_cpu_active(cpu, false);
+       /*
+        * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+        * users of this state to go away such that all new such users will
+        * observe it.
+        *
+        * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+        * not imply sync_sched(), so wait for both.
+        *
+        * Do sync before parking smpboot threads to handle the RCU boost case.
+        */
+       if (IS_ENABLED(CONFIG_PREEMPT))
+               synchronize_rcu_mult(call_rcu, call_rcu_sched);
+       else
+               synchronize_rcu();
 
        if (!sched_smp_initialized)
                return 0;

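For readers less familiar with the two RCU flavours involved: the
CONFIG_PREEMPT branch exists because preemptible RCU readers
(rcu_read_lock()) and preempt-disabled regions (sched-RCU readers) are
separate read-side domains, and synchronize_rcu() alone only covers the
former.  A rough userspace analogue of "wait for both", again with
made-up names and simple counters in place of the real grace-period
machinery:

#include <stdatomic.h>
#include <sched.h>

static atomic_int rcu_readers;          /* ~ rcu_read_lock() sections   */
static atomic_int sched_readers;        /* ~ preempt_disable() sections */

static void wait_for_readers(atomic_int *cnt)
{
        while (atomic_load(cnt))
                sched_yield();
}

/*
 * ~ synchronize_rcu_mult(call_rcu, call_rcu_sched): wait for both reader
 * classes.  The real primitive starts both grace periods and waits for
 * them concurrently; waiting back to back as done here is only slower,
 * not incorrect.
 */
static void synchronize_both(void)
{
        wait_for_readers(&rcu_readers);
        wait_for_readers(&sched_readers);
}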