From: Frederic Weisbecker <[email protected]>

cpusets is going to use the NOCB (de-)offloading interface while
already holding the hotplug lock. Therefore move the responsibility of
protecting against concurrent CPU-hotplug changes out of
rcu_nocb_cpumask_update() and into its callers.
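
Callers must now hold the hotplug read lock across the call. As an
illustrative sketch (not the actual cpuset integration), a caller
toggling a CPU to offloaded mode would now look like:

        int err;

        cpus_read_lock();
        err = rcu_nocb_cpumask_update(cpumask_of(cpu), true);
        cpus_read_unlock();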

Signed-off-by: Frederic Weisbecker <[email protected]>
Cc: Zefan Li <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Phil Auld <[email protected]>
Cc: Nicolas Saenz Julienne <[email protected]>
Cc: Marcelo Tosatti <[email protected]>
Cc: Paul Gortmaker <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Daniel Bristot de Oliveira <[email protected]>
Cc: Peter Zijlstra <[email protected]>
---
 kernel/rcu/rcutorture.c | 2 ++
 kernel/rcu/tree_nocb.h  | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 228a5488eb5e..e935152346ff 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2139,6 +2139,7 @@ static int rcu_nocb_toggle(void *arg)
        do {
                r = torture_random(&rand);
                cpu = (r >> 1) % (maxcpu + 1);
+               cpus_read_lock();
                if (r & 0x1) {
                        rcu_nocb_cpumask_update(cpumask_of(cpu), true);
                        atomic_long_inc(&n_nocb_offload);
@@ -2146,6 +2147,7 @@ static int rcu_nocb_toggle(void *arg)
                        rcu_nocb_cpumask_update(cpumask_of(cpu), false);
                        atomic_long_inc(&n_nocb_deoffload);
                }
+               cpus_read_unlock();
                toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 60b0a15ed6e2..bbcf6f4152a3 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1301,12 +1301,13 @@ int rcu_nocb_cpumask_update(const struct cpumask *cpumask, bool offload)
        int err_cpu;
        cpumask_var_t saved_nocb_mask;
 
+       lockdep_assert_cpus_held();
+
        if (!alloc_cpumask_var(&saved_nocb_mask, GFP_KERNEL))
                return -ENOMEM;
 
        cpumask_copy(saved_nocb_mask, rcu_nocb_mask);
 
-       cpus_read_lock();
        mutex_lock(&rcu_state.barrier_mutex);
        for_each_cpu(cpu, cpumask) {
                if (offload) {
@@ -1340,7 +1341,6 @@ int rcu_nocb_cpumask_update(const struct cpumask *cpumask, bool offload)
        }
 
        mutex_unlock(&rcu_state.barrier_mutex);
-       cpus_read_unlock();
 
        free_cpumask_var(saved_nocb_mask);
 
-- 
2.39.3

