The functions get_online_cpus() and put_online_cpus() have been
deprecated during the CPU hotplug rework. They map directly to
cpus_read_lock() and cpus_read_unlock().

Replace the deprecated CPU-hotplug functions with their official versions.
The behavior remains unchanged.
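
For context, the replacement is one-to-one; below is a minimal,
hypothetical sketch of the read-side pattern after the conversion (the
function walk_online_cpus_example() is illustrative only and is not part
of this patch):

  #include <linux/cpu.h>
  #include <linux/cpumask.h>
  #include <linux/printk.h>

  /* Holding the hotplug read lock keeps the online CPU mask stable. */
  static void walk_online_cpus_example(void)
  {
          int cpu;

          cpus_read_lock();               /* was: get_online_cpus() */
          for_each_online_cpu(cpu)        /* no CPU can go offline here */
                  pr_info("CPU %d is online\n", cpu);
          cpus_read_unlock();             /* was: put_online_cpus() */
  }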

Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Paul Mackerras <pau...@samba.org>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: kvm-...@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
---
 arch/powerpc/kernel/rtasd.c               |  4 ++--
 arch/powerpc/kvm/book3s_hv_builtin.c      | 10 +++++-----
 arch/powerpc/platforms/powernv/idle.c     |  4 ++--
 arch/powerpc/platforms/powernv/opal-imc.c |  8 ++++----
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 8561dfb33f241..32ee17753eb4a 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -429,7 +429,7 @@ static void rtas_event_scan(struct work_struct *w)
 
        do_event_scan();
 
-       get_online_cpus();
+       cpus_read_lock();
 
        /* raw_ OK because just using CPU as starting point. */
        cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
@@ -451,7 +451,7 @@ static void rtas_event_scan(struct work_struct *w)
        schedule_delayed_work_on(cpu, &event_scan_work,
                __round_jiffies_relative(event_scan_delay, cpu));
 
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index be8ef1c5b1bfb..fcf4760a3a0ea 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -137,23 +137,23 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
  * exist in the system. We use a counter of VMs to track this.
  *
  * One of the operations we need to block is onlining of secondaries, so we
- * protect hv_vm_count with get/put_online_cpus().
+ * protect hv_vm_count with cpus_read_lock/unlock().
  */
 static atomic_t hv_vm_count;
 
 void kvm_hv_vm_activated(void)
 {
-       get_online_cpus();
+       cpus_read_lock();
        atomic_inc(&hv_vm_count);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);
 
 void kvm_hv_vm_deactivated(void)
 {
-       get_online_cpus();
+       cpus_read_lock();
        atomic_dec(&hv_vm_count);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);
 
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 528a7e0cf83aa..aa27689b832db 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -199,12 +199,12 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
         */
        power7_fastsleep_workaround_exit = false;
 
-       get_online_cpus();
+       cpus_read_lock();
        primary_thread_mask = cpu_online_cores_map();
        on_each_cpu_mask(&primary_thread_mask,
                                pnv_fastsleep_workaround_apply,
                                &err, 1);
-       put_online_cpus();
+       cpus_read_unlock();
        if (err) {
                pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
                goto fail;
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 7824cc364bc40..ba02a75c14102 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -186,7 +186,7 @@ static void disable_nest_pmu_counters(void)
        int nid, cpu;
        const struct cpumask *l_cpumask;
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_node_with_cpus(nid) {
                l_cpumask = cpumask_of_node(nid);
                cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
@@ -195,7 +195,7 @@ static void disable_nest_pmu_counters(void)
                opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                                       get_hard_smp_processor_id(cpu));
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static void disable_core_pmu_counters(void)
@@ -203,7 +203,7 @@ static void disable_core_pmu_counters(void)
        cpumask_t cores_map;
        int cpu, rc;
 
-       get_online_cpus();
+       cpus_read_lock();
        /* Disable the IMC Core functions */
        cores_map = cpu_online_cores_map();
        for_each_cpu(cpu, &cores_map) {
@@ -213,7 +213,7 @@ static void disable_core_pmu_counters(void)
                        pr_err("%s: Failed to stop Core (cpu = %d)\n",
                                __FUNCTION__, cpu);
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 int get_max_nest_dev(void)
-- 
2.32.0
