[PATCH 3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500

Date: 2015-03-26
From: Chenhui Zhao
Implement CPU hotplug for e500mc, e5500 and e6500, with support for
multi-threaded mode and 64-bit mode.

On e6500, each core has two threads. If one thread is online, it can
enable or disable the other thread of the same core. If both threads of
a core are offline, the core enters the PH20 state (a low-power state).
When the core is brought up again, Thread0 comes up first and is bound
to the logical CPU that is currently being booted. This way, all CPUs
can be hotplugged independently.
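
(The arch/powerpc/platforms/85xx/smp.c hunk that implements this flow is
truncated in the quote below, so here is only a minimal sketch of how the
offline side could look. It assumes the qoriq_pm_ops pointer declared in
asm/fsl_pm.h and the cpu_die() hook added by this patch; the helper name
and the exact call sequence are assumptions, not the patch's literal code.)

#include <linux/smp.h>
#include <asm/processor.h>	/* cpu_relax() */
#include <asm/smp.h>		/* generic_check_cpu_dead() */
#include <asm/fsl_pm.h>		/* qoriq_pm_ops */

/* Hypothetical sketch of the offline path, run on a surviving CPU. */
static void smp_85xx_cpu_die(unsigned int cpu)	/* assumed helper name */
{
	/* Wait for the dying CPU to mark itself CPU_DEAD. */
	while (!generic_check_cpu_dead(cpu))
		cpu_relax();

	/*
	 * Power-gate the hardware thread; once both threads of a core
	 * are offline, the core may enter PH20 as described above.
	 */
	if (qoriq_pm_ops && qoriq_pm_ops->cpu_die)
		qoriq_pm_ops->cpu_die(cpu);
}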

Signed-off-by: Chenhui Zhao 
---
 arch/powerpc/Kconfig  |   2 +-
 arch/powerpc/include/asm/fsl_pm.h |   4 +
 arch/powerpc/include/asm/smp.h|   2 +
 arch/powerpc/kernel/head_64.S |  20 +++--
 arch/powerpc/kernel/smp.c |   5 ++
 arch/powerpc/platforms/85xx/smp.c | 182 +-
 arch/powerpc/sysdev/fsl_rcpm.c|  56 
 7 files changed, 220 insertions(+), 51 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 22b0940..9846c83 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -380,7 +380,7 @@ config SWIOTLB
 config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && (PPC_PSERIES || \
-   PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
+   PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
---help---
  Say Y here to be able to disable and re-enable individual
  CPUs at runtime on SMP machines.
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
index bbe6089..579f495 100644
--- a/arch/powerpc/include/asm/fsl_pm.h
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -34,6 +34,10 @@ struct fsl_pm_ops {
void (*cpu_enter_state)(int cpu, int state);
/* exit the CPU from the specified state */
void (*cpu_exit_state)(int cpu, int state);
+   /* cpu up */
+   void (*cpu_up)(int cpu);
+   /* cpu die */
+   void (*cpu_die)(int cpu);
/* place the platform in the sleep state */
int (*plat_enter_sleep)(void);
/* freeze the time base */
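
(The fsl_rcpm.c changes that back these two new hooks are not included in
the quoted diff; the diffstat above only shows the file gaining 56 lines.
As a rough idea of the intent, a backend could map them onto the existing
enter/exit-state callbacks using the PH20 state. Everything below except
the E500_PM_PH20 constant and the hook semantics is an assumption, not the
patch's actual code.)

/* Hypothetical RCPM backend sketch; the rcpm_* helpers are assumed names. */
void rcpm_cpu_enter_state(int cpu, int state);	/* assumed internal helper */
void rcpm_cpu_exit_state(int cpu, int state);	/* assumed internal helper */

static void rcpm_cpu_die(int cpu)
{
	/* Deepest per-core state; taken once both threads are offline. */
	rcpm_cpu_enter_state(cpu, E500_PM_PH20);
}

static void rcpm_cpu_up(int cpu)
{
	/* Bring the core back out of PH20 before the thread is kicked. */
	rcpm_cpu_exit_state(cpu, E500_PM_PH20);
}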
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d607df5..1e500ed 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -67,6 +67,7 @@ void generic_cpu_die(unsigned int cpu);
 void generic_set_cpu_dead(unsigned int cpu);
 void generic_set_cpu_up(unsigned int cpu);
 int generic_check_cpu_restart(unsigned int cpu);
+int generic_check_cpu_dead(unsigned int cpu);
 #endif
 
 #ifdef CONFIG_PPC64
@@ -198,6 +199,7 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+extern unsigned int __cur_boot_cpu;
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d48125d..ac89050 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -181,6 +181,10 @@ exception_marker:
 #endif
 
 #ifdef CONFIG_PPC_BOOK3E
+   .globl  __cur_boot_cpu
+__cur_boot_cpu:
+   .long  0x0
+   .align 3
 _GLOBAL(fsl_secondary_thread_init)
/* Enable branch prediction */
lis r3,BUCSR_INIT@h
@@ -189,16 +193,14 @@ _GLOBAL(fsl_secondary_thread_init)
isync
 
/*
-* Fix PIR to match the linear numbering in the device tree.
-*
-* On e6500, the reset value of PIR uses the low three bits for
-* the thread within a core, and the upper bits for the core
-* number.  There are two threads per core, so shift everything
-* but the low bit right by two bits so that the cpu numbering is
-* continuous.
+* The current thread is already running in 64-bit mode,
+* as the value of TMRN_IMSR shows.
+* Compute the address of __cur_boot_cpu:
 */
-   mfspr   r3, SPRN_PIR
-   rlwimi  r3, r3, 30, 2, 30
+   bl  10f
+10:    mflr    r22
+   addi    r22,r22,(__cur_boot_cpu - 10b)
+   lwz r3,0(r22)
mtspr   SPRN_PIR, r3
 #endif
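
(The writer side of __cur_boot_cpu lives in the truncated 85xx/smp.c hunk.
Presumably the CPU performing the online stores the logical CPU number
there before releasing the thread, so the code above can load it straight
into SPRN_PIR. A sketch under that assumption follows; the helper name and
the wake mechanism are not taken from the patch.)

#include <asm/barrier.h>	/* smp_mb() */
#include <asm/smp.h>		/* __cur_boot_cpu */
#include <asm/fsl_pm.h>		/* qoriq_pm_ops */

/* Hypothetical waker-side sketch; not the literal patch code. */
static void kick_secondary_thread(int cpu)	/* assumed helper name */
{
	/*
	 * Publish the logical CPU number so fsl_secondary_thread_init()
	 * loads it into SPRN_PIR instead of renumbering the reset PIR.
	 */
	__cur_boot_cpu = (unsigned int)cpu;
	smp_mb();	/* make the store visible before the thread starts */

	/* Ungate/wake the thread; the exact mechanism is assumed here. */
	if (qoriq_pm_ops && qoriq_pm_ops->cpu_up)
		qoriq_pm_ops->cpu_up(cpu);
}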
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ec9ec20..2cca27a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -454,6 +454,11 @@ int generic_check_cpu_restart(unsigned int cpu)
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 }
 
+int generic_check_cpu_dead(unsigned int cpu)
+{
+   return per_cpu(cpu_state, cpu) == CPU_DEAD;
+}
+
 static bool secondaries_inhibited(void)
 {
return kvm_hv_mode_active();
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index fba474f..f51441b 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
  * Author: Andy Fleming 
  *Kumar Gala 
  *
- * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
+ * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
  *
  * This program 
