[PATCH 05/23] powerpc/pmem: Include pmem prototypes

2020-12-20 Thread Cédric Le Goater
It fixes this W=1 compile error:

../arch/powerpc/lib/pmem.c:51:6: error: no previous prototype for 
‘arch_wb_cache_pmem’ [-Werror=missing-prototypes]
   51 | void arch_wb_cache_pmem(void *addr, size_t size)
  |  ^~
../arch/powerpc/lib/pmem.c:58:6: error: no previous prototype for 
‘arch_invalidate_pmem’ [-Werror=missing-prototypes]
   58 | void arch_invalidate_pmem(void *addr, size_t size)
  |  ^~~~

Fixes: 32ce3862af3c ("powerpc/lib: Implement PMEM API")
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/lib/pmem.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
index 1550e0d2513a..eb2919ddf9b9 100644
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -6,6 +6,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
-- 
2.26.2



[PATCH 18/23] KVM: PPC: Make the VMX instruction emulation routines static

2020-12-20 Thread Cédric Le Goater
It fixes these W=1 compile errors:

../arch/powerpc/kvm/powerpc.c:1521:5: error: no previous prototype for 
‘kvmppc_get_vmx_dword’ [-Werror=missing-prototypes]
 1521 | int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
  | ^~~~
../arch/powerpc/kvm/powerpc.c:1539:5: error: no previous prototype for 
‘kvmppc_get_vmx_word’ [-Werror=missing-prototypes]
 1539 | int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
  | ^~~
../arch/powerpc/kvm/powerpc.c:1557:5: error: no previous prototype for 
‘kvmppc_get_vmx_hword’ [-Werror=missing-prototypes]
 1557 | int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
  | ^~~~
../arch/powerpc/kvm/powerpc.c:1575:5: error: no previous prototype for 
‘kvmppc_get_vmx_byte’ [-Werror=missing-prototypes]
 1575 | int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
  | ^~~

Cc: Paul Mackerras 
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/kvm/powerpc.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index cf52d26f49cd..25966ae3271e 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1518,7 +1518,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
return emulated;
 }
 
-int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
 {
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1536,7 +1536,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int 
index, u64 *val)
return result;
 }
 
-int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
 {
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1554,7 +1554,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, 
u64 *val)
return result;
 }
 
-int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
 {
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1572,7 +1572,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int 
index, u64 *val)
return result;
 }
 
-int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
 {
union kvmppc_one_reg reg;
int vmx_offset = 0;
-- 
2.26.2



[PATCH 02/23] powerpc/pseries/ras: Remove unused variable 'status'

2020-12-20 Thread Cédric Le Goater
The last use of 'status' was removed in 2012. Remove the variable to
fix this W=1 compile error.

../arch/powerpc/platforms/pseries/ras.c: In function ‘ras_epow_interrupt’:
../arch/powerpc/platforms/pseries/ras.c:318:6: error: variable ‘status’ set but 
not used [-Werror=unused-but-set-variable]
  318 |  int status;
  |  ^~

Fixes: 55fc0c561742 ("powerpc/pseries: Parse and handle EPOW interrupts")
Cc: Mahesh Salgaonkar 
Cc: Ganesh Goudar 
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/platforms/pseries/ras.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/ras.c 
b/arch/powerpc/platforms/pseries/ras.c
index 149cec2212e6..e27310fc1481 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -315,11 +315,10 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void 
*dev_id)
 /* Handle environmental and power warning (EPOW) interrupts. */
 static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
 {
-   int status;
int state;
int critical;
 
-   status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
+   rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
  );
 
if (state > 3)
@@ -329,7 +328,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
 
spin_lock(_log_buf_lock);
 
-   status = rtas_call(ras_check_exception_token, 6, 1, NULL,
+   rtas_call(ras_check_exception_token, 6, 1, NULL,
   RTAS_VECTOR_EXTERNAL_INTERRUPT,
   virq_to_hw(irq),
   RTAS_EPOW_WARNING,
-- 
2.26.2



[PATCH 06/23] powerpc/setup_64: Make some routines static

2020-12-20 Thread Cédric Le Goater
Fixes these W=1 errors:

../arch/powerpc/kernel/setup_64.c:261:13: error: no previous prototype for 
‘record_spr_defaults’ [-Werror=missing-prototypes]
  261 | void __init record_spr_defaults(void)
  | ^~~
../arch/powerpc/kernel/setup_64.c:1011:6: error: no previous prototype for 
‘entry_flush_enable’ [-Werror=missing-prototypes]
 1011 | void entry_flush_enable(bool enable)
  |  ^~
../arch/powerpc/kernel/setup_64.c:1023:6: error: no previous prototype for 
‘uaccess_flush_enable’ [-Werror=missing-prototypes]
 1023 | void uaccess_flush_enable(bool enable)
  |  ^~~~

Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/kernel/setup_64.c | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index c28e949cc222..560ed8b975e7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -67,6 +67,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "setup.h"
 
@@ -258,7 +259,7 @@ static void cpu_ready_for_interrupts(void)
 
 unsigned long spr_default_dscr = 0;
 
-void __init record_spr_defaults(void)
+static void __init record_spr_defaults(void)
 {
if (early_cpu_has_feature(CPU_FTR_DSCR))
spr_default_dscr = mfspr(SPRN_DSCR);
@@ -1008,7 +1009,7 @@ void rfi_flush_enable(bool enable)
rfi_flush = enable;
 }
 
-void entry_flush_enable(bool enable)
+static void entry_flush_enable(bool enable)
 {
if (enable) {
do_entry_flush_fixups(enabled_flush_types);
@@ -1020,7 +1021,7 @@ void entry_flush_enable(bool enable)
entry_flush = enable;
 }
 
-void uaccess_flush_enable(bool enable)
+static void uaccess_flush_enable(bool enable)
 {
if (enable) {
do_uaccess_flush_fixups(enabled_flush_types);
-- 
2.26.2



[PATCH 01/23] powerpc/mm: Include __find_linux_pte() prototype

2020-12-20 Thread Cédric Le Goater
It fixes this W=1 compile error:

../arch/powerpc/mm/pgtable.c:337:8: error: no previous prototype for 
‘__find_linux_pte’ [-Werror=missing-prototypes]
  337 | pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
  |^~~~

Cc: "Aneesh Kumar K.V" 
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/mm/pgtable.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 1c95cebc..3a41545e5c07 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include 
 
 static inline int is_exec_fault(void)
 {
-- 
2.26.2



[PATCH 13/23] powerpc/mm: Move hpte_insert_repeating() prototype

2020-12-20 Thread Cédric Le Goater
It fixes this W=1 compile error:

../arch/powerpc/mm/book3s64/hash_utils.c:1867:6: error: no previous prototype 
for ‘hpte_insert_repeating’ [-Werror=missing-prototypes]
 1867 | long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
  |  ^

Cc: "Aneesh Kumar K.V" 
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/include/asm/book3s/64/mmu-hash.h | 5 +
 arch/powerpc/mm/book3s64/hash_hugetlbpage.c   | 4 
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h 
b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index a94fd4e0c182..76ff95950309 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -454,6 +454,11 @@ static inline unsigned long hpt_hash(unsigned long vpn,
 #define HPTE_NOHPTE_UPDATE 0x2
 #define HPTE_USE_KERNEL_KEY0x4
 
+extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
+ unsigned long pa, unsigned long rlags,
+ unsigned long vflags, int psize, int ssize);
+
+
 extern int __hash_page_4K(unsigned long ea, unsigned long access,
  unsigned long vsid, pte_t *ptep, unsigned long trap,
  unsigned long flags, int ssize, int subpage_prot);
diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c 
b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
index b5e9fff8c217..a688e1324ae5 100644
--- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
@@ -16,10 +16,6 @@
 unsigned int hpage_shift;
 EXPORT_SYMBOL(hpage_shift);
 
-extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
- unsigned long pa, unsigned long rlags,
- unsigned long vflags, int psize, int ssize);
-
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long 
vsid,
 pte_t *ptep, unsigned long trap, unsigned long flags,
 int ssize, unsigned int shift, unsigned int mmu_psize)
-- 
2.26.2



[PATCH 12/23] powerpc/mm: Declare some prototypes

2020-12-20 Thread Cédric Le Goater
It fixes this W=1 compile error:

../arch/powerpc/mm/book3s64/hash_utils.c:1515:5: error: no previous prototype 
for ‘__hash_page’ [-Werror=missing-prototypes]
 1515 | int __hash_page(unsigned long trap, unsigned long ea, unsigned long 
dsisr,
  | ^~~
../arch/powerpc/mm/book3s64/hash_utils.c:1850:6: error: no previous prototype 
for ‘low_hash_fault’ [-Werror=missing-prototypes]
 1850 | void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
  |  ^~

Cc: "Aneesh Kumar K.V" 
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/include/asm/book3s/64/mmu-hash.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h 
b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 066b1d34c7bc..a94fd4e0c182 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -467,6 +467,9 @@ extern int hash_page_mm(struct mm_struct *mm, unsigned long 
ea,
unsigned long flags);
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long 
trap,
 unsigned long dsisr);
+void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc);
+int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
+   unsigned long msr);
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long 
vsid,
 pte_t *ptep, unsigned long trap, unsigned long flags,
 int ssize, unsigned int shift, unsigned int mmu_psize);
-- 
2.26.2



[PATCH 08/23] powerpc/smp: Include tick_broadcast() prototype

2020-12-20 Thread Cédric Le Goater
It fixes this W=1 compile error:

../arch/powerpc/kernel/smp.c:569:6: error: no previous prototype for 
‘tick_broadcast’ [-Werror=missing-prototypes]
  569 | void tick_broadcast(const struct cpumask *mask)
  |  ^~

Cc: "Gautham R. Shenoy" 
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/kernel/smp.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 2b9b1bb4c5f2..a0d094d3797c 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -34,6 +34,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
-- 
2.26.2



[PATCH 00/23] powerpc: Fix W=1 compile errors

2020-12-20 Thread Cédric Le Goater
Hello,

Here is an assorted collection of fixes for W=1.

Cheers,

C. 

Cédric Le Goater (23):
  powerpc/mm: Include __find_linux_pte() prototype
  powerpc/pseries/ras: Remove unused variable 'status'
  powerpc/pseries/eeh: Make pseries_pcibios_bus_add_device() static
  powerpc/pseries/ras: Make init_ras_hotplug_IRQ() static
  powerpc/pmem: Include pmem prototypes
  powerpc/setup_64: Make some routines static
  powerpc/mce: Include prototypes
  powerpc/smp: Include tick_broadcast() prototype
  powerpc/smp: Make debugger_ipi_callback() static
  powerpc/optprobes: Remove unused routine patch_imm32_load_insns()
  powerpc/optprobes: Make patch_imm64_load_insns() static
  powerpc/mm: Declare some prototypes
  powerpc/mm: Move hpte_insert_repeating() prototype
  powerpc/mm: Declare preload_new_slb_context() prototype
  powerpc/mm/hugetlb: Make pseries_alloc_bootmem_huge_page() static
  powerpc/mm: Declare arch_report_meminfo() prototype.
  powerpc/watchdog: Declare soft_nmi_interrupt() prototype
  KVM: PPC: Make the VMX instruction emulation routines static
  KVM: PPC: Book3S HV: Include prototypes
  KVM: PPC: Book3S HV: Declare some prototypes
  powerpc/pseries: Make IOV setup routines static
  powerpc/pcidn: Make IOV setup routines static
  powerpc/pseries/eeh: Make pseries_send_allow_unfreeze() static

 arch/powerpc/include/asm/asm-prototypes.h |  1 +
 arch/powerpc/include/asm/book3s/64/mmu-hash.h |  9 
 arch/powerpc/include/asm/kvm_book3s.h |  7 +++
 arch/powerpc/include/asm/pgtable.h|  3 +++
 arch/powerpc/kernel/mce.c |  1 +
 arch/powerpc/kernel/optprobes.c   | 21 +--
 arch/powerpc/kernel/setup_64.c|  7 ---
 arch/powerpc/kernel/smp.c |  3 ++-
 arch/powerpc/kernel/watchdog.c|  1 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c   |  1 +
 arch/powerpc/kvm/powerpc.c|  8 +++
 arch/powerpc/lib/pmem.c   |  1 +
 arch/powerpc/mm/book3s64/hash_hugetlbpage.c   |  4 
 arch/powerpc/mm/hugetlbpage.c |  2 +-
 arch/powerpc/mm/pgtable.c |  1 +
 arch/powerpc/platforms/pseries/eeh_pseries.c  |  6 +++---
 arch/powerpc/platforms/pseries/pci.c  | 16 +++---
 arch/powerpc/platforms/pseries/ras.c  |  7 +++
 arch/powerpc/platforms/pseries/setup.c|  8 +++
 19 files changed, 55 insertions(+), 52 deletions(-)

-- 
2.26.2



[PATCH 03/23] powerpc/pseries/eeh: Make pseries_pcibios_bus_add_device() static

2020-12-20 Thread Cédric Le Goater
It fixes this W=1 compile error:

../arch/powerpc/platforms/pseries/eeh_pseries.c:46:6: error: no previous 
prototype for ‘pseries_pcibios_bus_add_device’ [-Werror=missing-prototypes]
   46 | void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
  |  ^~

Fixes: dae7253f9f78 ("powerpc/pseries: Add pseries SR-IOV Machine dependent 
calls")
Cc: Alexey Kardashevskiy 
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/platforms/pseries/eeh_pseries.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c 
b/arch/powerpc/platforms/pseries/eeh_pseries.c
index cf024fa37bda..de45ceb634f9 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -43,7 +43,7 @@ static int ibm_get_config_addr_info;
 static int ibm_get_config_addr_info2;
 static int ibm_configure_pe;
 
-void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
+static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
 {
struct pci_dn *pdn = pci_get_pdn(pdev);
 
-- 
2.26.2



[PATCH] powerpc/smp: Add __init to init_big_cores()

2020-12-20 Thread Cédric Le Goater
It fixes this link warning:

WARNING: modpost: vmlinux.o(.text.unlikely+0x2d98): Section mismatch in 
reference from the function init_big_cores.isra.0() to the function 
.init.text:init_thread_group_cache_map()
The function init_big_cores.isra.0() references
the function __init init_thread_group_cache_map().
This is often because init_big_cores.isra.0 lacks a __init
annotation or the annotation of init_thread_group_cache_map is wrong.

Fixes: 425752c63b6f ("powerpc: Detect the presence of big-cores via "ibm, 
thread-groups"")
Cc: Gautham R. Shenoy 
Signed-off-by: Cédric Le Goater 
---
 arch/powerpc/kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 378328b402f0..5a4d59a1070d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -991,7 +991,7 @@ static struct sched_domain_topology_level 
powerpc_topology[] = {
{ NULL, },
 };
 
-static int init_big_cores(void)
+static int __init init_big_cores(void)
 {
int cpu;
 
-- 
2.26.2



unsubscribe

2020-12-20 Thread Shawn Landden



-- 
Shawn Landden



[PATCH] powerpc/32: Fix vmap stack - Properly set r1 before activating MMU on syscall too

2020-12-20 Thread Christophe Leroy
We need r1 to be properly set before activating MMU, otherwise any new
exception taken while saving registers into the stack in syscall
prologs will use the user stack, which is wrong and will even lockup
or crash when KUAP is selected.

Do that by switching the meaning of r11 and r1 until we have saved r1
to the stack: copy r1 into r11 and setup the new stack pointer in r1.
To avoid complicating and impacting all generic and specific prolog
code (and more), copy back r1 into r11 once r11 is saved onto
the stack.

We could get rid of copying r1 back and forth at the cost of rewriting
everything to use r1 instead of r11 all the way when CONFIG_VMAP_STACK
is set, but the effort is probably not worth it for now.

Fixes: da7bb43ab9da ("powerpc/32: Fix vmap stack - Properly set r1 before 
activating MMU")
Cc: sta...@vger.kernel.org
Signed-off-by: Christophe Leroy 
---
 arch/powerpc/kernel/head_32.h | 25 -
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 541664d95702..a2f72c966baf 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -121,18 +121,28 @@
 #ifdef CONFIG_VMAP_STACK
mfspr   r11, SPRN_SRR0
mtctr   r11
-#endif
andi.   r11, r9, MSR_PR
-   lwz r11,TASK_STACK-THREAD(r12)
+   mr  r11, r1
+   lwz r1,TASK_STACK-THREAD(r12)
beq-99f
-   addir11, r11, THREAD_SIZE - INT_FRAME_SIZE
-#ifdef CONFIG_VMAP_STACK
+   addir1, r1, THREAD_SIZE - INT_FRAME_SIZE
li  r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
mtmsr   r10
isync
+   tovirt(r12, r12)
+   stw r11,GPR1(r1)
+   stw r11,0(r1)
+   mr  r11, r1
+#else
+   andi.   r11, r9, MSR_PR
+   lwz r11,TASK_STACK-THREAD(r12)
+   beq-99f
+   addir11, r11, THREAD_SIZE - INT_FRAME_SIZE
+   tophys(r11, r11)
+   stw r1,GPR1(r11)
+   stw r1,0(r11)
+   tovirt(r1, r11) /* set new kernel sp */
 #endif
-   tovirt_vmstack r12, r12
-   tophys_novmstack r11, r11
mflrr10
stw r10, _LINK(r11)
 #ifdef CONFIG_VMAP_STACK
@@ -140,9 +150,6 @@
 #else
mfspr   r10,SPRN_SRR0
 #endif
-   stw r1,GPR1(r11)
-   stw r1,0(r11)
-   tovirt_novmstack r1, r11/* set new kernel sp */
stw r10,_NIP(r11)
mfcrr10
rlwinm  r10,r10,0,4,2   /* Clear SO bit in CR */
-- 
2.25.0



Re: [PATCH] powerpc: always enable queued spinlocks for 64s, disable for others

2020-12-20 Thread Christophe Leroy




Le 21/12/2020 à 04:22, Nicholas Piggin a écrit :

Queued spinlocks have shown to have good performance and fairness
properties even on smaller (2 socket) POWER systems. This selects
them automatically for 64s. For other platforms they are de-selected,
the standard spinlock is far simpler and smaller code, and single
chips with a handful of cores is unlikely to show any improvement.

CONFIG_EXPERT still allows this to be changed, e.g., to help debug
performance or correctness issues.

Signed-off-by: Nicholas Piggin 
---
  arch/powerpc/Kconfig | 8 +++-
  1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ae7391627054..1f9f9e64d638 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -255,6 +255,7 @@ config PPC
select PCI_MSI_ARCH_FALLBACKS   if PCI_MSI
select PCI_SYSCALL  if PCI
select PPC_DAWR if PPC64
+   select PPC_QUEUED_SPINLOCKS if !EXPERT && PPC_BOOK3S_64 && 
SMP


The condition is a bit complicated, and it doesn't set it to Y by default when 
EXPERT is selected.


select RTC_LIB
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
@@ -506,16 +507,13 @@ config HOTPLUG_CPU
  config PPC_QUEUED_SPINLOCKS
bool "Queued spinlocks"
depends on SMP
+   depends on EXPERT || PPC_BOOK3S_64
+


I would do:

   config PPC_QUEUED_SPINLOCKS
bool "Queued spinlocks" if EXPERT
depends on SMP
default PPC_BOOK3S_64




help
  Say Y here to use queued spinlocks which give better scalability and
  fairness on large SMP and NUMA systems without harming single threaded
  performance.
  
-	  This option is currently experimental, the code is more complex and

- less tested so it defaults to "N" for the moment.
-
- If unsure, say "N".
-
  config ARCH_CPU_PROBE_RELEASE
def_bool y
depends on HOTPLUG_CPU



Christophe


Re: [PATCH 01/23] kernel: irq: irqdescs: warn on spurious IRQ

2020-12-20 Thread Michael Ellerman
Andy Shevchenko  writes:
> On Fri, Dec 18, 2020 at 4:37 PM Enrico Weigelt, metux IT consult
>  wrote:
>
>> +   if (printk_ratelimit())
>> +   pr_warn("spurious IRQ: irq=%d hwirq=%d nr_irqs=%d\n",
>> +   irq, hwirq, nr_irqs);
>
> Perhaps you missed pr_warn_ratelimit() macro which is already in the
> kernel for a long time.

pr_warn_ratelimited() which calls printk_ratelimited().

And see the comment above printk_ratelimit():

/*
 * Please don't use printk_ratelimit(), because it shares ratelimiting state
 * with all other unrelated printk_ratelimit() callsites.  Instead use
 * printk_ratelimited() or plain old __ratelimit().
 */


cheers


Re: [PATCH v2] powerpc/perf/hv-24x7: Dont create sysfs event files for dummy events

2020-12-20 Thread Michael Ellerman
Kajol Jain  writes:
> hv_24x7 performance monitoring unit creates list of supported events
> from the event catalog obtained via HCALL. hv_24x7 catalog could also
> contain invalid or dummy events (with names like FREE_* or CPM_FREE_*
> and RESERVED*). These events do not have any hardware counters
> backing them. So patch adds a check to string compare the event names
> to filter out them.
>
> Result in power9 machine:
>
> Before this patch:
> .
>   hv_24x7/PM_XLINK2_OUT_ODD_CYC,chip=?/  [Kernel PMU event]
>   hv_24x7/PM_XLINK2_OUT_ODD_DATA_COUNT,chip=?/   [Kernel PMU event]
>   hv_24x7/PM_XLINK2_OUT_ODD_TOTAL_UTIL,chip=?/   [Kernel PMU event]
>   hv_24x7/PM_XTS_ATR_DEMAND_CHECKOUT,chip=?/ [Kernel PMU event]
>   hv_24x7/PM_XTS_ATR_DEMAND_CHECKOUT_MISS,chip=?/[Kernel PMU event]
>   hv_24x7/PM_XTS_ATSD_SENT,chip=?/   [Kernel PMU event]
>   hv_24x7/PM_XTS_ATSD_TLBI_RCV,chip=?/   [Kernel PMU event]
>   hv_24x7/RESERVED_NEST1,chip=?/ [Kernel PMU event]
>   hv_24x7/RESERVED_NEST10,chip=?/[Kernel PMU event]
>   hv_24x7/RESERVED_NEST11,chip=?/[Kernel PMU event]
>   hv_24x7/RESERVED_NEST12,chip=?/[Kernel PMU event]
>   hv_24x7/RESERVED_NEST13,chip=?/[Kernel PMU event]
> ..
>
> Dmesg:
> [0.000362] printk: console [hvc0] enabled
> [0.815452] hv-24x7: read 1530 catalog entries, created 537 event attrs
> (0 failures), 275 descs
>
> After this patch:
> ..
>   hv_24x7/PM_XLINK2_OUT_ODD_AVLBL_CYC,chip=?/[Kernel PMU event]
>   hv_24x7/PM_XLINK2_OUT_ODD_CYC,chip=?/  [Kernel PMU event]
>   hv_24x7/PM_XLINK2_OUT_ODD_DATA_COUNT,chip=?/   [Kernel PMU event]
>   hv_24x7/PM_XLINK2_OUT_ODD_TOTAL_UTIL,chip=?/   [Kernel PMU event]
>   hv_24x7/PM_XTS_ATR_DEMAND_CHECKOUT,chip=?/ [Kernel PMU event]
>   hv_24x7/PM_XTS_ATR_DEMAND_CHECKOUT_MISS,chip=?/[Kernel PMU event]
>   hv_24x7/PM_XTS_ATSD_SENT,chip=?/   [Kernel PMU event]
>   hv_24x7/PM_XTS_ATSD_TLBI_RCV,chip=?/   [Kernel PMU event]
>   hv_24x7/TOD,chip=?/[Kernel PMU event]
> ..
>
> Demsg:
> [0.000357] printk: console [hvc0] enabled
> [0.808592] hv-24x7: read 1530 catalog entries, created 509 event attrs
> (0 failures), 275 descs
>
> Signed-off-by: Kajol Jain 
> ---
>  arch/powerpc/perf/hv-24x7.c | 17 +
>  1 file changed, 17 insertions(+)
>
> ---
> Changelog
> v1 -> v2
> - Include "RESERVED*" as part of the invalid event check as
>   suggested by Madhavan Srinivasan
> - Add new helper function "ignore_event" to check invalid/dummy
>   events as suggested by Michael Ellerman
> - Remove pr_info to print each invalid event as suggested by
>   Michael Ellerman
> ---
> diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
> index 6e7e820508df..1a6004d88f98 100644
> --- a/arch/powerpc/perf/hv-24x7.c
> +++ b/arch/powerpc/perf/hv-24x7.c
> @@ -764,6 +764,16 @@ static ssize_t catalog_event_len_validate(struct 
> hv_24x7_event_data *event,
>   return ev_len;
>  }
>  
> +/*
> + * Return true incase of invalid or dummy events with names like FREE_* or 
> CPM_FREE_*
> + * and RESERVED*
> + */
> +static bool ignore_event(const char *name)
> +{
> + return (strstr(name, "FREE_") || !strncmp(name, "RESERVED", 8)) ?
> + true : false;

That's FREE_ anywhere in the string, which seems a bit loose.

Do we have any documentation or anything that tells us that any event
with "FREE_" in the name will always be invalid?

cheers


[PATCH] powerpc:Don't print raw EIP/LR hex values in dump_stack() and show_regs()

2020-12-20 Thread Xiaoming Ni
Since the commit 2b0e86cc5de6 ("powerpc/fsl_booke/32: implement KASLR
infrastructure"), the powerpc system is ready to support KASLR.
To reduces the risk of invalidating address randomization, don't print the
EIP/LR hex values in dump_stack() and show_regs().

This patch follows x86 and arm64's lead:
commit a25ffd3a6302a6 ("arm64: traps: Don't print stack or raw
 PC/LR values in backtraces")
commit bb5e5ce545f203 ("x86/dumpstack: Remove kernel text
 addresses from stack dump")

Signed-off-by: Xiaoming Ni 
---
 arch/powerpc/kernel/process.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a66f435dabbf..913cf1ea702e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1455,8 +1455,8 @@ static void __show_regs(struct pt_regs *regs)
 {
int i, trap;
 
-   printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
-  regs->nip, regs->link, regs->ctr);
+   printk("NIP: %pS LR: %pS CTR: "REG"\n",
+  (void *)regs->nip, (void *)regs->link, regs->ctr);
printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
   regs, regs->trap, print_tainted(), init_utsname()->release);
printk("MSR:  "REG" ", regs->msr);
@@ -1493,8 +1493,8 @@ static void __show_regs(struct pt_regs *regs)
 * above info out without failing
 */
if (IS_ENABLED(CONFIG_KALLSYMS)) {
-   printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
-   printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
+   printk("NIP %pS\n", (void *)regs->nip);
+   printk("LR %pS\n", (void *)regs->link);
}
 }
 
@@ -2160,8 +2160,8 @@ void show_stack(struct task_struct *tsk, unsigned long 
*stack,
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
-   printk("%s["REG"] ["REG"] %pS",
-   loglvl, sp, ip, (void *)ip);
+   printk("%s ["REG"] %pS",
+   loglvl, sp, (void *)ip);
ret_addr = ftrace_graph_ret_addr(current,
_idx, ip, stack);
if (ret_addr != ip)
-- 
2.27.0



[PATCH] powerpc: always enable queued spinlocks for 64s, disable for others

2020-12-20 Thread Nicholas Piggin
Queued spinlocks have shown to have good performance and fairness
properties even on smaller (2 socket) POWER systems. This selects
them automatically for 64s. For other platforms they are de-selected,
the standard spinlock is far simpler and smaller code, and single
chips with a handful of cores is unlikely to show any improvement.

CONFIG_EXPERT still allows this to be changed, e.g., to help debug
performance or correctness issues.

Signed-off-by: Nicholas Piggin 
---
 arch/powerpc/Kconfig | 8 +++-
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ae7391627054..1f9f9e64d638 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -255,6 +255,7 @@ config PPC
select PCI_MSI_ARCH_FALLBACKS   if PCI_MSI
select PCI_SYSCALL  if PCI
select PPC_DAWR if PPC64
+   select PPC_QUEUED_SPINLOCKS if !EXPERT && PPC_BOOK3S_64 && 
SMP
select RTC_LIB
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
@@ -506,16 +507,13 @@ config HOTPLUG_CPU
 config PPC_QUEUED_SPINLOCKS
bool "Queued spinlocks"
depends on SMP
+   depends on EXPERT || PPC_BOOK3S_64
+
help
  Say Y here to use queued spinlocks which give better scalability and
  fairness on large SMP and NUMA systems without harming single threaded
  performance.
 
- This option is currently experimental, the code is more complex and
- less tested so it defaults to "N" for the moment.
-
- If unsure, say "N".
-
 config ARCH_CPU_PROBE_RELEASE
def_bool y
depends on HOTPLUG_CPU
-- 
2.23.0



[PATCH] powerpc/time: Force inlining of get_tb()

2020-12-20 Thread Christophe Leroy
Force inlining of get_tb() in order to avoid getting
following function in vdso32, leading to suboptimal
performance in clock_gettime()

0688 <.get_tb>:
 688:   7c 6d 42 a6 mftbu   r3
 68c:   7c 8c 42 a6 mftbr4
 690:   7d 2d 42 a6 mftbu   r9
 694:   7c 03 48 40 cmplw   r3,r9
 698:   40 e2 ff f0 bne+688 <.get_tb>
 69c:   4e 80 00 20 blr

Signed-off-by: Christophe Leroy 
---
 arch/powerpc/include/asm/vdso/timebase.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/vdso/timebase.h 
b/arch/powerpc/include/asm/vdso/timebase.h
index b558b07959ce..881f655caa0a 100644
--- a/arch/powerpc/include/asm/vdso/timebase.h
+++ b/arch/powerpc/include/asm/vdso/timebase.h
@@ -49,7 +49,7 @@ static inline unsigned long get_tbl(void)
return mftb();
 }
 
-static inline u64 get_tb(void)
+static __always_inline u64 get_tb(void)
 {
unsigned int tbhi, tblo, tbhi2;
 
-- 
2.25.0



Re: [RFC PATCH 3/5] powerpc/64s: add CONFIG_PPC_NMMU for nest MMU support

2020-12-20 Thread Christophe Leroy




Le 20/12/2020 à 00:48, Nicholas Piggin a écrit :

This allows some nest MMU features to be compiled away if coprocessor
support is not selected.

Signed-off-by: Nicholas Piggin 
---
  arch/powerpc/Kconfig  | 1 +
  arch/powerpc/include/asm/book3s/64/mmu.h  | 2 ++
  arch/powerpc/include/asm/book3s/64/tlbflush.h | 2 ++
  arch/powerpc/include/asm/mmu_context.h| 5 +++--
  arch/powerpc/platforms/Kconfig| 3 +++
  arch/powerpc/platforms/powernv/Kconfig| 1 +
  6 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ae7391627054..4376bf4c53b4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -888,6 +888,7 @@ config PPC_PROT_SAO_LPAR
  
  config PPC_COPRO_BASE

bool
+   select PPC_NMMU if PPC_BOOK3S_64
  
  config SCHED_SMT

bool "SMT (Hyperthreading) scheduler support"
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h 
b/arch/powerpc/include/asm/book3s/64/mmu.h
index 995bbcdd0ef8..07850d68a624 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -103,8 +103,10 @@ typedef struct {
/* Number of bits in the mm_cpumask */
atomic_t active_cpus;
  
+#ifdef CONFIG_PPC_NMMU

/* Number of users of the external (Nest) MMU */
atomic_t copros;
+#endif
  
  	/* Number of user space windows opened in process mm_context */

atomic_t vas_windows;
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h 
b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 0a7431e954c6..c70a82851f78 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -16,8 +16,10 @@ enum {
  
  static inline bool mm_has_nmmu(struct mm_struct *mm)

  {
+#ifdef CONFIG_PPC_NMMU
if (unlikely(atomic_read(>context.copros) > 0))
return true;
+#endif
return false;
  }
  
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h

index d5821834dba9..53eac0cc4929 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -143,6 +143,7 @@ static inline void dec_mm_active_cpus(struct mm_struct *mm)
atomic_dec(>context.active_cpus);
  }
  
+#ifdef CONFIG_PPC_NMMU

  static inline void mm_context_add_copro(struct mm_struct *mm)
  {
/*
@@ -187,6 +188,7 @@ static inline void mm_context_remove_copro(struct mm_struct 
*mm)
dec_mm_active_cpus(mm);
}
  }
+#endif
  
  /*

   * vas_windows counter shows number of open windows in the mm
@@ -218,8 +220,7 @@ static inline void mm_context_remove_vas_window(struct 
mm_struct *mm)
  #else
  static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
  static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
-static inline void mm_context_add_copro(struct mm_struct *mm) { }
-static inline void mm_context_remove_copro(struct mm_struct *mm) { }


Are you sure you can remove those ?
If so, I think it belongs to another patch, I can't see how the new PPC_NMMU 
would allow that by itself.


+static inline bool mm_has_nmmu(struct mm_struct *mm) { return false; }
  #endif
  
  
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig

index 7a5e8f4541e3..b4b04b3f98d1 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -191,6 +191,9 @@ config PPC_INDIRECT_MMIO
  config PPC_IO_WORKAROUNDS
bool
  
+config PPC_NMMU

+   bool
+
  source "drivers/cpufreq/Kconfig"
  
  menu "CPUIdle driver"

diff --git a/arch/powerpc/platforms/powernv/Kconfig 
b/arch/powerpc/platforms/powernv/Kconfig
index 619b093a0657..145009d74457 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -36,6 +36,7 @@ config PPC_MEMTRACE
  config PPC_VAS
bool "IBM Virtual Accelerator Switchboard (VAS)"
depends on PPC_POWERNV && PPC_64K_PAGES
+   select PPC_NMMU
default y
help
  This enables support for IBM Virtual Accelerator Switchboard (VAS).



Re: [RFC PATCH 1/5] powerpc/64s: update_mmu_cache inline the radix test

2020-12-20 Thread Christophe Leroy




Le 20/12/2020 à 00:48, Nicholas Piggin a écrit :

This allows the function to be entirely noped if hash support is
compiled out (not possible yet).

Signed-off-by: Nicholas Piggin 
---
  arch/powerpc/include/asm/book3s/pgtable.h | 11 ++-
  arch/powerpc/mm/book3s32/mmu.c|  4 ++--
  arch/powerpc/mm/book3s64/hash_utils.c |  7 ++-
  3 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/pgtable.h 
b/arch/powerpc/include/asm/book3s/pgtable.h
index 0e1263455d73..914e9fc7b069 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -35,7 +35,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, 
unsigned long pfn,
   * corresponding HPTE into the hash table ahead of time, instead of
   * waiting for the inevitable extra hash-table miss exception.
   */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t 
*ptep);
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address, 
pte_t *ptep);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long 
address, pte_t *ptep)
+{
+#ifdef CONFIG_PPC64


You shouldn't need that ifdef. radix_enabled() is always defined.


+   if (radix_enabled())
+   return;
+#endif
+   hash__update_mmu_cache(vma, address, ptep);
+}
  
  #endif /* __ASSEMBLY__ */

  #endif
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 859e5bd603ac..c5a570ca37ff 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -325,8 +325,8 @@ static void hash_preload(struct mm_struct *mm, unsigned 
long ea)
   *
   * This must always be called with the pte lock held.
   */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep)
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+   pte_t *ptep)


Now the limit is 100 chars per line. This should fit on a single line I think.


  {
if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
return;
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c 
b/arch/powerpc/mm/book3s64/hash_utils.c
index 73b06adb6eeb..d52a3dee7cf2 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1667,8 +1667,8 @@ static void hash_preload(struct mm_struct *mm, pte_t 
*ptep, unsigned long ea,
   *
   * This must always be called with the pte lock held.
   */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep)
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+   pte_t *ptep)


Now the limit is 100 chars per line. This should fit on a single line I think.


  {
/*
 * We don't need to worry about _PAGE_PRESENT here because we are
@@ -1677,9 +1677,6 @@ void update_mmu_cache(struct vm_area_struct *vma, 
unsigned long address,
unsigned long trap;
bool is_exec;
  
-	if (radix_enabled())

-   return;
-
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(*ptep) || address >= TASK_SIZE)
return;