Commit 13799748b957 ("powerpc/64: use interrupt restart table to speed
up return from interrupt") removed the unconditional clearing of MSR[RI]
when returning from interrupt to kernel. But powerpc/32 doesn't implement
an interrupt restart table, so it still needs MSR[RI] to be cleared.
It could be added back in interrupt_exit_kernel_prepare() but it is
easier and better to add it back in entry_32.S for the following reasons:
- Writing to MSR must be followed by a synchronising instruction
- The smaller the non-recoverable section is, the better it is

So add a macro called clr_ri and use it in the three places that play
with SRR0/SRR1. Use it just before another mtspr for synchronisation,
to avoid having to add an isync.

Now that this is done in entry_32.S, exit_must_hard_disable() can return
false for non-book3s/64, taking into account that BOOKE doesn't have
MSR_RI.

Also add back the blacklisting of syscall_exit_finish for kprobes. It
was initially added by commit 7cdf44013885 ("powerpc/entry32: Blacklist
syscall exit points for kprobe.") then lost with commit 6f76a01173cc
("powerpc/syscall: implement system call entry/exit logic in C for
PPC32").

Fixes: 6f76a01173cc ("powerpc/syscall: implement system call entry/exit logic in C for PPC32")
Fixes: 13799748b957 ("powerpc/64: use interrupt restart table to speed up return from interrupt")
Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/kernel/entry_32.S  | 18 +++++++++++++++++-
 arch/powerpc/kernel/interrupt.c |  2 +-
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index c37480176a1c..16f8ee6cb2cd 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -101,6 +101,17 @@ SYM_FUNC_END(__kuep_unlock)
 	.endm
 #endif
 
+.macro clr_ri trash
+#ifndef CONFIG_BOOKE
+#ifdef CONFIG_PPC_8xx
+	mtspr	SPRN_NRI, \trash
+#else
+	li	\trash, MSR_KERNEL & ~MSR_RI
+	mtmsr	\trash
+#endif
+#endif
+.endm
+
 	.globl	transfer_to_syscall
 transfer_to_syscall:
 	stw	r3, ORIG_GPR3(r1)
@@ -149,6 +160,7 @@ ret_from_syscall:
 	cmpwi	r3,0
 	REST_GPR(3, r1)
 syscall_exit_finish:
+	clr_ri	r4
 	mtspr	SPRN_SRR0,r7
 	mtspr	SPRN_SRR1,r8
 
@@ -168,6 +180,7 @@ syscall_exit_finish:
 	REST_GPR(0, r1)
 	REST_GPRS(3, 12, r1)
 	b	1b
+_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
 
 #ifdef CONFIG_44x
 .L44x_icache_flush:
@@ -224,10 +237,11 @@ fast_exception_return:
 	/* Clear the exception marker on the stack to avoid confusing stacktrace */
 	li	r10, 0
 	stw	r10, 8(r11)
-	REST_GPR(10, r11)
+	clr_ri	r10
 	mtspr	SPRN_SRR1,r9
 	mtspr	SPRN_SRR0,r12
 	REST_GPR(9, r11)
+	REST_GPR(10, r11)
 	REST_GPR(12, r11)
 	REST_GPR(11, r11)
 	rfi
@@ -256,6 +270,7 @@ interrupt_return:
 .Lfast_user_interrupt_return:
 	lwz	r11,_NIP(r1)
 	lwz	r12,_MSR(r1)
+	clr_ri	r4
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r12
 
@@ -298,6 +313,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	cmpwi	cr1,r3,0
 	lwz	r11,_NIP(r1)
 	lwz	r12,_MSR(r1)
+	clr_ri	r4
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r12
 
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index e0c681d0b076..aea6f7e8e9c6 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -38,7 +38,7 @@ static inline bool exit_must_hard_disable(void)
 #else
 static inline bool exit_must_hard_disable(void)
 {
-	return true;
+	return false;
 }
 #endif
 
-- 
2.49.0