On 19/08/2022 at 05:38, Rohan McLure wrote:
> Restoring the register state of the interrupted thread involves issuing
> a large number of predictable loads to the kernel stack frame. Issue the
> REST_GPR{,S} macros to clearly signal when this is happening, and bunch
> together restores at the end of the interrupt handler where the saved
> value is not consumed earlier in the handler code.
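
(For context: judging from the one-for-one replacements in the diff below, on
32-bit REST_GPR(n, base) is effectively a single lwz from the saved-GPR area of
the frame, and REST_GPRS(first, last, base) emits one such load per register in
the range, roughly:

	REST_GPR(2, r11)	->	lwz	r2, GPR2(r11)
	REST_GPRS(3, 6, r11)	->	lwz	r3, GPR3(r11)
					lwz	r4, GPR4(r11)
					lwz	r5, GPR5(r11)
					lwz	r6, GPR6(r11)

So the macro conversion itself is a mechanical rename; the comments below are
only about where the loads end up relative to SRR0/SRR1.)
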
Keep all possible restores before restoring SRR0/SRR1, see details below.

> 
> Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com>
> Reported-by: Christophe Leroy <christophe.le...@csgroup.eu>
> ---
> V2 -> V3: New patch.
> ---
>  arch/powerpc/kernel/entry_32.S | 35 ++++++++++++--------------------
>  1 file changed, 13 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> index 8d6e02ef5dc0..03a105a5806a 100644
> --- a/arch/powerpc/kernel/entry_32.S
> +++ b/arch/powerpc/kernel/entry_32.S
> @@ -68,7 +68,7 @@ prepare_transfer_to_handler:
>  	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
>  	rlwinm	r9,r9,0,~MSR_EE
>  	lwz	r12,_LINK(r11)		/* and return to address in LR */
> -	lwz	r2, GPR2(r11)
> +	REST_GPR(2, r11)
>  	b	fast_exception_return
>  _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
>  #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
> @@ -144,7 +144,7 @@ ret_from_syscall:
>  	lwz	r7,_NIP(r1)
>  	lwz	r8,_MSR(r1)
>  	cmpwi	r3,0
> -	lwz	r3,GPR3(r1)
> +	REST_GPR(3, r1)
>  syscall_exit_finish:
>  	mtspr	SPRN_SRR0,r7
>  	mtspr	SPRN_SRR1,r8
> 
>  	bne	3f
>  	mtcr	r5
> 
> -1:	lwz	r2,GPR2(r1)
> -	lwz	r1,GPR1(r1)
> +1:	REST_GPR(2, r1)
> +	REST_GPR(1, r1)
>  	rfi
>  #ifdef CONFIG_40x
>  	b	.	/* Prevent prefetch past rfi */
> @@ -165,10 +165,8 @@ syscall_exit_finish:
>  	REST_NVGPRS(r1)
>  	mtctr	r4
>  	mtxer	r5
> -	lwz	r0,GPR0(r1)
> -	lwz	r3,GPR3(r1)
> -	REST_GPRS(4, 11, r1)
> -	lwz	r12,GPR12(r1)
> +	REST_GPR(0, r1)
> +	REST_GPRS(3, 12, r1)
>  	b	1b
> 
>  #ifdef CONFIG_44x
> @@ -260,24 +258,22 @@ fast_exception_return:
>  	beq	3f	/* if not, we've got problems */
>  #endif
> 
> -2:	REST_GPRS(3, 6, r11)
> -	lwz	r10,_CCR(r11)
> -	REST_GPRS(1, 2, r11)
> +2:	lwz	r10,_CCR(r11)
>  	mtcr	r10
>  	lwz	r10,_LINK(r11)
>  	mtlr	r10
>  	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
>  	li	r10, 0
>  	stw	r10, 8(r11)
> -	REST_GPR(10, r11)
>  #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
>  	mtspr	SPRN_NRI, r0
>  #endif
>  	mtspr	SPRN_SRR1,r9
>  	mtspr	SPRN_SRR0,r12
> -	REST_GPR(9, r11)
> +	REST_GPRS(1, 6, r11)
> +	REST_GPRS(9, 10, r11)

Please keep this before modification of SRR0/SRR1. Once SRR0/SRR1 are
modified, interrupts become unrecoverable. We want to keep that window as
small as possible.

>  	REST_GPR(12, r11)
> -	lwz	r11,GPR11(r11)
> +	REST_GPR(11, r11)
>  	rfi
>  #ifdef CONFIG_40x
>  	b	.	/* Prevent prefetch past rfi */
> @@ -454,9 +450,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
>  	lwz	r3,_MSR(r1);					\
>  	andi.	r3,r3,MSR_PR;					\
>  	bne	interrupt_return;				\
> -	lwz	r0,GPR0(r1);					\
> -	lwz	r2,GPR2(r1);					\
> -	REST_GPRS(3, 8, r1);					\
> +	REST_GPR(0, r1);					\
>  	lwz	r10,_XER(r1);					\
>  	lwz	r11,_CTR(r1);					\
>  	mtspr	SPRN_XER,r10;					\
> @@ -475,11 +469,8 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
>  	lwz	r12,_MSR(r1);					\
>  	mtspr	exc_lvl_srr0,r11;				\
>  	mtspr	exc_lvl_srr1,r12;				\
> -	lwz	r9,GPR9(r1);					\
> -	lwz	r12,GPR12(r1);					\
> -	lwz	r10,GPR10(r1);					\
> -	lwz	r11,GPR11(r1);					\
> -	lwz	r1,GPR1(r1);					\
> +	REST_GPRS(2, 12, r1);					\

Same, please keep things minimal between restore of SRR0/SRR1 and RFI to
minimise the unrecoverable window.

> +	REST_GPR(1, r1);					\
>  	exc_lvl_rfi;						\
>  	b	.;	/* prevent prefetch past exc_lvl_rfi */
> 
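
To make the requested ordering concrete for the fast_exception_return hunk,
here is a rough sketch (illustrative only, based on the code being removed
above; r9 holds the saved MSR, r12 the saved NIP, and r11 is the base register
addressing the saved GPRs, so those three are the only registers that cannot be
restored before SRR0/SRR1 are written):

	REST_GPRS(1, 6, r11)		/* everything not live in r9/r11/r12 */
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9		/* unrecoverable window starts here */
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)		/* r9/r12 held the saved MSR/NIP */
	REST_GPR(12, r11)
	REST_GPR(11, r11)		/* r11 is the base, so it goes last */
	rfi				/* unrecoverable window ends here */

The same reasoning applies to the exc_lvl return path: anything that is not
still in use as scratch or as the frame pointer can be restored before
exc_lvl_srr0/exc_lvl_srr1 are written, keeping only the remaining tail loads
between the mtspr pair and exc_lvl_rfi.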