From: Benjamin Herrenschmidt <[email protected]>

This patch has been added to the 3.12 stable tree. If you have any
objections, please let us know.

===============

commit 0c4888ef1d8a8b82c29075ce7e257ff795af15c7 upstream.

When restoring the PPR value, we incorrectly access the thread structure
at a time when MSR:RI is clear, which means we cannot recover from nested
faults. However, the thread structure isn't covered by the "bolted" SLB
entries, and thus accessing it can fault.

This fixes it by splitting the code so that the PPR value is loaded into
a GPR before MSR:RI is cleared.

Signed-off-by: Benjamin Herrenschmidt <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
---
 arch/powerpc/include/asm/ppc_asm.h |  7 -------
 arch/powerpc/kernel/entry_64.S     | 10 +++++++++-
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 599545738af3..c2dcfaa51987 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -478,13 +478,6 @@ BEGIN_FTR_SECTION_NESTED(945)                              
                \
        std     ra,TASKTHREADPPR(rb);                                   \
 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
 
-#define RESTORE_PPR(ra, rb)                                            \
-BEGIN_FTR_SECTION_NESTED(946)                                          \
-       ld      ra,PACACURRENT(r13);                                    \
-       ld      rb,TASKTHREADPPR(ra);                                   \
-       mtspr   SPRN_PPR,rb;    /* Restore PPR */                       \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
-
 #endif
 
 /*
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c04cdf70d487..7be37170fda7 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -820,6 +820,12 @@ fast_exception_return:
        andi.   r0,r3,MSR_RI
        beq-    unrecov_restore
 
+       /* Load PPR from thread struct before we clear MSR:RI */
+BEGIN_FTR_SECTION
+       ld      r2,PACACURRENT(r13)
+       ld      r2,TASKTHREADPPR(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
        /*
         * Clear RI before restoring r13.  If we are returning to
         * userspace and we take an exception after restoring r13,
@@ -840,8 +846,10 @@ fast_exception_return:
         */
        andi.   r0,r3,MSR_PR
        beq     1f
+BEGIN_FTR_SECTION
+       mtspr   SPRN_PPR,r2     /* Restore PPR */
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ACCOUNT_CPU_USER_EXIT(r2, r4)
-       RESTORE_PPR(r2, r4)
        REST_GPR(13, r1)
 1:
        mtspr   SPRN_SRR1,r3
-- 
1.9.0

--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to