Context tracking involves tracking user, kernel, and guest context
switches. Enable the existing context tracking code for interrupt entry
on 32-bit. Interrupt exit already has context tracking calls, and KVM
cannot be selected if CONTEXT_TRACKING_USER=y, so interrupt entry is the
only piece still missing.
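
For reference, the calls this makes reachable on interrupt entry follow
roughly the pattern below (a simplified sketch against the generic
context tracking API in include/linux/context_tracking.h; the sketch_*
function names are illustrative, not the actual interrupt.h code):

	#include <linux/context_tracking.h>
	#include <asm/ptrace.h>

	/* Sketch: interrupt entry, when the interrupt was taken from user mode. */
	static inline void sketch_interrupt_enter(struct pt_regs *regs)
	{
		if (user_mode(regs)) {
			/* Interrupted user context: tell RCU/nohz we are now in the kernel. */
			CT_WARN_ON(ct_state() != CONTEXT_USER);
			user_exit_irqoff();
		}
	}

	/* Interrupt exit symmetrically marks the return to user context. */
	static inline void sketch_interrupt_exit(struct pt_regs *regs)
	{
		if (user_mode(regs))
			user_enter_irqoff();
	}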

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/Kconfig                 |  2 +-
 arch/powerpc/include/asm/interrupt.h | 35 +++++++---------------------
 2 files changed, 9 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 81c9f895d690..f667279ec74c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -204,7 +204,7 @@ config PPC
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ASM_MODVERSIONS
-       select HAVE_CONTEXT_TRACKING_USER               if PPC64
+       select HAVE_CONTEXT_TRACKING_USER
        select HAVE_C_RECORDMCOUNT
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DEBUG_STACKOVERFLOW
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 4745bb9998bd..f88a145fb61a 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -74,17 +74,18 @@
 #include <asm/kprobes.h>
 #include <asm/runlatch.h>
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
 /*
  * WARN/BUG is handled with a program interrupt so minimise checks here to
  * avoid recursion and maximise the chance of getting the first oops handled.
  */
 #define INT_SOFT_MASK_BUG_ON(regs, cond)                               \
 do {                                                                   \
-       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&               \
-           (user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM)))     \
+       if ((user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM)))     \
                BUG_ON(cond);                                           \
 } while (0)
+#else
+#define INT_SOFT_MASK_BUG_ON(regs, cond)
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -151,28 +152,8 @@ static inline void booke_restore_dbcr0(void)
 
 static inline void interrupt_enter_prepare(struct pt_regs *regs)
 {
-#ifdef CONFIG_PPC32
-       if (!arch_irq_disabled_regs(regs))
-               trace_hardirqs_off();
-
-       if (user_mode(regs))
-               kuap_lock();
-       else
-               kuap_save_and_lock(regs);
-
-       if (user_mode(regs))
-               account_cpu_user_entry();
-#endif
-
 #ifdef CONFIG_PPC64
-       bool trace_enable = false;
-
-       if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
-               if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
-                       trace_enable = true;
-       } else {
-               irq_soft_mask_set(IRQS_ALL_DISABLED);
-       }
+       irq_soft_mask_set(IRQS_ALL_DISABLED);
 
        /*
         * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
@@ -188,9 +169,10 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
        } else {
                __hard_RI_enable();
        }
+       /* Enable MSR[RI] early, to support kernel SLB and hash faults */
+#endif
 
-       /* Do this when RI=1 because it can cause SLB faults */
-       if (trace_enable)
+       if (!arch_irq_disabled_regs(regs))
                trace_hardirqs_off();
 
        if (user_mode(regs)) {
@@ -215,7 +197,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
        }
        INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
                                   !(regs->msr & MSR_EE));
-#endif
 
        booke_restore_dbcr0();
 }
-- 
2.37.2
