On 13/01/2021 at 08:32, Nicholas Piggin wrote:
This moves the common NMI entry and exit code into the interrupt handler
wrappers.

This changes the behaviour of soft-NMI (watchdog) and HMI interrupts, and
also MCE interrupts on 64e, by adding missing parts of the NMI entry to
them.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
  arch/powerpc/include/asm/interrupt.h | 24 ++++++++++++++++
  arch/powerpc/kernel/mce.c            | 11 --------
  arch/powerpc/kernel/traps.c          | 42 +++++-----------------------
  arch/powerpc/kernel/watchdog.c       | 10 +++----
  4 files changed, 35 insertions(+), 52 deletions(-)
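
For anyone following along: the "wrappers" here are the DEFINE_INTERRUPT_HANDLER_NMI() macros, which (as I read them) bracket the handler body with the interrupt_nmi_enter_prepare()/interrupt_nmi_exit_prepare() hooks added below. Roughly, with attributes and details omitted and the ____func name standing for the macro's inner handler body, not copied from the patch:

	long func(struct pt_regs *regs)
	{
		struct interrupt_nmi_state state;
		long ret;

		/* NMI entry accounting now lives here ... */
		interrupt_nmi_enter_prepare(regs, &state);

		ret = ____func(regs);		/* the handler body */

		/* ... and NMI exit accounting here, after the body returns */
		interrupt_nmi_exit_prepare(regs, &state);

		return ret;
	}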

diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index e278dffe7657..01192e213f9a 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -95,14 +95,38 @@ static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct int
  }
struct interrupt_nmi_state {
+#ifdef CONFIG_PPC64
+       u8 ftrace_enabled;
+#endif
  };
static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
  {
+#ifdef CONFIG_PPC64
+       state->ftrace_enabled = this_cpu_get_ftrace_enabled();
+       this_cpu_set_ftrace_enabled(0);
+#endif
+
+       /*
+        * Do not use nmi_enter() for pseries hash guest taking a real-mode
+        * NMI because not everything it touches is within the RMA limit.
+        */
+       if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+                       !firmware_has_feature(FW_FEATURE_LPAR) ||
+                       radix_enabled() || (mfmsr() & MSR_DR))
+               nmi_enter();
  }
static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
  {
+       if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+                       !firmware_has_feature(FW_FEATURE_LPAR) ||
+                       radix_enabled() || (mfmsr() & MSR_DR))
+               nmi_exit();
+
+#ifdef CONFIG_PPC64
+       this_cpu_set_ftrace_enabled(state->ftrace_enabled);
+#endif
  }
/**
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 54269947113d..51456217ec40 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -592,12 +592,6 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info);
  DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
  {
        long handled = 0;
-       u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
-
-       this_cpu_set_ftrace_enabled(0);
-       /* Do not use nmi_enter/exit for pseries hpte guest */
-       if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
-               nmi_enter();
        hv_nmi_check_nonrecoverable(regs);
@@ -607,11 +601,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
        if (ppc_md.machine_check_early)
                handled = ppc_md.machine_check_early(regs);
-       if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
-               nmi_exit();
-
-       this_cpu_set_ftrace_enabled(ftrace_enabled);
-
        return handled;
  }
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b4f23e871a68..43d23232ef5c 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -435,11 +435,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
  {
        unsigned long hsrr0, hsrr1;
        bool saved_hsrrs = false;
-       u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
-
-       this_cpu_set_ftrace_enabled(0);
-
-       nmi_enter();
        /*
         * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
@@ -511,10 +506,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
                mtspr(SPRN_HSRR1, hsrr1);
        }
-       nmi_exit();
-
-       this_cpu_set_ftrace_enabled(ftrace_enabled);
-
        /* What should we do here? We could issue a shutdown or hard reset. */
        return 0;
@@ -792,6 +783,12 @@ int machine_check_generic(struct pt_regs *regs)
  #endif /* everything else */
+/*
+ * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * (it uses its own early real-mode handler to handle the MCE proper
+ * and then raises irq_work to call this handler when interrupts are
+ * enabled).
+ */
  #ifdef CONFIG_PPC_BOOK3S_64
  DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
  #else
@@ -800,20 +797,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
  {
        int recover = 0;
-       /*
-        * BOOK3S_64 does not call this handler as a non-maskable interrupt
-        * (it uses its own early real-mode handler to handle the MCE proper
-        * and then raises irq_work to call this handler when interrupts are
-        * enabled).
-        *
-        * This is silly. The BOOK3S_64 should just call a different function
-        * rather than expecting semantics to magically change. Something
-        * like 'non_nmi_machine_check_exception()', perhaps?
-        */
-       const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);
-
-       if (nmi) nmi_enter();
-
        __this_cpu_inc(irq_stat.mce_exceptions);
        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
@@ -838,24 +821,17 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
        if (check_io_access(regs))
                goto bail;
-       if (nmi) nmi_exit();
-

IIRC, not doing the nmi_exit() before the die() is problematic.

See https://github.com/linuxppc/linux/commit/daf00ae71dad8aa05965713c62558aeebf2df48e#diff-70077148c383252ca949063eaf1b0250620e4607b43f4ef3fd2d8f448a83ab0a
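
To make that concrete, the ordering I am worried about looks roughly like this (hand-written illustration, not a proposed change; ____machine_check_exception stands for the handler body the macro generates):

	/* before this patch */
	if (nmi) nmi_exit();
	die("Machine check", regs, SIGBUS);	/* die() runs after nmi_exit() */

	/* after this patch, as I understand the wrapper */
	interrupt_nmi_enter_prepare(regs, &state);
	ret = ____machine_check_exception(regs);	/* die() happens in here,
							   still in NMI context */
	interrupt_nmi_exit_prepare(regs, &state);	/* nmi_exit() only here */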

        die("Machine check", regs, SIGBUS);
        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                die("Unrecoverable Machine check", regs, SIGBUS);
-#ifdef CONFIG_PPC_BOOK3S_64
  bail:
+#ifdef CONFIG_PPC_BOOK3S_64
        return;
  #else
        return 0;
-
-bail:
-       if (nmi) nmi_exit();
-
-       return 0;
  #endif
  }
  NOKPROBE_SYMBOL(machine_check_exception);
@@ -1873,14 +1849,10 @@ DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
  #ifdef CONFIG_PPC64
  DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
  {
-       nmi_enter();
-
        __this_cpu_inc(irq_stat.pmu_irqs);
        perf_irq(regs);

-       nmi_exit();
-
        return 0;
  }
  #endif
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 824b9376ac35..dc39534836a3 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -254,11 +254,12 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
        int cpu = raw_smp_processor_id();
        u64 tb;
+       /* should only arrive from kernel, with irqs disabled */
+       WARN_ON_ONCE(!arch_irq_disabled_regs(regs));
+
        if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
                return 0;
-       nmi_enter();
-
        __this_cpu_inc(irq_stat.soft_nmi_irqs);
        tb = get_tb();
@@ -266,7 +267,7 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
                wd_smp_lock(&flags);
                if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
                        wd_smp_unlock(&flags);
-                       goto out;
+                       return 0;
                }
                set_cpu_stuck(cpu, tb);
@@ -290,9 +291,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
        if (wd_panic_timeout_tb < 0x7fffffff)
                mtspr(SPRN_DEC, wd_panic_timeout_tb);
-out:
-       nmi_exit();
-
        return 0;
  }
