On Tue, Dec 16, 2025 at 10:58:16AM +0100, Christophe Leroy (CS GROUP) wrote:
>
>
> On 14/12/2025 at 14:02, Mukesh Kumar Chaurasiya wrote:
> > From: Mukesh Kumar Chaurasiya <[email protected]>
> >
> > Move interrupt entry and exit helper routines from interrupt.h into the
> > PowerPC-specific entry-common.h header as a preparatory step for enabling
> > the generic entry/exit framework.
> >
> > This consolidation places all PowerPC interrupt entry/exit handling in a
> > single common header, aligning with the generic entry infrastructure.
> > The helpers provide architecture-specific handling for interrupt and NMI
> > entry/exit sequences, including:
> >
> > - arch_interrupt_enter/exit_prepare()
> > - arch_interrupt_async_enter/exit_prepare()
> > - arch_interrupt_nmi_enter/exit_prepare()
> > - Supporting helpers such as nap_adjust_return(), check_return_regs_valid(),
> >   debug register maintenance, and soft mask handling.
> >
> > The functions are copied verbatim from interrupt.h to avoid functional
> > changes at this stage. Subsequent patches will integrate these routines
> > into the generic entry/exit flow.
>
> Can we move them instead of duplicating them?
>
Until we enable the generic framework, I didn't want to touch the code
path that is currently in use. Once the generic entry code is enabled,
all of the unused copies will be removed. Keeping both around until then
helps with bisecting any future issues caused by this change.
> >
> > No functional change intended.
> >
> > Signed-off-by: Mukesh Kumar Chaurasiya <[email protected]>
> > ---
> > arch/powerpc/include/asm/entry-common.h | 422 ++++++++++++++++++++++++
> > 1 file changed, 422 insertions(+)
> >
> > diff --git a/arch/powerpc/include/asm/entry-common.h b/arch/powerpc/include/asm/entry-common.h
> > index e8ebd42a4e6d..e8bde4c67eaf 100644
> > --- a/arch/powerpc/include/asm/entry-common.h
> > +++ b/arch/powerpc/include/asm/entry-common.h
> > @@ -7,10 +7,432 @@
> > #include <asm/cputime.h>
> > #include <asm/interrupt.h>
> > +#include <asm/runlatch.h>
> > #include <asm/stacktrace.h>
> > #include <asm/switch_to.h>
> > #include <asm/tm.h>
> > +#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
> > +/*
> > + * WARN/BUG is handled with a program interrupt so minimise checks here to
> > + * avoid recursion and maximise the chance of getting the first oops handled.
> > + */
> > +#define INT_SOFT_MASK_BUG_ON(regs, cond) \
> > +do { \
> > + if ((user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \
> > + BUG_ON(cond); \
> > +} while (0)
> > +#else
> > +#define INT_SOFT_MASK_BUG_ON(regs, cond)
> > +#endif
> > +
> > +#ifdef CONFIG_PPC_BOOK3S_64
> > +extern char __end_soft_masked[];
> > +bool search_kernel_soft_mask_table(unsigned long addr);
> > +unsigned long search_kernel_restart_table(unsigned long addr);
> > +
> > +DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
> > +
[...]
> > +static inline bool nmi_disables_ftrace(struct pt_regs *regs)
> > +{
> > + /* Allow DEC and PMI to be traced when they are soft-NMI */
> > + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
> > + if (TRAP(regs) == INTERRUPT_DECREMENTER)
> > + return false;
> > + if (TRAP(regs) == INTERRUPT_PERFMON)
> > + return false;
> > + }
> > + if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
> > + if (TRAP(regs) == INTERRUPT_PERFMON)
> > + return false;
> > + }
> > +
> > + return true;
> > +}
> > +
> > +static inline void arch_interrupt_nmi_enter_prepare(struct pt_regs *regs,
> > + struct interrupt_nmi_state *state)
>
> CHECK: Alignment should match open parenthesis
> #354: FILE: arch/powerpc/include/asm/entry-common.h:322:
> +static inline void arch_interrupt_nmi_enter_prepare(struct pt_regs *regs,
> + struct interrupt_nmi_state *state)
>
>
Will fix this.
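
The plan is just to realign the continuation line with the open
parenthesis, i.e. something like this (whitespace-only sketch, untested):

static inline void arch_interrupt_nmi_enter_prepare(struct pt_regs *regs,
                                                    struct interrupt_nmi_state *state)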
> > +{
> > +#ifdef CONFIG_PPC64
> > + state->irq_soft_mask = local_paca->irq_soft_mask;
> > + state->irq_happened = local_paca->irq_happened;
> > + state->softe = regs->softe;
> > +
> > + /*
> > + * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
> > + * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
> > + * because that goes through irq tracing which we don't want in NMI.
> > + */
> > + local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
> > + local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
> > +
> > + if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
> > + /*
> > + * Adjust regs->softe to be soft-masked if it had not been
> > + * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
> > + * not yet set disabled), or if it was in an implicit soft
> > + * masked state. This makes regs_irqs_disabled(regs)
> > + * behave as expected.
> > + */
> > + regs->softe = IRQS_ALL_DISABLED;
> > + }
> > +
> > + __hard_RI_enable();
> > +
> > + /* Don't do any per-CPU operations until interrupt state is fixed */
> > +
> > + if (nmi_disables_ftrace(regs)) {
> > + state->ftrace_enabled = this_cpu_get_ftrace_enabled();
> > + this_cpu_set_ftrace_enabled(0);
> > + }
> > +#endif
> > +
> > + /* If data relocations are enabled, it's safe to use nmi_enter() */
> > + if (mfmsr() & MSR_DR) {
> > + nmi_enter();
> > + return;
> > + }
> > +
> > + /*
> > + * But do not use nmi_enter() for pseries hash guest taking a real-mode
> > + * NMI because not everything it touches is within the RMA limit.
> > + */
> > + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
> > + firmware_has_feature(FW_FEATURE_LPAR) &&
> > + !radix_enabled())
> > + return;
> > +
> > + /*
> > + * Likewise, don't use it if we have some form of instrumentation (like
> > + * KASAN shadow) that is not safe to access in real mode (even on radix)
> > + */
> > + if (IS_ENABLED(CONFIG_KASAN))
> > + return;
> > +
> > + /*
> > + * Likewise, do not use it in real mode if percpu first chunk is not
> > + * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
> > + * are chances where percpu allocation can come from vmalloc area.
> > + */
> > + if (percpu_first_chunk_is_paged)
> > + return;
> > +
> > + /* Otherwise, it should be safe to call it */
> > + nmi_enter();
> > +}
> > +
> > +static inline void arch_interrupt_nmi_exit_prepare(struct pt_regs *regs,
> > + struct interrupt_nmi_state *state)
> > +{
>
> CHECK: Alignment should match open parenthesis
> #425: FILE: arch/powerpc/include/asm/entry-common.h:393:
> +static inline void arch_interrupt_nmi_exit_prepare(struct pt_regs *regs,
> + struct interrupt_nmi_state *state)
>
Will fix this.
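
Same fix here, aligning the second line with the open parenthesis
(whitespace-only sketch, untested):

static inline void arch_interrupt_nmi_exit_prepare(struct pt_regs *regs,
                                                   struct interrupt_nmi_state *state)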
Regards,
Mukesh
> > + if (mfmsr() & MSR_DR) {
> > + // nmi_exit if relocations are on
> > + nmi_exit();
> > + } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
> > + firmware_has_feature(FW_FEATURE_LPAR) &&
> > + !radix_enabled()) {
> > + // no nmi_exit for a pseries hash guest taking a real mode exception
> > + } else if (IS_ENABLED(CONFIG_KASAN)) {
> > + // no nmi_exit for KASAN in real mode
> > + } else if (percpu_first_chunk_is_paged) {
> > + // no nmi_exit if percpu first chunk is not embedded
> > + } else {
> > + nmi_exit();
> > + }
> > +
> > + /*
> > + * nmi does not call nap_adjust_return because nmi should not create
> > + * new work to do (must use irq_work for that).
> > + */
> > +
> > +#ifdef CONFIG_PPC64
> > +#ifdef CONFIG_PPC_BOOK3S
> > + if (regs_irqs_disabled(regs)) {
> > + unsigned long rst = search_kernel_restart_table(regs->nip);
> > +
> > + if (rst)
> > + regs_set_return_ip(regs, rst);
> > + }
> > +#endif
> > +
> > + if (nmi_disables_ftrace(regs))
> > + this_cpu_set_ftrace_enabled(state->ftrace_enabled);
> > +
> > + /* Check we didn't change the pending interrupt mask. */
> > + WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
> > + regs->softe = state->softe;
> > + local_paca->irq_happened = state->irq_happened;
> > + local_paca->irq_soft_mask = state->irq_soft_mask;
> > +#endif
> > +}
> > +
> > static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
> > {
> > if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
>