On Tue, Dec 16, 2025 at 11:43:02AM +0100, Christophe Leroy (CS GROUP) wrote:
> 
> 
> On 14/12/2025 at 14:02, Mukesh Kumar Chaurasiya wrote:
> > From: Mukesh Kumar Chaurasiya <[email protected]>
> > 
> > Enable the generic IRQ entry/exit infrastructure on PowerPC by selecting
> > GENERIC_IRQ_ENTRY and integrating the architecture-specific interrupt
> > handlers with the generic entry/exit APIs.
> > 
> > This change replaces PowerPC’s local interrupt entry/exit handling with
> > calls to the generic irqentry_* helpers, aligning the architecture with
> > the common kernel entry model. The macros that define interrupt, async,
> > and NMI handlers are updated to use irqentry_enter()/irqentry_exit()
> > and irqentry_nmi_enter()/irqentry_nmi_exit() where applicable.
> > 
> > Key updates include:
> >   - Select GENERIC_IRQ_ENTRY in Kconfig.
> >   - Replace interrupt_enter/exit_prepare() with arch_interrupt_* helpers.
> >   - Integrate irqentry_enter()/exit() in standard and async interrupt paths.
> >   - Integrate irqentry_nmi_enter()/exit() in NMI handlers.
> >   - Remove redundant irq_enter()/irq_exit() calls now handled generically.
> >   - Use irqentry_exit_cond_resched() for preemption checks.
> > 
> > This change establishes the necessary wiring for PowerPC to use the
> > generic IRQ entry/exit framework while maintaining existing semantics.
> 
> Did you look at the resulting code?
> 
> do_IRQ() is bigger and calls irqentry_enter(), which is bigger than
> irq_enter().
> 
> And irq_enter_rcu() used to be tail-called from irq_enter(); now it is
> called after irqentry_enter().
> 
I am not sure if I understand your question correctly here. Can you
elaborate a little more?
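
In the meantime, my current reading of the two paths, as a rough sketch
(simplified, not the exact generated code; interrupt_enter_prepare() and
the handler body are elided):

	/* Before: PPC-local helper, where irq_enter() is the tail call
	 * and itself ends in a tail call to irq_enter_rcu().
	 */
	static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
	{
		interrupt_enter_prepare(regs);
		irq_enter();
	}

	/* After: generic entry, where irq_enter_rcu() becomes a separate
	 * call made after irqentry_enter() has done the RCU, lockdep and
	 * context-tracking work.
	 */
	irqentry_state_t state = irqentry_enter(regs);

	irq_enter_rcu();
	/* handler runs here */
	irq_exit_rcu();

	irqentry_exit(regs, state);

If that matches your concern, I think the growth comes from
irqentry_enter() also covering lockdep and context tracking, which the
old irq_enter() path did not do.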

> > 
> > Signed-off-by: Mukesh Kumar Chaurasiya <[email protected]>
> > ---
> >   arch/powerpc/Kconfig                    |   1 +
> >   arch/powerpc/include/asm/entry-common.h |  66 +---
> >   arch/powerpc/include/asm/interrupt.h    | 499 +++---------------------
> >   arch/powerpc/kernel/interrupt.c         |  13 +-
> >   4 files changed, 74 insertions(+), 505 deletions(-)
> > 
> > diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> > index e24f4d88885a..b0c602c3bbe1 100644
> > --- a/arch/powerpc/Kconfig
> > +++ b/arch/powerpc/Kconfig
> > @@ -206,6 +206,7 @@ config PPC
> >     select GENERIC_GETTIMEOFDAY
> >     select GENERIC_IDLE_POLL_SETUP
> >     select GENERIC_IOREMAP
> > +   select GENERIC_IRQ_ENTRY
> >     select GENERIC_IRQ_SHOW
> >     select GENERIC_IRQ_SHOW_LEVEL
> >     select GENERIC_PCI_IOMAP                if PCI
> > diff --git a/arch/powerpc/include/asm/entry-common.h b/arch/powerpc/include/asm/entry-common.h
> > index e8bde4c67eaf..e2ae7416dee1 100644
> > --- a/arch/powerpc/include/asm/entry-common.h
> > +++ b/arch/powerpc/include/asm/entry-common.h
> > @@ -257,6 +257,17 @@ static inline void arch_interrupt_enter_prepare(struct pt_regs *regs)
> >    */
> >   static inline void arch_interrupt_exit_prepare(struct pt_regs *regs)
> >   {
> > +   if (user_mode(regs)) {
> > +           BUG_ON(regs_is_unrecoverable(regs));
> > +           BUG_ON(regs_irqs_disabled(regs));
> > +           /*
> > +            * We don't need to restore AMR on the way back to userspace for KUAP.
> > +            * AMR can only have been unlocked if we interrupted the kernel.
> > +            */
> > +           kuap_assert_locked();
> > +
> > +           local_irq_disable();
> > +   }
> >   }
> >   static inline void arch_interrupt_async_enter_prepare(struct pt_regs *regs)
> > @@ -275,7 +286,6 @@ static inline void arch_interrupt_async_enter_prepare(struct pt_regs *regs)
> >         !test_thread_local_flags(_TLF_RUNLATCH))
> >             __ppc64_runlatch_on();
> >   #endif
> > -   irq_enter();
> >   }
> >   static inline void arch_interrupt_async_exit_prepare(struct pt_regs *regs)
> > @@ -288,7 +298,6 @@ static inline void arch_interrupt_async_exit_prepare(struct pt_regs *regs)
> >      */
> >     nap_adjust_return(regs);
> > -   irq_exit();
> >     arch_interrupt_exit_prepare(regs);
> >   }
> > @@ -354,59 +363,11 @@ static inline void arch_interrupt_nmi_enter_prepare(struct pt_regs *regs,
> >             this_cpu_set_ftrace_enabled(0);
> >     }
> >   #endif
> > -
> > -   /* If data relocations are enabled, it's safe to use nmi_enter() */
> > -   if (mfmsr() & MSR_DR) {
> > -           nmi_enter();
> > -           return;
> > -   }
> > -
> > -   /*
> > -    * But do not use nmi_enter() for pseries hash guest taking a real-mode
> > -    * NMI because not everything it touches is within the RMA limit.
> > -    */
> > -   if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
> > -       firmware_has_feature(FW_FEATURE_LPAR) &&
> > -       !radix_enabled())
> > -           return;
> > -
> > -   /*
> > -    * Likewise, don't use it if we have some form of instrumentation (like
> > -    * KASAN shadow) that is not safe to access in real mode (even on radix)
> > -    */
> > -   if (IS_ENABLED(CONFIG_KASAN))
> > -           return;
> > -
> > -   /*
> > -    * Likewise, do not use it in real mode if percpu first chunk is not
> > -    * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
> > -    * are chances where percpu allocation can come from vmalloc area.
> > -    */
> > -   if (percpu_first_chunk_is_paged)
> > -           return;
> > -
> > -   /* Otherwise, it should be safe to call it */
> > -   nmi_enter();
> >   }
> >   static inline void arch_interrupt_nmi_exit_prepare(struct pt_regs *regs,
> >                                           struct interrupt_nmi_state *state)
> >   {
> > -   if (mfmsr() & MSR_DR) {
> > -           // nmi_exit if relocations are on
> > -           nmi_exit();
> > -   } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
> > -              firmware_has_feature(FW_FEATURE_LPAR) &&
> > -              !radix_enabled()) {
> > -           // no nmi_exit for a pseries hash guest taking a real mode exception
> > -   } else if (IS_ENABLED(CONFIG_KASAN)) {
> > -           // no nmi_exit for KASAN in real mode
> > -   } else if (percpu_first_chunk_is_paged) {
> > -           // no nmi_exit if percpu first chunk is not embedded
> > -   } else {
> > -           nmi_exit();
> > -   }
> > -
> >     /*
> >      * nmi does not call nap_adjust_return because nmi should not create
> >      * new work to do (must use irq_work for that).
> > @@ -435,6 +396,8 @@ static inline void arch_interrupt_nmi_exit_prepare(struct pt_regs *regs,
> >   static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
> >   {
> > +   kuap_lock();
> > +
> 
> Any reason why this change comes now and not in the patch that added
> arch_enter_from_user_mode()?
> 
Yes, it should have been. I will fix this in the next revision.
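
The moved hunk would make that earlier patch read roughly like this (a
sketch; only the kuap_lock() line is new, the rest of the function stays
as it is in this series):

	static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
	{
		/* Lock kernel-userspace access protection first on entry from user mode */
		kuap_lock();

		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
		/* ... rest of the function as in this patch ... */
	}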

> >     if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
> >             BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
> > @@ -467,11 +430,8 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
> >     } else
> >   #endif
> >             kuap_assert_locked();
> > -
> >     booke_restore_dbcr0();
> > -
> 
> This is cosmetic; it should have been done when adding
> arch_enter_from_user_mode().
>
Sure, will fix this.

Regards,
Mukesh
> >     account_cpu_user_entry();
> > -
> >     account_stolen_time();
> >     /*
> 
> 
> Christophe
