Two new bit mask fields are introduced: "IRQ_DISABLE_MASK_PMU" to support the masking of PMIs and "IRQ_DISABLE_MASK_ALL" to aid interrupt-masking checks.
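
For illustration only (not part of this patch), a PMU-only masking helper built on top of these bits might look roughly like the sketch below. The powerpc_local_irq_pmu_save()/restore() names are assumptions taken from the replay comment added in irq.c further down; the real helpers are left to a later patch and may differ.

	/*
	 * Illustrative sketch, assuming the soft_disable_mask flags from
	 * asm/hw_irq.h as modified by this patch.
	 */
	static inline unsigned long powerpc_local_irq_pmu_save(void)
	{
		unsigned long flags;

		/* Remember the current soft mask and additionally block PMIs */
		flags = local_paca->soft_disable_mask;
		local_paca->soft_disable_mask = flags | IRQ_DISABLE_MASK_PMU;
		return flags;
	}

	static inline void powerpc_local_irq_pmu_restore(unsigned long flags)
	{
		/*
		 * Restoring the saved mask may re-enable PMIs; any PMI latched
		 * in paca->irq_happened meanwhile is replayed through
		 * __check_irq_replay() once nothing is left soft-disabled.
		 */
		arch_local_irq_restore(flags);
	}
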
A couple of new irq #defines, "PACA_IRQ_PMI" and "SOFTEN_VALUE_0xf00", are added for use in the exception code to check for PMI interrupts. In the masked_interrupt handler, for PMIs we clear MSR[EE] and return. In __check_irq_replay(), the PMI interrupt is replayed by calling the performance_monitor_common handler.

Signed-off-by: Madhavan Srinivasan <ma...@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/exception-64s.h |  5 +++++
 arch/powerpc/include/asm/hw_irq.h        |  5 ++++-
 arch/powerpc/kernel/entry_64.S           |  5 +++++
 arch/powerpc/kernel/exceptions-64s.S     |  6 ++++--
 arch/powerpc/kernel/irq.c                | 24 +++++++++++++++++++++++-
 5 files changed, 41 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 72c96162c492..b59f02f9acce 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -474,6 +474,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define SOFTEN_VALUE_0xe80	PACA_IRQ_DBELL
 #define SOFTEN_VALUE_0xe60	PACA_IRQ_HMI
 #define SOFTEN_VALUE_0xea0	PACA_IRQ_EE
+#define SOFTEN_VALUE_0xf00	PACA_IRQ_PMI
 
 #define __SOFTEN_TEST(h, vec, bitmask)					\
 	lbz	r10,PACASOFTIRQEN(r13);					\
@@ -538,6 +539,10 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label,			\
 					  EXC_STD, SOFTEN_NOTEST_PR, bitmask)
 
+#define MASKABLE_RELON_EXCEPTION_PSERIES_OOL(vec, label, bitmask)	\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
+	EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD);
+
 #define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label, bitmask)		\
 	_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label,			\
 					  EXC_HV, SOFTEN_TEST_HV, bitmask)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index fa23c73d2f7a..e21553e9bfc9 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -26,12 +26,15 @@
 #define PACA_IRQ_DEC		0x08 /* Or FIT */
 #define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
 #define PACA_IRQ_HMI		0x20
+#define PACA_IRQ_PMI		0x40
 
 /*
  * flags for paca->soft_disable_mask
  */
 #define IRQ_DISABLE_MASK_NONE	0
 #define IRQ_DISABLE_MASK_LINUX	1
+#define IRQ_DISABLE_MASK_PMU	2
+#define IRQ_DISABLE_MASK_ALL	3
 
 #endif /* CONFIG_PPC64 */
 
@@ -132,7 +135,7 @@ static inline bool arch_irqs_disabled(void)
 	u8 _was_enabled;						\
 	__hard_irq_disable();						\
 	_was_enabled = local_paca->soft_disable_mask;			\
-	local_paca->soft_disable_mask = IRQ_DISABLE_MASK_LINUX;\
+	local_paca->soft_disable_mask = IRQ_DISABLE_MASK_ALL;\
 	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
 	if (!(_was_enabled & IRQ_DISABLE_MASK_LINUX))			\
 		trace_hardirqs_off();					\
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f3afa0b9332d..d021f7de79bd 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -931,6 +931,11 @@ restore_check_irq_replay:
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
 	bl	do_IRQ
 	b	ret_from_except
+1:	cmpwi	cr0,r3,0xf00
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	performance_monitor_exception
+	b	ret_from_except
 1:	cmpwi	cr0,r3,0xe60
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 5c93a1700d83..c473f8779646 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1046,8 +1046,8 @@ EXC_REAL_NONE(0xee0, 0x20)
 EXC_VIRT_NONE(0x4ee0, 0x20)
 
 
-EXC_REAL_OOL(performance_monitor, 0xf00, 0x20)
-EXC_VIRT_OOL(performance_monitor, 0x4f00, 0x20, 0xf00)
+EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQ_DISABLE_MASK_PMU)
+EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQ_DISABLE_MASK_PMU)
 TRAMP_KVM(PACA_EXGEN, 0xf00)
 EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
 
@@ -1594,6 +1594,8 @@ _GLOBAL(__replay_interrupt)
 	beq	decrementer_common
 	cmpwi	r3,0x500
 	beq	hardware_interrupt_common
+	cmpwi	r3,0xf00
+	beq	performance_monitor_common
 BEGIN_FTR_SECTION
 	cmpwi	r3,0xe80
 	beq	h_doorbell_common
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index fdd817a6f9f8..835a08d9c48f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -169,6 +169,27 @@ notrace unsigned int __check_irq_replay(void)
 	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
 		return 0x900;
 
+	/*
+	 * In masked_handler() for PMI, we disable MSR[EE] and return.
+	 * Replay it here.
+	 *
+	 * After this point, PMIs could still be disabled in certain
+	 * scenarios like this one.
+	 *
+	 * local_irq_disable();
+	 * powerpc_irq_pmu_save();
+	 * powerpc_irq_pmu_restore();
+	 * local_irq_restore();
+	 *
+	 * Even though powerpc_irq_pmu_restore() would have replayed the PMIs
+	 * if any, we have still not enabled EE and this will happen only at
+	 * completion of the last *_restore in these nested cases. And PMIs
+	 * will once again start firing only when we have MSR[EE] enabled.
+	 */
+	local_paca->irq_happened &= ~PACA_IRQ_PMI;
+	if (happened & PACA_IRQ_PMI)
+		return 0xf00;
+
 	/* Finally check if an external interrupt happened */
 	local_paca->irq_happened &= ~PACA_IRQ_EE;
 	if (happened & PACA_IRQ_EE)
@@ -208,7 +229,8 @@ notrace void arch_local_irq_restore(unsigned long en)
 
 	/* Write the new soft-enabled value */
 	soft_disable_mask_set(en);
-	if (en == IRQ_DISABLE_MASK_LINUX)
+	/* any bits still disabled */
+	if (en)
 		return;
 	/*
 	 * From this point onward, we can take interrupts, preempt,
-- 
2.7.4