On 2019/1/21 23:33, Julien Thierry wrote:
> Implement NMI callbacks for GICv3 irqchip. Install NMI safe handlers
> when setting up interrupt line as NMI.
> 
> Only SPIs and PPIs are allowed to be set up as NMI.
> 
> Signed-off-by: Julien Thierry <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Cc: Jason Cooper <[email protected]>
> Cc: Marc Zyngier <[email protected]>
> ---
>  drivers/irqchip/irq-gic-v3.c | 84 ++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 84 insertions(+)
> 
> diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
> index 4df1e94..447d8ab 100644
> --- a/drivers/irqchip/irq-gic-v3.c
> +++ b/drivers/irqchip/irq-gic-v3.c
(snip)
>  
> +static int gic_irq_nmi_setup(struct irq_data *d)
> +{
> +     struct irq_desc *desc = irq_to_desc(d->irq);
> +
> +     if (!gic_supports_nmi())
> +             return -EINVAL;
> +
> +     if (gic_peek_irq(d, GICD_ISENABLER)) {
> +             pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
> +             return -EINVAL;
> +     }
> +
> +     /*
> +      * A secondary irq_chip should be in charge of LPI request,
> +      * it should not be possible to get there
> +      */
> +     if (WARN_ON(gic_irq(d) >= 8192))
> +             return -EINVAL;
> +
> +     /* desc lock should already be held */
> +     if (gic_irq(d) < 32) {
> +             /* Setting up PPI as NMI, only switch handler for first NMI */
> +             if (!refcount_inc_not_zero(&ppi_nmi_refs[gic_irq(d) - 16])) {
> +                     refcount_set(&ppi_nmi_refs[gic_irq(d) - 16], 1);
> +                     desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
> +             }
> +     } else {
> +             desc->handle_irq = handle_fasteoi_nmi;
> +     }
> +
> +     gic_set_irq_prio(gic_irq(d), gic_dist_base(d), GICD_INT_NMI_PRI);
> +
> +     return 0;
> +}
> +
> +static void gic_irq_nmi_teardown(struct irq_data *d)
> +{
> +     struct irq_desc *desc = irq_to_desc(d->irq);
> +
> +     if (WARN_ON(!gic_supports_nmi()))
> +             return;
> +
> +     if (gic_peek_irq(d, GICD_ISENABLER)) {
> +             pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
> +             return;
> +     }
> +
> +     /*
> +      * A secondary irq_chip should be in charge of LPI request,
> +      * it should not be possible to get there
> +      */
> +     if (WARN_ON(gic_irq(d) >= 8192))
> +             return;
> +
> +     /* desc lock should already be held */
> +     if (gic_irq(d) < 32) {
> +             /* Tearing down NMI, only switch handler for last NMI */
> +             if (refcount_dec_and_test(&ppi_nmi_refs[gic_irq(d) - 16]))
> +                     desc->handle_irq = handle_percpu_devid_irq;
> +     } else {
> +             desc->handle_irq = handle_fasteoi_irq;
> +     }
> +
> +     gic_set_irq_prio(gic_irq(d), gic_dist_base(d), GICD_INT_DEF_PRI);
> +}
> +

Hello Julien,

I am afraid the priority setting is not correct here. If the irq lives in
the redistributor (gic_irq(d) < 32), the priority should be set on each
CPU, but the code above only sets it on the current CPU, because
gic_dist_base() returns the local redistributor's SGI base for PPIs:
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        if (d->hwirq <= 1023)           /* SPI -> dist_base */
                return gic_data.dist_base;

        return NULL;
}

So when gic_irq_nmi_setup() runs for a PPI, only the calling CPU's
redistributor copy of the priority register is updated; the other CPUs
keep GICD_INT_DEF_PRI for that interrupt.

I tried to add an smp_call_function() here so that every CPU would program
its own redistributor, but the kernel reported a warning: interrupts are
already disabled by the raw_spin_lock_irqsave() in
request_nmi()/ready_percpu_nmi(), and smp_call_function() must not be
called with interrupts disabled. Roughly what I tried (a hypothetical
sketch, not the exact change; gic_set_nmi_prio_on_cpu is an illustrative
name):
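static void gic_set_nmi_prio_on_cpu(void *data)
{
        struct irq_data *d = data;

        /*
         * Runs on each remote CPU so that it updates its own
         * redistributor copy of the PPI priority; the local CPU
         * is still covered by the gic_set_irq_prio() call at the
         * end of gic_irq_nmi_setup().
         */
        gic_set_irq_prio(gic_irq(d), gic_dist_base(d), GICD_INT_NMI_PRI);
}

/* In gic_irq_nmi_setup(), for the gic_irq(d) < 32 case: */
smp_call_function(gic_set_nmi_prio_on_cpu, d, 1);

Making that cross-call from the irq-disabled region produces the warning: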
[    2.137262] Call trace:
[    2.137265]  smp_call_function_many+0xf8/0x3a0
[    2.137267]  smp_call_function+0x40/0x58
[    2.137271]  gic_irq_nmi_setup+0xe8/0x118
[    2.137275]  ready_percpu_nmi+0x6c/0xf0
[    2.137279]  armpmu_request_irq+0x228/0x250
[    2.137281]  arm_pmu_acpi_init+0x150/0x2f0
[    2.137284]  do_one_initcall+0x54/0x218
[    2.137289]  kernel_init_freeable+0x230/0x354
[    2.137293]  kernel_init+0x18/0x118
[    2.137295]  ret_from_fork+0x10/0x18

I am exploring a better way to solve this issue.

Thanks,
Wei Li
