The set_irq_affinity() function has two issues:

1) It has no protection against selecting an offline cpu from the
   given mask.

2) It pointlessly restricts the affinity masks to have a single cpu
   set. This collides with the irq migration code of arm.

   irq affinity is set to core 3
   core 3 goes offline

   migration code sets the mask to cpu_online_mask and calls the
   irq_set_affinity() callback of the irq_chip, which fails because
   bits 0, 1 and 2 are set.

So instead of doing silly for_each_cpu() loops, just pick any bit of
the mask which intersects with the online mask.

The read back of the routing register is pointless as well. We can
simply write the new mask, as the bits in this register reflect
exactly one core.

Get rid of fiddling with the default_irq_affinity as well.

Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Gregory CLEMENT <[email protected]>
Cc: Jason Cooper <[email protected]>

---
 drivers/irqchip/irq-armada-370-xp.c |   38 ++++--------------------------------
 1 file changed, 5 insertions(+), 33 deletions(-)

Index: tip/drivers/irqchip/irq-armada-370-xp.c
===================================================================
--- tip.orig/drivers/irqchip/irq-armada-370-xp.c
+++ tip/drivers/irqchip/irq-armada-370-xp.c
@@ -244,35 +244,16 @@ static DEFINE_RAW_SPINLOCK(irq_controlle
 static int armada_xp_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val, bool force)
 {
-       unsigned long reg;
-       unsigned long new_mask = 0;
-       unsigned long online_mask = 0;
-       unsigned long count = 0;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
+       unsigned long mask;
        int cpu;
 
-       for_each_cpu(cpu, mask_val) {
-               new_mask |= 1 << cpu_logical_map(cpu);
-               count++;
-       }
-
-       /*
-        * Forbid mutlicore interrupt affinity
-        * This is required since the MPIC HW doesn't limit
-        * several CPUs from acknowledging the same interrupt.
-        */
-       if (count > 1)
-               return -EINVAL;
-
-       for_each_cpu(cpu, cpu_online_mask)
-               online_mask |= 1 << cpu_logical_map(cpu);
+       /* Select a single core from the affinity mask which is online */
+       cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       mask = 1UL << cpu_logical_map(cpu);
 
        raw_spin_lock(&irq_controller_lock);
-
-       reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-       reg = (reg & (~online_mask)) | new_mask;
-       writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-
+       writel(mask, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
        raw_spin_unlock(&irq_controller_lock);
 
        return 0;
@@ -494,15 +475,6 @@ static int __init armada_370_xp_mpic_of_
 
 #ifdef CONFIG_SMP
        armada_xp_mpic_smp_cpu_init();
-
-       /*
-        * Set the default affinity from all CPUs to the boot cpu.
-        * This is required since the MPIC doesn't limit several CPUs
-        * from acknowledging the same interrupt.
-        */
-       cpumask_clear(irq_default_affinity);
-       cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-
 #endif
 
        armada_370_xp_msi_init(node, main_int_res.start);


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to