From: David Mercado <david.merc...@windriver.com>

Introduce the use of the irq_chip "slow bus" locking mechanism
(irq_bus_lock/irq_bus_sync_unlock) so that cross-cluster GIC accesses
can be performed through the Linux RPC mechanism
(smp_call_function_single), remove the memory barriers this makes
unnecessary, and collapse the per-cluster gic_data array into a single
instance with per-cluster CPU_PM save/restore state.

Signed-off-by: David Mercado <david.merc...@windriver.com>
---
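Notes for reviewers (not part of the commit message):

The patch hangs off the genirq "slow bus" hooks. A paraphrased sketch of
the calling sequence in the generic IRQ layer (see kernel/irq/manage.c
and kernel/irq/chip.c for the real code; the function below is
illustrative only, not actual kernel source):

	#include <linux/irq.h>
	#include <linux/irqdesc.h>

	static void example_set_affinity_path(struct irq_desc *desc,
					      const struct cpumask *mask)
	{
		struct irq_chip *chip = desc->irq_data.chip;
		unsigned long flags;

		/* May sleep: this is where gic_irq_lock() takes the mutex. */
		chip->irq_bus_lock(&desc->irq_data);

		/* Interrupts now disabled on the local core. */
		raw_spin_lock_irqsave(&desc->lock, flags);

		/* With this patch, the chip callback only records remote
		 * work in gic_rpc_data instead of issuing an IPI. */
		chip->irq_set_affinity(&desc->irq_data, mask, false);

		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Interrupts enabled again, so gic_irq_sync_unlock() can
		 * legally call smp_call_function_single(). */
		chip->irq_bus_sync_unlock(&desc->irq_data);
	}
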
 arch/arm/mach-axxia/axxia-gic.c | 1109 ++++++++++++++++++++-------------------
 1 file changed, 563 insertions(+), 546 deletions(-)
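
The core idiom, stripped of the GIC specifics, is record-then-dispatch:
callbacks that run with interrupts disabled only note what remote work
is needed; the cross-CPU call happens in irq_bus_sync_unlock(), where
IPIs are legal. A minimal sketch of that idiom (all names illustrative,
not part of the patch):

	#include <linux/types.h>
	#include <linux/mutex.h>
	#include <linux/smp.h>

	struct deferred_work {
		u32 func_mask;          /* which remote ops were requested */
		int target_cpu;         /* where they must run */
	};

	static DEFINE_MUTEX(example_bus_mutex);
	static struct deferred_work pending;

	static void remote_op(void *info)
	{
		/* Runs on pending.target_cpu, in IRQ context, via an IPI. */
	}

	/* chip->irq_bus_lock: may sleep, so a mutex is fine here. */
	static void example_bus_lock(void)
	{
		mutex_lock(&example_bus_mutex);
	}

	/* Chip callback: runs with IRQs off and desc->lock held, so it
	 * only records the work instead of sending an IPI. */
	static void example_chip_callback(void)
	{
		pending.func_mask |= 0x01;
		pending.target_cpu = 4; /* hypothetical remote core */
	}

	/* chip->irq_bus_sync_unlock: IRQs are enabled again, so the
	 * cross-CPU call is safe here. */
	static void example_bus_sync_unlock(void)
	{
		if (pending.func_mask & 0x01)
			smp_call_function_single(pending.target_cpu,
						 remote_op, NULL, 1);
		pending.func_mask = 0;
		mutex_unlock(&example_bus_mutex);
	}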

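Nothing changes from the caller's side; e.g., migrating an IRQ to core 5
(a core on the second cluster of a 4x4 Axxia part) still looks like the
snippet below (the irq number and caller are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/cpumask.h>

	static int move_irq_example(unsigned int irq)
	{
		/* gic_set_affinity() records SET_AFFINITY (and CLR_AFFINITY
		 * for the old cluster, if different) in gic_rpc_data while
		 * desc->lock is held; gic_irq_sync_unlock() then performs
		 * the cross-cluster register writes via
		 * smp_call_function_single(). */
		return irq_set_affinity(irq, cpumask_of(5));
	}
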
diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c
index 8a8e8eb..701d65f 100644
--- a/arch/arm/mach-axxia/axxia-gic.c
+++ b/arch/arm/mach-axxia/axxia-gic.c
@@ -3,7 +3,7 @@
  *
  *  Cloned from linux/arch/arm/common/gic.c
  *
- *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *  Copyright (C) 2013 LSI Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -33,24 +33,11 @@
  * registers are banked per-cpu for these sources.
  */
 
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
 #include <linux/module.h>
-#include <linux/list.h>
-#include <linux/smp.h>
-#include <linux/cpu_pm.h>
-#include <linux/cpumask.h>
 #include <linux/io.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
+#include <linux/cpu_pm.h>
 
-#include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
 #include <asm/mach/irq.h>
@@ -87,13 +74,7 @@ enum axxia_ext_ipi_num {
        MAX_AXM_IPI_NUM
 };
 
-/*
- * Some big arbritary number that won't ever conflict with
- * the IPI numbers defined in arch/arm/kernel/smp.c
- */
-#define AXXIA_RPC 0xff
-
-/* RPC Message types. */
+/* MUX Message types. */
 enum axxia_mux_msg_type {
        MUX_MSG_CALL_FUNC = 0,
        MUX_MSG_CALL_FUNC_SINGLE,
@@ -113,11 +94,6 @@ static void muxed_ipi_message_pass(const struct cpumask *mask,
        struct axxia_mux_msg *info;
        int cpu;
 
-       /*
-        * Order previous accesses before accesses in the IPI handler.
-        */
-       dmb();
-
        for_each_cpu(cpu, mask) {
                info = &per_cpu(ipi_mux_msg, cpu_logical_map(cpu));
                info->msg |= 1 << ipi_num;
@@ -129,8 +105,6 @@ static void axxia_ipi_demux(struct pt_regs *regs)
        struct axxia_mux_msg *info = &__get_cpu_var(ipi_mux_msg);
        u32 all;
 
-       mb();
-
        do {
                all = xchg(&info->msg, 0);
                if (all & (1 << MUX_MSG_CALL_FUNC))
@@ -153,19 +127,54 @@ struct gic_chip_data {
        union gic_base dist_base;
        union gic_base cpu_base;
 #ifdef CONFIG_CPU_PM
-       u32 saved_spi_enable[DIV_ROUND_UP(MAX_GIC_INTERRUPTS, 32)];
-       u32 saved_spi_conf[DIV_ROUND_UP(MAX_GIC_INTERRUPTS, 16)];
-       u32 saved_spi_target[DIV_ROUND_UP(MAX_GIC_INTERRUPTS, 4)];
-       u32 __percpu *saved_ppi_enable;
-       u32 __percpu *saved_ppi_conf;
+       u32 saved_spi_enable[DIV_ROUND_UP(MAX_GIC_INTERRUPTS, 32)]
+                           [MAX_NUM_CLUSTERS];
+       u32 saved_spi_conf[DIV_ROUND_UP(MAX_GIC_INTERRUPTS, 16)]
+                         [MAX_NUM_CLUSTERS];
+       u32 saved_spi_target[DIV_ROUND_UP(MAX_GIC_INTERRUPTS, 4)]
+                           [MAX_NUM_CLUSTERS];
+       u32 __percpu *saved_ppi_enable[MAX_NUM_CLUSTERS];
+       u32 __percpu *saved_ppi_conf[MAX_NUM_CLUSTERS];
 #endif
        struct irq_domain *domain;
        unsigned int gic_irqs;
 };
 
+enum gic_rpc_func_mask {
+       IRQ_MASK = 0x01,
+       IRQ_UNMASK = 0x02,
+       SET_TYPE = 0x04,
+       SET_AFFINITY = 0x08,
+       CLR_AFFINITY = 0x10,
+       GIC_NOTIFIER = 0x20,
+       MAX_GIC_FUNC_MASK
+};
+
+
+#ifdef CONFIG_CPU_PM
+struct gic_notifier_data {
+       struct notifier_block *self;
+       unsigned long cmd;
+       void *v;
+};
+#endif
+
+struct gic_rpc_data {
+       struct irq_data *d;
+       u32 func_mask;
+       u32 cpu, oldcpu;
+       u32 type;
+       const struct cpumask *mask_val;
+#ifdef CONFIG_CPU_PM
+       struct gic_notifier_data gn_data;
+#endif
+};
+
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+static DEFINE_MUTEX(irq_bus_lock);
 
-static struct gic_chip_data gic_data[MAX_NUM_CLUSTERS] __read_mostly;
+static struct gic_chip_data gic_data __read_mostly;
+static struct gic_rpc_data gic_rpc_data = {NULL, 0, 0, 0, 0, NULL};
 
 #define gic_data_dist_base(d)  ((d)->dist_base.common_base)
 #define gic_data_cpu_base(d)   ((d)->cpu_base.common_base)
@@ -188,88 +197,88 @@ static inline unsigned int gic_irq(struct irq_data *d)
        return d->hwirq;
 }
 
-typedef void axxia_call_func_t(void *info);
-
-struct axxia_gic_rpc {
-       int cpu;
-       axxia_call_func_t *func;
-       void *info;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct axxia_gic_rpc, axxia_gic_rpc);
-
-void axxia_gic_handle_gic_rpc(void)
-{
-       u32 this_cpu = cpu_logical_map(smp_processor_id());
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-       {
-               struct axxia_gic_rpc *slot = &per_cpu(axxia_gic_rpc, cpu);
-               if (slot->func && slot->cpu == this_cpu) {
-                       slot->func(slot->info);
-                       slot->func = NULL;
-                       dmb();
-               }
-       }
-}
-
-static void axxia_gic_handle_gic_rpc_ipi(void)
-{
-       irq_enter();
-       axxia_gic_handle_gic_rpc();
-       irq_exit();
-}
-
-static void axxia_gic_run_gic_rpc(int cpu, axxia_call_func_t *func, void *info)
-{
-       struct axxia_gic_rpc *slot = &__get_cpu_var(axxia_gic_rpc);
-       int timeout;
-
-       /* If the target CPU isn't online, don't bother. */
-       if (!cpu_online(cpu))
-               return;
-
-       slot->cpu = cpu;
-       slot->info = info;
-       dsb();
-       slot->func = func;
-
-       /* Make visible before sending the IPI. */
-       dmb();
-
-       /* Send the IPI. */
-       axxia_gic_raise_softirq(cpumask_of(cpu), AXXIA_RPC);
-
-       timeout = 1000000;
-       while (slot->func && --timeout > 0) {
-               axxia_gic_handle_gic_rpc(); /* Execute other CPU requests */
-               cpu_relax();
-       }
-
-       /* We should never hit this! */
-       BUG_ON(timeout == 0);
-}
+/*
+ * This GIC driver implements IRQ management routines (e.g., gic_mask_irq,
+ * etc.) that work across multiple clusters. Since a core cannot directly
+ * manipulate GIC registers on another cluster, the Linux RPC mechanism
+ * (smp_call_function_single) is used to remotely execute these IRQ management
+ * routines. However, the IRQ management routines are invoked with
+ * interrupts disabled on the local core (the irq descriptor lock is held),
+ * so smp_call_function_single() cannot be used directly at that point.
+ *
+ * The Linux interrupt core provides a mechanism, called bus lock/unlock,
+ * that was created for irq chips hanging off slow busses like i2c/spi. The
+ * bus lock is a mutex used to serialize bus accesses. We take advantage of
+ * this feature here, because an IRQ management routine that has to execute
+ * remotely on another cluster can be viewed as a "slow bus" action. Doing this
+ * here serializes all IRQ management interfaces and guarantees that different
+ * callers cannot interfere.
+ *
+ * So the way this works is as follows:
+ *
+ * ==> Start IRQ management action
+ * chip->bus_lock()                    <== Mutex is taken
+ * raw_spin_lock_irq(&irqdesc->lock)   <== Interrupts disabled on local core
+ * chip->(GIC IRQ management routine)  <== IRQ mgmt routine is executed. If
+ *                                         the intended target core is on
+ *                                         the same cluster, then the work is
+ *                                         done here. If the target core is on
+ *                                         another cluster, then a global
+ *                                         structure (gic_rpc_data) is filled
+ *                                         in to pass along to a remote routine
+ *                                         to execute, and no work is done yet.
+ * raw_spin_unlock_irq(&irqdesc->lock) <== Interrupts are re-enabled
+ * chip->bus_unlock()                  <== If the gic_rpc_data global was
+ *                                         filled in, then the specified
+ *                                         remote routine is executed via
+ *                                         smp_call_function_single(). The
+ *                                         mutex is then released. Note that
+ *                                         here, IRQs are already re-enabled,
+ *                                         so it's safe to use the RPC here.
+ * <== End IRQ management action
+ *
+ * The gic_rpc_data global is filled in by the chip callback routines (e.g.,
+ * gic_mask_irq, gic_set_type, etc.). The bus lock/unlock routines are
+ * implemented as gic_irq_lock() and gic_irq_sync_unlock() respectively.
+ *
+ */
 
 /*
  * Routines to acknowledge, disable and enable interrupts.
  */
-struct gic_mask_irq_wrapper_struct {
-       struct irq_data *d;
-};
 
-static void _gic_mask_irq(void *arg)
+static void _gic_mask_irq(struct irq_data *d, bool do_mask)
 {
-       struct irq_data *d = (struct irq_data *)arg;
        u32 mask = 1 << (gic_irq(d) % 32);
 
        raw_spin_lock(&irq_controller_lock);
-       writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR
-                               + (gic_irq(d) / 32) * 4);
+       if (do_mask)
+               writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR
+                                       + (gic_irq(d) / 32) * 4);
+       else
+               writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET
+                                       + (gic_irq(d) / 32) * 4);
        raw_spin_unlock(&irq_controller_lock);
 }
 
-static void gic_mask_irq(struct irq_data *d)
+/*
+ * Functions called by smp_call_function_single() must take the form:
+ *
+ * static void foo(void *)
+ *
+ */
+static void gic_mask_remote(void *info)
+{
+       struct irq_data *d = (struct irq_data *)info;
+       _gic_mask_irq(d, true);
+}
+static void gic_unmask_remote(void *info)
+{
+       struct irq_data *d = (struct irq_data *)info;
+       _gic_mask_irq(d, false);
+}
+
+static void gic_mask_unmask(struct irq_data *d, bool do_mask)
 {
        u32 pcpu = cpu_logical_map(smp_processor_id());
        u32 irqid = gic_irq(d);
@@ -289,69 +298,37 @@ static void gic_mask_irq(struct irq_data *d)
 
        /* Deal with PPI interrupts directly. */
        if ((irqid > 16) && (irqid < 32)) {
-               _gic_mask_irq(d);
+               _gic_mask_irq(d, do_mask);
                return;
        }
 
        /*
         * If the cpu that this interrupt is assigned to falls within
         * the same cluster as the cpu we're currently running on, do
-        * the IRQ masking directly. Otherwise, use the IPI mechanism
+        * the IRQ [un]masking directly. Otherwise, use the RPC mechanism
         * to remotely do the masking.
         */
        if ((irq_cpuid[irqid] / CORES_PER_CLUSTER) ==
-               (pcpu / CORES_PER_CLUSTER))
-               _gic_mask_irq(d);
-       else
-               axxia_gic_run_gic_rpc(irq_cpuid[irqid], _gic_mask_irq, d);
+               (pcpu / CORES_PER_CLUSTER)) {
+               _gic_mask_irq(d, do_mask);
+       } else {
+               if (do_mask)
+                       gic_rpc_data.func_mask |= IRQ_MASK;
+               else
+                       gic_rpc_data.func_mask |= IRQ_UNMASK;
+               gic_rpc_data.cpu = irq_cpuid[irqid];
+               gic_rpc_data.d = d;
+       }
 }
 
-static void _gic_unmask_irq(void *arg)
+static void gic_mask_irq(struct irq_data *d)
 {
-       struct irq_data *d = (struct irq_data *)arg;
-       u32 mask = 1 << (gic_irq(d) % 32);
-
-       raw_spin_lock(&irq_controller_lock);
-       writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET
-                               + (gic_irq(d) / 32) * 4);
-       raw_spin_unlock(&irq_controller_lock);
+       gic_mask_unmask(d, true);
 }
 
 static void gic_unmask_irq(struct irq_data *d)
 {
-       u32 pcpu = cpu_logical_map(smp_processor_id());
-       u32 irqid = gic_irq(d);
-
-       BUG_ON(!irqs_disabled());
-
-       if (irqid >= MAX_GIC_INTERRUPTS)
-               return;
-
-       /* Don't mess with the AXM IPIs. */
-       if ((irqid >= IPI0_CPU0) && (irqid < MAX_AXM_IPI_NUM))
-               return;
-
-       /* Don't mess with the PMU IRQ either. */
-       if (irqid == IRQ_PMU)
-               return;
-
-       /* Deal with PPI interrupts directly. */
-       if ((irqid > 15) && (irqid < 32)) {
-               _gic_unmask_irq(d);
-               return;
-       }
-
-       /*
-        * If the cpu that this interrupt is assigned to falls within
-        * the same cluster as the cpu we're currently running on, do
-        * the IRQ masking directly. Otherwise, use the IPI mechanism
-        * to remotely do the masking.
-        */
-       if ((irq_cpuid[irqid] / CORES_PER_CLUSTER) ==
-               (pcpu / CORES_PER_CLUSTER))
-               _gic_unmask_irq(d);
-       else
-               axxia_gic_run_gic_rpc(irq_cpuid[irqid], _gic_unmask_irq, d);
+       gic_mask_unmask(d, false);
 }
 
 static void gic_eoi_irq(struct irq_data *d)
@@ -384,8 +361,9 @@ static int _gic_set_type(struct irq_data *d, unsigned int type)
                val |= confmask;
 
        /*
-        * As recommended by the spec, disable the interrupt before changing
-        * the configuration.
+        * As recommended by the ARM GIC architecture spec, disable the
+        * interrupt before changing the configuration. We cannot rely
+        * on IRQCHIP_SET_TYPE_MASKED behavior for this.
         */
        if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff)
                          & enablemask) {
@@ -402,34 +380,24 @@ static int _gic_set_type(struct irq_data *d, unsigned int type)
 
        raw_spin_unlock(&irq_controller_lock);
 
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
-#ifdef CONFIG_SMP
-struct gic_set_type_wrapper_struct {
-       struct irq_data *d;
-       unsigned int type;
-       int status;
-};
-
-static void gic_set_type_wrapper(void *data)
+/*
+ * Functions called by smp_call_function_single() must take the form:
+ *
+ * static void foo(void *)
+ *
+ */
+static void gic_set_type_remote(void *info)
 {
-       struct gic_set_type_wrapper_struct *args =
-               (struct gic_set_type_wrapper_struct *)data;
-
-       args->status = _gic_set_type(args->d, args->type);
-       dmb();
+       struct gic_rpc_data *rpc = (struct gic_rpc_data *)info;
+       _gic_set_type(rpc->d, rpc->type);
 }
-#endif
 
 static int gic_set_type(struct irq_data *d, unsigned int type)
 {
-       int i, j, cpu;
-       int nr_cluster_ids = ((nr_cpu_ids - 1) / CORES_PER_CLUSTER) + 1;
        unsigned int gicirq = gic_irq(d);
-       u32 pcpu = cpu_logical_map(smp_processor_id());
-       struct gic_set_type_wrapper_struct data;
-       int ret;
 
        BUG_ON(!irqs_disabled());
 
@@ -447,32 +415,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
        /*
         * Duplicate IRQ type settings across all clusters. Run
-        * directly for this cluster, use IPI for all others.
+        * directly for this cluster, use RPC for all others.
         */
-       ret = _gic_set_type(d, type);
-       data.d = d;
-       data.type = type;
-       for (i = 0; i < nr_cluster_ids; i++) {
-               if (i == (pcpu / CORES_PER_CLUSTER))
-                       continue;
+       _gic_set_type(d, type);
 
-               /*
-                * Have some core in each cluster execute this,
-                * Start with the first core on that cluster.
-                */
-               cpu = i * CORES_PER_CLUSTER;
-               for (j = cpu; j < cpu + CORES_PER_CLUSTER; j++) {
-                       if (cpu_online(j)) {
-                               axxia_gic_run_gic_rpc(j, gic_set_type_wrapper,
-                                                     &data);
-                               if (data.status != 0)
-                                       pr_err("IRQ set type error for cpu%d\n",
-                                              j);
-                               break;
-                       }
-               }
-       }
-       return ret;
+       gic_rpc_data.d = d;
+       gic_rpc_data.func_mask |= SET_TYPE;
+       gic_rpc_data.cpu = cpu_logical_map(smp_processor_id());
+       gic_rpc_data.type = type;
+
+       return IRQ_SET_MASK_OK;
 }
 
 static int gic_retrigger(struct irq_data *d)
@@ -480,49 +432,60 @@ static int gic_retrigger(struct irq_data *d)
        return -ENXIO;
 }
 
-#ifdef CONFIG_SMP
-
-/* Mechanism for forwarding IRQ affinity requests to other clusters. */
-struct gic_set_affinity_wrapper_struct {
-       struct irq_data *d;
-       const struct cpumask *mask_val;
-       bool disable;
-};
-
-static void _gic_set_affinity(void *data)
+static int _gic_set_affinity(struct irq_data *d,
+                            const struct cpumask *mask_val,
+                            bool do_clear)
 {
-       struct gic_set_affinity_wrapper_struct *args =
-               (struct gic_set_affinity_wrapper_struct *)data;
-       void __iomem *reg  = gic_dist_base(args->d) +
-                            GIC_DIST_TARGET + (gic_irq(args->d) & ~3);
-       unsigned int shift = (gic_irq(args->d) % 4) * 8;
-       unsigned int cpu = cpumask_any_and(args->mask_val, cpu_online_mask);
-       u32 val, affinity_mask, affinity_bit;
+       void __iomem *reg  = gic_dist_base(d) + GIC_DIST_TARGET +
+                            (gic_irq(d) & ~3);
+       unsigned int shift = (gic_irq(d) % 4) * 8;
+       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       u32 val, mask, bit;
        u32 enable_mask, enable_offset;
 
        /*
         * Normalize the cpu number as seen by Linux (0-15) to a
         * number as seen by a cluster (0-3).
         */
-       affinity_bit = 1 << ((cpu_logical_map(cpu) % CORES_PER_CLUSTER) +
-                               shift);
-       affinity_mask = 0xff << shift;
+       bit = 1 << ((cpu_logical_map(cpu) % CORES_PER_CLUSTER) + shift);
+       mask = 0xff << shift;
 
-       enable_mask = 1 << (gic_irq(args->d) % 32);
-       enable_offset = 4 * (gic_irq(args->d) / 32);
+       enable_mask = 1 << (gic_irq(d) % 32);
+       enable_offset = 4 * (gic_irq(d) / 32);
 
        raw_spin_lock(&irq_controller_lock);
-       val = readl_relaxed(reg) & ~affinity_mask;
-       if (args->disable == true) {
+       val = readl_relaxed(reg) & ~mask;
+       if (do_clear) {
+               /* Clear affinity, mask IRQ. */
                writel_relaxed(val, reg);
-               writel_relaxed(enable_mask, gic_data_dist_base(&gic_data[0])
+               writel_relaxed(enable_mask, gic_data_dist_base(&gic_data)
                                + GIC_DIST_ENABLE_CLEAR + enable_offset);
        } else {
-               writel_relaxed(val | affinity_bit, reg);
-               writel_relaxed(enable_mask, gic_data_dist_base(&gic_data[0])
+               /* Set affinity, unmask IRQ. */
+               writel_relaxed(val | bit, reg);
+               writel_relaxed(enable_mask, gic_data_dist_base(&gic_data)
                                + GIC_DIST_ENABLE_SET + enable_offset);
        }
        raw_spin_unlock(&irq_controller_lock);
+
+       return IRQ_SET_MASK_OK;
+}
+
+/*
+ * Functions called by smp_call_function_single() must take the form:
+ *
+ * static void foo(void *)
+ *
+ */
+static void gic_set_affinity_remote(void *info)
+{
+       struct gic_rpc_data *rpc = (struct gic_rpc_data *)info;
+       _gic_set_affinity(rpc->d, rpc->mask_val, false);
+}
+static void gic_clr_affinity_remote(void *info)
+{
+       struct gic_rpc_data *rpc = (struct gic_rpc_data *)info;
+       _gic_set_affinity(rpc->d, rpc->mask_val, true);
 }
 
 static int gic_set_affinity(struct irq_data *d,
@@ -532,7 +495,6 @@ static int gic_set_affinity(struct irq_data *d,
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        u32 pcpu = cpu_logical_map(smp_processor_id());
        unsigned int irqid = gic_irq(d);
-       struct gic_set_affinity_wrapper_struct data;
 
        BUG_ON(!irqs_disabled());
 
@@ -556,63 +518,394 @@ static int gic_set_affinity(struct irq_data *d,
        /*
         * If the new physical cpu assignment falls within the same
         * cluster as the cpu we're currently running on, set the IRQ
-        * affinity directly. Otherwise, use the IPI mechanism.
+        * affinity directly. Otherwise, use the RPC mechanism.
         */
-       data.d = d;
-       data.mask_val = mask_val;
-       data.disable = false;
-
        if ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) ==
-               (pcpu / CORES_PER_CLUSTER))
-               _gic_set_affinity(&data);
-       else
-               axxia_gic_run_gic_rpc(cpu, _gic_set_affinity, &data);
+               (pcpu / CORES_PER_CLUSTER)) {
+               _gic_set_affinity(d, mask_val, false);
+       } else {
+               gic_rpc_data.func_mask |= SET_AFFINITY;
+               gic_rpc_data.cpu = cpu;
+               gic_rpc_data.d = d;
+               gic_rpc_data.mask_val = mask_val;
+       }
 
        /*
         * If the new physical cpu assignment is on a cluster that's
-        * different than the prior cluster, remove the IRQ affinity
+        * different than the prior cluster, clear the IRQ affinity
         * on the old cluster.
         */
        if ((irqid != IRQ_PMU) && ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) !=
                (irq_cpuid[irqid] / CORES_PER_CLUSTER))) {
                /*
                 * If old cpu assignment falls within the same cluster as
-                * the cpu we're currently running on, set the IRQ affinity
-                * directly. Otherwise, use IPI mechanism.
+                * the cpu we're currently running on, clear the IRQ affinity
+                * directly. Otherwise, use RPC mechanism.
                 */
-               data.disable = true;
                if ((irq_cpuid[irqid] / CORES_PER_CLUSTER) ==
-                       (pcpu / CORES_PER_CLUSTER))
-                       _gic_set_affinity(&data);
-               else
-                       axxia_gic_run_gic_rpc(irq_cpuid[irqid],
-                                             _gic_set_affinity,
-                                             &data);
+                       (pcpu / CORES_PER_CLUSTER)) {
+                       _gic_set_affinity(d, mask_val, true);
+               } else {
+                       gic_rpc_data.func_mask |= CLR_AFFINITY;
+                       gic_rpc_data.oldcpu = irq_cpuid[irqid];
+                       gic_rpc_data.d = d;
+                       gic_rpc_data.mask_val = mask_val;
+               }
+       }
+
+       /* Update Axxia IRQ affinity table with the new physical CPU number. */
+       irq_cpuid[irqid] = cpu_logical_map(cpu);
+
+       return IRQ_SET_MASK_OK;
+}
+
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+       int ret = -ENXIO;
+
+       return ret;
+}
+
+#else
+#define gic_set_wake   NULL
+#endif
+
+#ifdef CONFIG_CPU_PM
+
+static u32 get_cluster_id(void)
+{
+       u32 mpidr, cluster;
+
+       mpidr = read_cpuid_mpidr();
+       cluster = (mpidr >> 8) & 0xFF;
+
+       /*
+        * Cluster ID should always be between 0 and 3.
+        * Anything else, return 0.
+        */
+       if (cluster >= MAX_NUM_CLUSTERS)
+               cluster = 0;
+
+       return cluster;
+}
+
+/*
+ * Saves the GIC distributor registers during suspend or idle.  Must be called
+ * with interrupts disabled but before powering down the GIC.  After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+static void gic_dist_save(void)
+{
+       unsigned int gic_irqs;
+       void __iomem *dist_base;
+       int i;
+       u32 this_cluster;
+
+       this_cluster = get_cluster_id();
+
+       gic_irqs = gic_data.gic_irqs;
+       dist_base = gic_data_dist_base(&gic_data);
+
+       if (!dist_base)
+               return;
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+               gic_data.saved_spi_conf[i][this_cluster] =
+                       readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+               gic_data.saved_spi_target[i][this_cluster] =
+                       readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+               gic_data.saved_spi_enable[i][this_cluster] =
+                       readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle.  Must be called before enabling interrupts.  If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(void)
+{
+       unsigned int gic_irqs;
+       unsigned int i;
+       void __iomem *dist_base;
+       u32 this_cluster;
+
+       this_cluster = get_cluster_id();
+
+       gic_irqs = gic_data.gic_irqs;
+       dist_base = gic_data_dist_base(&gic_data);
+
+       if (!dist_base)
+               return;
+
+       writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+               writel_relaxed(gic_data.saved_spi_conf[i][this_cluster],
+                       dist_base + GIC_DIST_CONFIG + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+               writel_relaxed(0xa0a0a0a0,
+                       dist_base + GIC_DIST_PRI + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+               writel_relaxed(gic_data.saved_spi_target[i][this_cluster],
+                       dist_base + GIC_DIST_TARGET + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+               writel_relaxed(gic_data.saved_spi_enable[i][this_cluster],
+                       dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+       writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
+static void gic_cpu_save(void)
+{
+       int i;
+       u32 *ptr;
+       void __iomem *dist_base;
+       void __iomem *cpu_base;
+       u32 this_cluster;
+
+       this_cluster = get_cluster_id();
+
+       dist_base = gic_data_dist_base(&gic_data);
+       cpu_base = gic_data_cpu_base(&gic_data);
+
+       if (!dist_base || !cpu_base)
+               return;
+
+       ptr = __this_cpu_ptr(gic_data.saved_ppi_enable[this_cluster]);
+       for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+               ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+       ptr = __this_cpu_ptr(gic_data.saved_ppi_conf[this_cluster]);
+       for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+               ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+}
+
+static void gic_cpu_restore(void)
+{
+       int i;
+       u32 *ptr;
+       void __iomem *dist_base;
+       void __iomem *cpu_base;
+       u32 this_cluster;
+
+       this_cluster = get_cluster_id();
+
+       dist_base = gic_data_dist_base(&gic_data);
+       cpu_base = gic_data_cpu_base(&gic_data);
+
+       if (!dist_base || !cpu_base)
+               return;
+
+       ptr = __this_cpu_ptr(gic_data.saved_ppi_enable[this_cluster]);
+       for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+               writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+       ptr = __this_cpu_ptr(gic_data.saved_ppi_conf[this_cluster]);
+       for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+               writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+               writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+
+       writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
+       writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+}
+
+static int _gic_notifier(struct notifier_block *self,
+                        unsigned long cmd, void *v)
+{
+       switch (cmd) {
+       case CPU_PM_ENTER:
+               gic_cpu_save();
+               break;
+       case CPU_PM_ENTER_FAILED:
+       case CPU_PM_EXIT:
+               gic_cpu_restore();
+               break;
+       case CPU_CLUSTER_PM_ENTER:
+               gic_dist_save();
+               break;
+       case CPU_CLUSTER_PM_ENTER_FAILED:
+       case CPU_CLUSTER_PM_EXIT:
+               gic_dist_restore();
+               break;
        }
 
-       /* Update Axxia IRQ affinity table with the new physical CPU number. */
-       irq_cpuid[irqid] = cpu_logical_map(cpu);
+       return NOTIFY_OK;
+}
+
+/* Mechanism for forwarding PM events to other clusters. */
+struct gic_notifier_wrapper_struct {
+       struct notifier_block *self;
+       unsigned long cmd;
+       void *v;
+};
+
+/*
+ * Functions called by smp_call_function_single() must take the form:
+ *
+ * static void foo(void *)
+ *
+ */
+static void gic_notifier_remote(void *info)
+{
+       struct gic_rpc_data *rpc = (struct gic_rpc_data *)info;
+
+       _gic_notifier(rpc->gn_data.self, rpc->gn_data.cmd, rpc->gn_data.v);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+       /* Execute on this cluster. */
+       _gic_notifier(self, cmd, v);
+
+       /* Use RPC mechanism to execute this at other clusters. */
+       gic_rpc_data.func_mask |= GIC_NOTIFIER;
+       gic_rpc_data.cpu = cpu_logical_map(smp_processor_id());
+       gic_rpc_data.gn_data.self = self;
+       gic_rpc_data.gn_data.cmd = cmd;
+       gic_rpc_data.gn_data.v = v;
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+       .notifier_call = gic_notifier,
+};
+
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+       int i;
+
+       for (i = 0; i < MAX_NUM_CLUSTERS; i++) {
+               gic->saved_ppi_enable[i] =
+                       __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, sizeof(u32));
+               BUG_ON(!gic->saved_ppi_enable[i]);
+
+               gic->saved_ppi_conf[i] =
+                       __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, sizeof(u32));
+               BUG_ON(!gic->saved_ppi_conf[i]);
+       }
+
+       if (gic == &gic_data)
+               cpu_pm_register_notifier(&gic_notifier_block);
+}
+#else
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+}
+#endif /* CONFIG_CPU_PM */
+
+/*
+ * GIC bus lock/unlock routines.
+ */
 
-       return IRQ_SET_MASK_OK;
+static void gic_irq_lock(struct irq_data *d)
+{
+       /* Take the bus lock. */
+       mutex_lock(&irq_bus_lock);
 }
-#endif /* SMP */
 
-#ifdef CONFIG_PM
-static int gic_set_wake(struct irq_data *d, unsigned int on)
+static void gic_irq_sync_unlock(struct irq_data *d)
 {
-       int ret = -ENXIO;
+       int i, j, cpu;
+       int nr_cluster_ids = ((nr_cpu_ids - 1) / CORES_PER_CLUSTER) + 1;
 
-       return ret;
-}
+       if (gic_rpc_data.func_mask & IRQ_MASK) {
+               smp_call_function_single(gic_rpc_data.cpu,
+                                        gic_mask_remote,
+                                        d, 1);
+       }
 
-#else
-#define gic_set_wake   NULL
+       if (gic_rpc_data.func_mask & IRQ_UNMASK) {
+               smp_call_function_single(gic_rpc_data.cpu,
+                                        gic_unmask_remote,
+                                        d, 1);
+       }
+
+       if (gic_rpc_data.func_mask & SET_TYPE) {
+               for (i = 0; i < nr_cluster_ids; i++) {
+
+                       /* No need to run on local cluster. */
+                       if (i == (gic_rpc_data.cpu / CORES_PER_CLUSTER))
+                               continue;
+
+                       /*
+                        * Have some core in each cluster execute this,
+                        * starting with the first core in that cluster.
+                        */
+                       cpu = i * CORES_PER_CLUSTER;
+                       for (j = cpu; j < cpu + CORES_PER_CLUSTER; j++) {
+                               if (cpu_online(j)) {
+                                       smp_call_function_single(j,
+                                                       gic_set_type_remote,
+                                                       &gic_rpc_data, 1);
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       if (gic_rpc_data.func_mask & SET_AFFINITY) {
+               smp_call_function_single(gic_rpc_data.cpu,
+                                        gic_set_affinity_remote,
+                                        &gic_rpc_data, 1);
+       }
+
+       if (gic_rpc_data.func_mask & CLR_AFFINITY) {
+               smp_call_function_single(gic_rpc_data.oldcpu,
+                                        gic_clr_affinity_remote,
+                                        &gic_rpc_data, 1);
+       }
+
+#ifdef CONFIG_CPU_PM
+       if (gic_rpc_data.func_mask & GIC_NOTIFIER) {
+               for (i = 0; i < nr_cluster_ids; i++) {
+                       /* No need to run on local cluster. */
+                       if (i == (gic_rpc_data.cpu / CORES_PER_CLUSTER))
+                               continue;
+
+                       /*
+                        * Have some core in each cluster execute this,
+                        * starting with the first core in that cluster.
+                        */
+                       cpu = i * CORES_PER_CLUSTER;
+                       for (j = cpu; j < cpu + CORES_PER_CLUSTER; j++) {
+                               if (cpu_online(j)) {
+                                       smp_call_function_single(j,
+                                                       gic_notifier_remote,
+                                                       &gic_rpc_data, 1);
+                                       break;
+                               }
+                       }
+               }
+       }
 #endif
 
+       /* Reset RPC data. */
+       gic_rpc_data.func_mask = 0;
+
+       /* Release the bus lock. */
+       mutex_unlock(&irq_bus_lock);
+}
+
asmlinkage void __exception_irq_entry axxia_gic_handle_irq(struct pt_regs *regs)
 {
        u32 irqstat, irqnr;
-       struct gic_chip_data *gic = &gic_data[0]; /* OK to always use 0 */
+       struct gic_chip_data *gic = &gic_data;
        void __iomem *cpu_base = gic_data_cpu_base(gic);
 
        do {
@@ -635,7 +928,7 @@ asmlinkage void __exception_irq_entry axxia_gic_handle_irq(struct pt_regs *regs)
                         *             IPI_CALL_FUNC_SINGLE (5) |
                         *             IPI_CPU_STOP (6) |
                         *             IPI_WAKEUP (1)
-                        * IPI3_CPUx = AXXIA_RPC (0xff)
+                        * IPI3_CPUx = Not Used
                         *
                         * Note that if the ipi_msg_type enum changes in
                         * arch/arm/kernel/smp.c then this will have to be
@@ -670,8 +963,8 @@ asmlinkage void __exception_irq_entry axxia_gic_handle_irq(struct pt_regs *regs)
                        case IPI3_CPU1:
                        case IPI3_CPU2:
                        case IPI3_CPU3:
+                               /* Not currently used */
                                writel_relaxed(irqnr, cpu_base + GIC_CPU_EOI);
-                               axxia_gic_handle_gic_rpc_ipi();
                                break;
 
                        default:
@@ -683,9 +976,7 @@ asmlinkage void __exception_irq_entry axxia_gic_handle_irq(struct pt_regs *regs)
                }
                if (irqnr < 16) {
                        writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-#ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
-#endif
                        continue;
                }
                break;
@@ -694,14 +985,14 @@ asmlinkage void __exception_irq_entry axxia_gic_handle_irq(struct pt_regs *regs)
 
 static struct irq_chip gic_chip = {
        .name                   = "GIC",
+       .irq_bus_lock           = gic_irq_lock,
+       .irq_bus_sync_unlock    = gic_irq_sync_unlock,
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_retrigger          = gic_retrigger,
-#ifdef CONFIG_SMP
        .irq_set_affinity       = gic_set_affinity,
-#endif
        .irq_set_wake           = gic_set_wake,
 };
 
@@ -815,8 +1106,7 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
         */
        enablemask = 1 << (IRQ_PMU % 32);
        enableoff = (IRQ_PMU / 32) * 4;
-       writel_relaxed(enablemask,
-                      base + GIC_DIST_ENABLE_SET + enableoff);
+       writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
 
        writel_relaxed(1, base + GIC_DIST_CTRL);
 }
@@ -845,254 +1135,6 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
        writel_relaxed(1, base + GIC_CPU_CTRL);
 }
 
-#ifdef CONFIG_CPU_PM
-
-static u32 get_cluster_id(void)
-{
-       u32 mpidr, cluster;
-
-       mpidr = read_cpuid_mpidr();
-       cluster = (mpidr >> 8) & 0xFF;
-
-       /* Cluster ID should always be between 0 and 3. */
-       if (cluster >= MAX_NUM_CLUSTERS)
-               cluster = 0;
-
-       return cluster;
-}
-
-/*
- * Saves the GIC distributor registers during suspend or idle.  Must be called
- * with interrupts disabled but before powering down the GIC.  After calling
- * this function, no interrupts will be delivered by the GIC, and another
- * platform-specific wakeup source must be enabled.
- */
-static void gic_dist_save(void)
-{
-       unsigned int gic_irqs;
-       void __iomem *dist_base;
-       int i;
-       u32 this_cluster;
-
-       this_cluster = get_cluster_id();
-
-       gic_irqs = gic_data[this_cluster].gic_irqs;
-       dist_base = gic_data_dist_base(&gic_data[this_cluster]);
-
-       if (!dist_base)
-               return;
-
-       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
-               gic_data[this_cluster].saved_spi_conf[i] =
-                       readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
-
-       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
-               gic_data[this_cluster].saved_spi_target[i] =
-                       readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
-
-       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
-               gic_data[this_cluster].saved_spi_enable[i] =
-                       readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
-}
-
-/*
- * Restores the GIC distributor registers during resume or when coming out of
- * idle.  Must be called before enabling interrupts.  If a level interrupt
- * that occured while the GIC was suspended is still present, it will be
- * handled normally, but any edge interrupts that occured will not be seen by
- * the GIC and need to be handled by the platform-specific wakeup source.
- */
-static void gic_dist_restore(void)
-{
-       unsigned int gic_irqs;
-       unsigned int i;
-       void __iomem *dist_base;
-       u32 this_cluster;
-
-       this_cluster = get_cluster_id();
-
-       gic_irqs = gic_data[this_cluster].gic_irqs;
-       dist_base = gic_data_dist_base(&gic_data[this_cluster]);
-
-       if (!dist_base)
-               return;
-
-       writel_relaxed(0, dist_base + GIC_DIST_CTRL);
-
-       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
-               writel_relaxed(gic_data[this_cluster].saved_spi_conf[i],
-                       dist_base + GIC_DIST_CONFIG + i * 4);
-
-       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
-               writel_relaxed(0xa0a0a0a0,
-                       dist_base + GIC_DIST_PRI + i * 4);
-
-       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
-               writel_relaxed(gic_data[this_cluster].saved_spi_target[i],
-                       dist_base + GIC_DIST_TARGET + i * 4);
-
-       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
-               writel_relaxed(gic_data[this_cluster].saved_spi_enable[i],
-                       dist_base + GIC_DIST_ENABLE_SET + i * 4);
-
-       writel_relaxed(1, dist_base + GIC_DIST_CTRL);
-}
-
-static void gic_cpu_save(void)
-{
-       int i;
-       u32 *ptr;
-       void __iomem *dist_base;
-       void __iomem *cpu_base;
-       u32 this_cluster;
-
-       this_cluster = get_cluster_id();
-
-       dist_base = gic_data_dist_base(&gic_data[this_cluster]);
-       cpu_base = gic_data_cpu_base(&gic_data[this_cluster]);
-
-       if (!dist_base || !cpu_base)
-               return;
-
-       ptr = __this_cpu_ptr(gic_data[this_cluster].saved_ppi_enable);
-       for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
-               ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
-
-       ptr = __this_cpu_ptr(gic_data[this_cluster].saved_ppi_conf);
-       for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
-               ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
-
-}
-
-static void gic_cpu_restore(void)
-{
-       int i;
-       u32 *ptr;
-       void __iomem *dist_base;
-       void __iomem *cpu_base;
-       u32 this_cluster;
-
-       this_cluster = get_cluster_id();
-
-       dist_base = gic_data_dist_base(&gic_data[this_cluster]);
-       cpu_base = gic_data_cpu_base(&gic_data[this_cluster]);
-
-       if (!dist_base || !cpu_base)
-               return;
-
-       ptr = __this_cpu_ptr(gic_data[this_cluster].saved_ppi_enable);
-       for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
-               writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
-
-       ptr = __this_cpu_ptr(gic_data[this_cluster].saved_ppi_conf);
-       for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
-               writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
-
-       for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
-               writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
-
-       writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
-       writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
-}
-
-static int _gic_notifier(struct notifier_block *self,
-                        unsigned long cmd, void *v)
-{
-       switch (cmd) {
-       case CPU_PM_ENTER:
-               gic_cpu_save();
-               break;
-       case CPU_PM_ENTER_FAILED:
-       case CPU_PM_EXIT:
-               gic_cpu_restore();
-               break;
-       case CPU_CLUSTER_PM_ENTER:
-               gic_dist_save();
-               break;
-       case CPU_CLUSTER_PM_ENTER_FAILED:
-       case CPU_CLUSTER_PM_EXIT:
-               gic_dist_restore();
-               break;
-       }
-
-       return NOTIFY_OK;
-}
-
-/* Mechanism for forwarding PM events to other clusters. */
-struct gic_notifier_wrapper_struct {
-       struct notifier_block *self;
-       unsigned long cmd;
-       void *v;
-};
-
-static void gic_notifier_wrapper(void *data)
-{
-       struct gic_notifier_wrapper_struct *args =
-               (struct gic_notifier_wrapper_struct *)data;
-
-       _gic_notifier(args->self, args->cmd, args->v);
-}
-
-static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
-{
-       int i, j, cpu;
-       struct gic_notifier_wrapper_struct data;
-       int nr_cluster_ids = ((nr_cpu_ids-1) / CORES_PER_CLUSTER) + 1;
-       u32 pcpu = cpu_logical_map(smp_processor_id());
-
-       /* Use IPI mechanism to execute this at other clusters. */
-       data.self = self;
-       data.cmd = cmd;
-       data.v = v;
-       for (i = 0; i < nr_cluster_ids; i++) {
-               /* Skip the cluster we're already executing on - do last. */
-               if ((pcpu / CORES_PER_CLUSTER) == i)
-                       continue;
-
-               /*
-                * Have some core in each cluster execute this,
-                * Start with the first core on that cluster.
-                */
-               cpu = i * CORES_PER_CLUSTER;
-               for (j = cpu; j < cpu + CORES_PER_CLUSTER; j++) {
-                       if (cpu_online(j)) {
-                               axxia_gic_run_gic_rpc(j, gic_notifier_wrapper,
-                                                     &data);
-                               break;
-                       }
-               }
-       }
-
-       /* Execute on this cluster. */
-       _gic_notifier(self, cmd, v);
-
-       return NOTIFY_OK;
-       }
-
-static struct notifier_block gic_notifier_block = {
-       .notifier_call = gic_notifier,
-};
-
-static void __init gic_pm_init(struct gic_chip_data *gic)
-{
-       gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
-               sizeof(u32));
-       BUG_ON(!gic->saved_ppi_enable);
-
-       gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
-               sizeof(u32));
-       BUG_ON(!gic->saved_ppi_conf);
-
-       if (gic == &gic_data[0])
-               cpu_pm_register_notifier(&gic_notifier_block);
-}
-#else
-static void __init gic_pm_init(struct gic_chip_data *gic)
-{
-}
-#endif /* CONFIG_CPU_PM */
-
-#ifdef CONFIG_SMP
 void axxia_gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
        int cpu;
@@ -1159,10 +1201,6 @@ void axxia_gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
                muxed_ipi_message_pass(mask, MUX_MSG_CPU_STOP);
                break;
 
-       case AXXIA_RPC:
-               regoffset += 0xC; /* Axxia IPI3 */
-               break;
-
        default:
                /* Unknown ARM IPI */
                pr_err("Unknown ARM IPI num (%d)!\n", irq);
@@ -1178,7 +1216,6 @@ void axxia_gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
        /* Axxia chip uses external SPI interrupts for IPI functionality. */
        writel_relaxed(map, ipi_send_reg_base + regoffset);
 }
-#endif /* SMP */
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                                irq_hw_number_t hw)
@@ -1233,16 +1270,13 @@ void __init axxia_gic_init_bases(int irq_start,
        irq_hw_number_t hwirq_base;
        struct gic_chip_data *gic;
        int gic_irqs, irq_base;
-       int i;
 
-       for (i = 0; i < MAX_NUM_CLUSTERS; i++) {
-               gic = &gic_data[i];
+       gic = &gic_data;
 
-               /* Normal, sane GIC... */
-               gic->dist_base.common_base = dist_base;
-               gic->cpu_base.common_base = cpu_base;
-               gic_set_base_accessor(gic, gic_get_common_base);
-       }
+       /* Normal, sane GIC... */
+       gic->dist_base.common_base = dist_base;
+       gic->cpu_base.common_base = cpu_base;
+       gic_set_base_accessor(gic, gic_get_common_base);
 
        /*
         * For primary GICs, skip over SGIs.
@@ -1260,15 +1294,11 @@ void __init axxia_gic_init_bases(int irq_start,
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources.
         */
-       gic = &gic_data[0];
        gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
        gic_irqs = (gic_irqs + 1) * 32;
        if (gic_irqs > MAX_GIC_INTERRUPTS)
                gic_irqs = MAX_GIC_INTERRUPTS;
-       for (i = 0; i < MAX_NUM_CLUSTERS; i++) {
-               gic = &gic_data[i];
-               gic->gic_irqs = gic_irqs;
-       }
+       gic->gic_irqs = gic_irqs;
 
        gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
        irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
@@ -1278,7 +1308,6 @@ void __init axxia_gic_init_bases(int irq_start,
                     irq_start);
                irq_base = irq_start;
        }
-       gic = &gic_data[0];
        gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
                                    hwirq_base, &gic_irq_domain_ops, gic);
        if (WARN_ON(!gic->domain))
@@ -1292,25 +1321,13 @@ void __init axxia_gic_init_bases(int irq_start,
 
 void __cpuinit axxia_gic_secondary_init(void)
 {
-       /*
-        * OK to always use the gic_data associated with
-        * the first cluster. All clusters use the same
-        * dist and cpu base addresses.
-        */
-
-       gic_cpu_init(&gic_data[0]);
+       gic_cpu_init(&gic_data);
 }
 
 
 void __cpuinit axxia_gic_secondary_cluster_init(void)
 {
-       /*
-        * OK to always use the gic_data associated with
-        * the first cluster. All clusters use the same
-        * dist and cpu base addresses.
-        */
-
-       struct gic_chip_data *gic = &gic_data[0];
+       struct gic_chip_data *gic = &gic_data;
 
        /*
         * Initialize the GIC distributor and cpu interfaces
-- 
1.7.9.5
