From: David Mercado <david.merc...@windriver.com> This patch adds PMU support to the Axxia AXM55xx platform. Note that on this platform all PMU IRQ lines are OR'ed together into a single IRQ; this implementation therefore uses a rotating IRQ affinity scheme to handle it.
Signed-off-by: David Mercado <david.merc...@windriver.com> --- arch/arm/kernel/perf_event_v7.c | 3 +- arch/arm/mach-axxia/axxia-gic.c | 25 ++++++++++++-- arch/arm/mach-axxia/axxia.c | 55 +++++++++++++++++++++++++++++++ arch/arm/mach-axxia/include/mach/irqs.h | 1 + 4 files changed, 81 insertions(+), 3 deletions(-) diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 7deaa7f..ae7d9d8 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -95,6 +95,7 @@ enum armv7_a5_perf_types { /* ARMv7 Cortex-A15 specific event types */ enum armv7_a15_perf_types { + ARMV7_A15_PERFCTR_CPU_CYCLES = 0x11, ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40, ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41, ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42, @@ -489,7 +490,7 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] * Cortex-A15 HW events mapping */ static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_A15_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c index e5ad304..8a8e8eb 100644 --- a/arch/arm/mach-axxia/axxia-gic.c +++ b/arch/arm/mach-axxia/axxia-gic.c @@ -283,6 +283,10 @@ static void gic_mask_irq(struct irq_data *d) if ((irqid >= IPI0_CPU0) && (irqid < MAX_AXM_IPI_NUM)) return; + /* Don't mess with the PMU IRQ either. */ + if (irqid == IRQ_PMU) + return; + /* Deal with PPI interrupts directly. */ if ((irqid > 16) && (irqid < 32)) { _gic_mask_irq(d); @@ -327,6 +331,10 @@ static void gic_unmask_irq(struct irq_data *d) if ((irqid >= IPI0_CPU0) && (irqid < MAX_AXM_IPI_NUM)) return; + /* Don't mess with the PMU IRQ either. 
*/ + if (irqid == IRQ_PMU) + return; + /* Deal with PPI interrupts directly. */ if ((irqid > 15) && (irqid < 32)) { _gic_unmask_irq(d); @@ -565,8 +573,8 @@ static int gic_set_affinity(struct irq_data *d, * different than the prior cluster, remove the IRQ affinity * on the old cluster. */ - if ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) != - (irq_cpuid[irqid] / CORES_PER_CLUSTER)) { + if ((irqid != IRQ_PMU) && ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) != + (irq_cpuid[irqid] / CORES_PER_CLUSTER))) { /* * If old cpu assignment falls within the same cluster as * the cpu we're currently running on, set the IRQ affinity @@ -775,6 +783,11 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic) } /* + * Set the PMU IRQ to the first cpu in this cluster. + */ + writeb_relaxed(0x01, base + GIC_DIST_TARGET + IRQ_PMU); + + /* * Set Axxia IPI interrupts to be edge triggered. */ for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) { @@ -797,6 +810,14 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic) base + GIC_DIST_ENABLE_SET + enableoff); } + /* + * Do the initial enable of the PMU IRQ here. 
+ */ + enablemask = 1 << (IRQ_PMU % 32); + enableoff = (IRQ_PMU / 32) * 4; + writel_relaxed(enablemask, + base + GIC_DIST_ENABLE_SET + enableoff); + writel_relaxed(1, base + GIC_DIST_CTRL); } diff --git a/arch/arm/mach-axxia/axxia.c b/arch/arm/mach-axxia/axxia.c index 0e368a4..1c1f20e 100644 --- a/arch/arm/mach-axxia/axxia.c +++ b/arch/arm/mach-axxia/axxia.c @@ -46,6 +46,7 @@ #include <asm/mach/time.h> #include <asm/hardware/cache-l2x0.h> #include <asm/hardware/gic.h> +#include <asm/pmu.h> #include <mach/hardware.h> #include <mach/timers.h> #include <mach/axxia-gic.h> @@ -178,6 +179,58 @@ static struct of_dev_auxdata axxia_auxdata_lookup[] __initdata = { {} }; +static struct resource axxia_pmu_resources[] = { + [0] = { + .start = IRQ_PMU, + .end = IRQ_PMU, + .flags = IORESOURCE_IRQ, + }, +}; + +/* + * The PMU IRQ lines of four cores are wired together into a single interrupt. + * Bounce the interrupt to other cores if it's not ours. + */ +#define CORES_PER_CLUSTER 4 +static irqreturn_t axxia_pmu_handler(int irq, void *dev, irq_handler_t handler) +{ + irqreturn_t ret = handler(irq, dev); + int cpu = smp_processor_id(); + int cluster = cpu / CORES_PER_CLUSTER; + int other; + + if (ret == IRQ_NONE) { + + /* Look until we find another cpu that's online. */ + do { + other = (++cpu % CORES_PER_CLUSTER) + + (cluster * CORES_PER_CLUSTER); + } while (!cpu_online(other)); + + irq_set_affinity(irq, cpumask_of(other)); + } + + /* + * We should be able to get away with the amount of IRQ_NONEs we give, + * while still having the spurious IRQ detection code kick in if the + * interrupt really starts hitting spuriously. 
+ */ + return ret; +} + +static struct arm_pmu_platdata axxia_pmu_platdata = { + .handle_irq = axxia_pmu_handler, +}; + + +static struct platform_device pmu_device = { + .name = "arm-pmu", + .id = ARM_PMU_DEVICE_CPU, + .num_resources = ARRAY_SIZE(axxia_pmu_resources), + .resource = axxia_pmu_resources, + .dev.platform_data = &axxia_pmu_platdata, +}; + static inline void spidev_chip_select(u32 control, unsigned n) { @@ -291,6 +344,8 @@ void __init axxia_dt_init(void) axxia_pcie_init(); axxia_ddr_retention_init(); + + platform_device_register(&pmu_device); } static void axxia_restart(char str, const char *cmd) diff --git a/arch/arm/mach-axxia/include/mach/irqs.h b/arch/arm/mach-axxia/include/mach/irqs.h index 5f25c95..d0ddc8b 100644 --- a/arch/arm/mach-axxia/include/mach/irqs.h +++ b/arch/arm/mach-axxia/include/mach/irqs.h @@ -1,4 +1,5 @@ #define IRQ_LOCALTIMER 29 #define IRQ_LOCALWDOG 30 +#define IRQ_PMU 222 #define AXXIA_MSI_FIRST 224 #define NR_IRQS 256 -- 1.7.9.5 -- _______________________________________________ linux-yocto mailing list linux-yocto@yoctoproject.org https://lists.yoctoproject.org/listinfo/linux-yocto