From: John Jacques <[email protected]>

This includes powering down a cluster when all four cores are off and
powering up the cluster when one of the cores gets re-enabled.
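[Editor's note: for readers skimming the patch, the central bookkeeping is a
"powered down" CPU bitmask (pm_cpu_powered_down) plus a test for whether the
dying CPU is the last running core of its 4-core cluster
(pm_cpu_last_of_cluster()); only in that case is the L2/cluster also shut
down. The sketch below is illustrative only and is not code from this patch
(the patch uses an explicit switch over cpu numbers); it assumes 4 cores per
cluster, a single unsigned long mask, and that the dying CPU has not yet been
marked as powered down.]

    /*
     * Illustrative sketch only -- not code from this patch.  A mask-based
     * check equivalent to pm_cpu_last_of_cluster() for 4 cores per cluster.
     */
    #define CORES_PER_CLUSTER	4

    static unsigned long powered_down_mask;	/* bit n set => cpu n is powered off */

    static int last_running_cpu_of_cluster(unsigned int cpu)
    {
    	unsigned int cluster = cpu / CORES_PER_CLUSTER;
    	unsigned long cluster_bits = 0xfUL << (cluster * CORES_PER_CLUSTER);

    	/* cpu itself is still running; the other three must already be off */
    	return (powered_down_mask & cluster_bits) ==
    	       (cluster_bits & ~(1UL << cpu));
    }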
Signed-off-by: John Jacques <[email protected]> --- arch/arm/mach-axxia/Makefile | 2 +- arch/arm/mach-axxia/axxia-gic.c | 10 +- arch/arm/mach-axxia/axxia.h | 1 + arch/arm/mach-axxia/hotplug.c | 76 +- arch/arm/mach-axxia/lsi_power_management.c | 1522 ++++++++++++++++++++++++++++ arch/arm/mach-axxia/lsi_power_management.h | 183 ++++ arch/arm/mach-axxia/platsmp.c | 44 +- 7 files changed, 1778 insertions(+), 60 deletions(-) create mode 100644 arch/arm/mach-axxia/lsi_power_management.c create mode 100644 arch/arm/mach-axxia/lsi_power_management.h diff --git a/arch/arm/mach-axxia/Makefile b/arch/arm/mach-axxia/Makefile index a4ecf4e..720857b 100644 --- a/arch/arm/mach-axxia/Makefile +++ b/arch/arm/mach-axxia/Makefile @@ -12,5 +12,5 @@ obj-y += ddr_retention.o ddr_shutdown.o obj-$(CONFIG_I2C) += i2c.o obj-$(CONFIG_SMP) += platsmp.o headsmp.o obj-$(CONFIG_ARCH_AXXIA_GIC) += axxia-gic.o -obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o +obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o lsi_power_management.o obj-$(CONFIG_AXXIA_RIO) += rapidio.o diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c index 53e525a..c670f3d 100644 --- a/arch/arm/mach-axxia/axxia-gic.c +++ b/arch/arm/mach-axxia/axxia-gic.c @@ -45,6 +45,7 @@ #include <asm/mach/irq.h> #include <mach/axxia-gic.h> +#include "lsi_power_management.h" #define MAX_GIC_INTERRUPTS 1020 #define MAX_NUM_CLUSTERS 4 @@ -1057,9 +1058,10 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic) u32 enablemask; u32 enableoff; u32 val; + u32 this_cluster = get_cluster_id(); /* Initialize the distributor interface once per CPU cluster */ - if (test_and_set_bit(get_cluster_id(), &gic->dist_init_done)) + if ((test_and_set_bit(get_cluster_id(), &gic->dist_init_done)) && (!cluster_power_up[this_cluster])) return; cpumask = 1 << cpu; @@ -1362,6 +1364,12 @@ void __cpuinit axxia_gic_secondary_init(void) gic_cpu_init(&gic_data); } +void __cpuinit axxia_hotplug_gic_secondary_init(void) +{ + struct gic_chip_data *gic = &gic_data; + gic_cpu_init(&gic_data); +} + #ifdef CONFIG_OF int __init axxia_gic_of_init(struct device_node *node, diff --git a/arch/arm/mach-axxia/axxia.h b/arch/arm/mach-axxia/axxia.h index 156c6e0..000adc8 100644 --- a/arch/arm/mach-axxia/axxia.h +++ b/arch/arm/mach-axxia/axxia.h @@ -3,6 +3,7 @@ void axxia_init_clocks(int is_sim); void axxia_ddr_retention_init(void); void axxia_platform_cpu_die(unsigned int cpu); +int axxia_platform_cpu_kill(unsigned int cpu); extern struct smp_operations axxia_smp_ops; diff --git a/arch/arm/mach-axxia/hotplug.c b/arch/arm/mach-axxia/hotplug.c index fb5ec90..9e82bdc 100644 --- a/arch/arm/mach-axxia/hotplug.c +++ b/arch/arm/mach-axxia/hotplug.c @@ -15,6 +15,8 @@ #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include <asm/cp15.h> +#include "lsi_power_management.h" + extern volatile int pen_release; @@ -63,49 +65,10 @@ static inline void cpu_leave_lowpower(void) : "cc"); } -static void __ref platform_do_lowpower(unsigned int cpu, int *spurious) -{ - int phys_cpu, cluster; - /* - * there is no power-control hardware on this platform, so all - * we can do is put the core into WFI; this is safe as the calling - * code will have already disabled interrupts - */ - for (;;) { - wfi(); - - /* - * Convert the "cpu" variable to be compatible with the - * ARM MPIDR register format (CLUSTERID and CPUID): - * - * Bits: |11 10 9 8|7 6 5 4 3 2|1 0 - * | CLUSTER | Reserved |CPU - */ - phys_cpu = cpu_logical_map(cpu); - cluster = (phys_cpu / 4) << 8; - phys_cpu = cluster + (phys_cpu % 4); - - if (pen_release == 
phys_cpu) { - /* - * OK, proper wakeup, we're done - */ - break; - } - - /* - * Getting here, means that we have come out of WFI without - * having been woken up - this shouldn't happen - * - * Just note it happening - when we're woken, we can report - * its occurrence. - */ - (*spurious)++; - } -} - -int platform_cpu_kill(unsigned int cpu) +int axxia_platform_cpu_kill(unsigned int cpu) { + pm_cpu_shutdown(cpu); return 1; } @@ -114,24 +77,29 @@ int platform_cpu_kill(unsigned int cpu) * * Called with IRQs disabled */ + void axxia_platform_cpu_die(unsigned int cpu) { - int spurious = 0; - /* - * we're ready for shutdown now, so do it - */ - cpu_enter_lowpower_a15(); - platform_do_lowpower(cpu, &spurious); + pm_data pm_request; + int rVal = 0; + bool lastCpu; - /* - * bring this CPU back into the world of cache - * coherency, and then restore interrupts - */ - cpu_leave_lowpower(); + pm_request.cpu = cpu; + pm_request.cluster = 0; + + + lastCpu = pm_cpu_last_of_cluster(cpu); + if (lastCpu) + rVal = pm_cpul2_logical_die(&pm_request); + else + rVal = pm_cpu_logical_die(&pm_request); + if (rVal) + pr_err("CPU %d failed to die\n", cpu); + + for (;;) + wfi(); - if (spurious) - pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); } int platform_cpu_disable(unsigned int cpu) diff --git a/arch/arm/mach-axxia/lsi_power_management.c b/arch/arm/mach-axxia/lsi_power_management.c new file mode 100644 index 0000000..9723054 --- /dev/null +++ b/arch/arm/mach-axxia/lsi_power_management.c @@ -0,0 +1,1522 @@ +/* + * linux/arch/arm/mach-axxia/lsi_power_management.c + * + * C * Created on: Jun 19, 2014 + * Author: z8cpaul + * opyright (C) 2002 ARM Ltd. + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Created on: Jun 19, 2014 + * Author: z8cpaul + */ +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/smp.h> +#include <linux/delay.h> +#include <asm/exception.h> +#include <asm/cacheflush.h> +#include <asm/smp_plat.h> +#include <asm/cp15.h> + +#include "axxia.h" +#include "lsi_power_management.h" + +#undef DEBUG_CPU_PM + +#define SYSCON_PHYS_ADDR 0x002010030000ULL +#define DICKENS_PHYS_ADDR 0x2000000000 + +#define PM_WAIT_TIME (10000) +#define MAX_CLUSTER (4) + +#define CHECK_BIT(var, pos) ((var) & (1 << (pos))) + +bool pm_in_progress[16]; +bool cluster_power_up[4]; + +static const u32 cluster_to_node[MAX_CLUSTER] = { DKN_CLUSTER0_NODE, +DKN_CLUSTER1_NODE, +DKN_CLUSTER2_NODE, +DKN_CLUSTER3_NODE }; + +static const u32 cluster_to_poreset[MAX_CLUSTER] = { +PORESET_CLUSTER0, +PORESET_CLUSTER1, +PORESET_CLUSTER2, +PORESET_CLUSTER3 }; + +static u32 pm_cpu_powered_down; + +/*======================= LOCAL FUNCTIONS ==============================*/ +static void pm_set_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data); +static void pm_clear_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data); +static bool pm_test_for_bit_with_timeout(void __iomem *syscon, u32 reg, u32 bit); +static bool pm_wait_for_bit_clear_with_timeout(void __iomem *syscon, u32 reg, + u32 bit); +static void pm_dickens_logical_shutdown(u32 cluster); +static int pm_dickens_logical_powerup(u32 cluster); +static int pm_cpu_physical_isolation_and_power_down(int cpu); +static void pm_L2_isolation_and_power_down(int cluster); +static void __pm_cpu_shutdown(void *data); +static int pm_cpu_physical_connection_and_power_up(int cpu); +static int pm_L2_physical_connection_and_power_up(u32 cluster); +static int pm_L2_logical_powerup(u32 cluster, u32 cpu); + +static bool pm_first_cpu_of_cluster(u32 cpu) +{ + u32 count = 0; + switch (cpu) { + case (0): + case (1): + case (2): + case (3): + /* This will never happen because cpu 0 will never be turned off */ + break; + case (4): + case (5): + case (6): + case (7): + if (pm_cpu_powered_down & (1 << 4)) + count++; + if (pm_cpu_powered_down & (1 << 5)) + count++; + if (pm_cpu_powered_down & (1 << 6)) + count++; + if (pm_cpu_powered_down & (1 << 7)) + count++; + if (count == 4) + return true; + break; + case (8): + case (9): + case (10): + case (11): + if (pm_cpu_powered_down & (1 << 8)) + count++; + if (pm_cpu_powered_down & (1 << 9)) + count++; + if (pm_cpu_powered_down & (1 << 10)) + count++; + if (pm_cpu_powered_down & (1 << 11)) + count++; + if (count == 4) + return true; + break; + case (12): + case (13): + case (14): + case (15): + if (pm_cpu_powered_down & (1 << 12)) + count++; + if (pm_cpu_powered_down & (1 << 13)) + count++; + if (pm_cpu_powered_down & (1 << 14)) + count++; + if (pm_cpu_powered_down & (1 << 15)) + count++; + if (count == 4) + return true; + break; + default: + pr_err("ERROR: the cpu does not exist: %d - %s:%d\n", cpu, __FILE__, + __LINE__); + break; + } + return false; +} + +bool pm_cpu_last_of_cluster(u32 cpu) +{ + u32 count = 0; + switch (cpu) { + case (0): + case (1): + case (2): + case (3): + /* This will never happen because cpu 0 will never be turned off */ + break; + case (4): + case (5): + case (6): + case (7): + if (pm_cpu_powered_down & (1 << 4)) + count++; + if (pm_cpu_powered_down & (1 << 5)) + count++; + if (pm_cpu_powered_down & (1 << 6)) + count++; + if (pm_cpu_powered_down & (1 << 7)) + count++; + if (count == 3) + return true; + break; + case (8): + case (9): + case (10): + case (11): + 
if (pm_cpu_powered_down & (1 << 8)) + count++; + if (pm_cpu_powered_down & (1 << 9)) + count++; + if (pm_cpu_powered_down & (1 << 10)) + count++; + if (pm_cpu_powered_down & (1 << 11)) + count++; + if (count == 3) + return true; + break; + case (12): + case (13): + case (14): + case (15): + if (pm_cpu_powered_down & (1 << 12)) + count++; + if (pm_cpu_powered_down & (1 << 13)) + count++; + if (pm_cpu_powered_down & (1 << 14)) + count++; + if (pm_cpu_powered_down & (1 << 15)) + count++; + if (count == 3) + return true; + break; + default: + pr_err("ERROR: the cpu does not exist: %d - %s:%d\n", cpu, __FILE__, + __LINE__); + break; + } + return false; +} + +static void pm_set_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data) +{ + u32 tmp; + + tmp = readl(syscon + reg); + tmp |= data; + writel(tmp, syscon + reg); +} + +static void pm_clear_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data) +{ + u32 tmp; + + tmp = readl(syscon + reg); + tmp &= ~(data); + writel(tmp, syscon + reg); +} + +static bool pm_test_for_bit_with_timeout(void __iomem *syscon, u32 reg, u32 bit) +{ + + u32 tmp = 0; + u32 cnt = 0; + + while (cnt < PM_WAIT_TIME) { + tmp = readl(syscon + reg); + if (CHECK_BIT(tmp, bit)) + break; + cnt++; + } + if (cnt == PM_WAIT_TIME) { + pr_err("reg=0x%x tmp:=0x%x\n", reg, tmp); + return false; + } + return true; +} + +static bool pm_wait_for_bit_clear_with_timeout(void __iomem *syscon, u32 reg, + u32 bit) +{ + u32 cnt = 0; + u32 tmp = 0; + + while (cnt < PM_WAIT_TIME) { + tmp = readl(syscon + reg); + if (!(CHECK_BIT(tmp, bit))) + break; + cnt++; + } + if (cnt == PM_WAIT_TIME) { + pr_err("reg=0x%x tmp:=0x%x\n", reg, tmp); + return false; + } + + return true; +} +static void pm_dickens_logical_shutdown(u32 cluster) +{ + int i; + int status; + u32 bit; + u32 bit_pos; + int retries; + void __iomem *dickens; + + dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M); + if (dickens == NULL) { + pr_err("DICKENS: Failed to map the dickens registers\n"); + return; + } + + bit = (0x01 << cluster_to_node[cluster]); + bit_pos = cluster_to_node[cluster]; + + for (i = 0; i < DKN_HNF_TOTAL_NODES; ++i) { + writel(bit, + dickens + (0x10000 * (DKN_HNF_NODE_ID + i)) + + DKN_HNF_SNOOP_DOMAIN_CTL_CLR); + + retries = PM_WAIT_TIME; + + do { + status = readl( + dickens + (0x10000 * (DKN_HNF_NODE_ID + i)) + + DKN_HNF_SNOOP_DOMAIN_CTL); + udelay(1); + } while ((0 < --retries) && CHECK_BIT(status, bit_pos)); + + if (0 == retries) { + pr_err("DICKENS: Failed to clear the SNOOP main control. 
LOOP:%d reg: 0x%x\n", i, status); + goto dickens_power_down; + + } + + } + /* Clear the domain cluster */ + writel(bit, dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET) + DKN_MN_DVM_DOMAIN_CTL_CLR); + + /* Check for complete */ + retries = PM_WAIT_TIME; + + do { + status = readl( + dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET) + + DKN_MN_DVM_DOMAIN_CTL); + udelay(1); + } while ((0 < --retries) && CHECK_BIT(status, bit_pos)); + + if (0 == retries) { + pr_err("DICKENS: failed to set DOMAIN OFFSET Reg=0x%x\n", status); + goto dickens_power_down; + + } + +dickens_power_down: + iounmap(dickens); + + return; +} + +static int pm_dickens_logical_powerup(u32 cluster) +{ + int i; + u32 status; + u32 bit; + u32 bit_pos; + int retries; + int rval = 0; + + void __iomem *dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M); + if (dickens == NULL) { + pr_err("Failed to map dickens registers\n"); + return -EINVAL; + } + + bit = (0x01 << cluster_to_node[cluster]); + bit_pos = cluster_to_node[cluster]; + + for (i = 0; i < DKN_HNF_TOTAL_NODES; ++i) { + writel(bit, + dickens + (0x10000 * (DKN_HNF_NODE_ID + i)) + + DKN_HNF_SNOOP_DOMAIN_CTL_SET); + + retries = PM_WAIT_TIME; + + do { + status = readl( + dickens + (0x10000 * (DKN_HNF_NODE_ID + i)) + + DKN_HNF_SNOOP_DOMAIN_CTL); + udelay(1); + } while ((0 < --retries) && !CHECK_BIT(status, bit_pos)); + + if (0 == retries) { + pr_err("DICKENS: Failed on the SNOOP DONAIN\n"); + rval = -EINVAL; + goto dickens_power_up; + } + + } + + /* Clear the domain cluster */ + writel(bit, dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET) + DKN_MN_DVM_DOMAIN_CTL_SET); + + /* Check for complete */ + retries = PM_WAIT_TIME; + + do { + status = readl( + dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET) + + DKN_MN_DVM_DOMAIN_CTL); + udelay(1); + } while ((0 < --retries) && !CHECK_BIT(status, bit_pos)); + + if (0 == retries) { + pr_err("DICKENS: Failed on the SNOOP DONAIN\n"); + rval = -EINVAL; + goto dickens_power_up; + } + +dickens_power_up: + iounmap(dickens); + + return rval; +} + +static void __pm_cpu_shutdown(void *data) +{ + + pm_data *pm_request = (pm_data *)data; + void __iomem *syscon; + bool success; + u32 cluster_mask = (0x01 << pm_request->cluster); + bool last_cpu; + int rval = 0; + + /* + * Is this the last cpu of a cluster then turn off the L2 cache + * along with the CPU. 
+ */ + last_cpu = pm_cpu_last_of_cluster(pm_request->cpu); + if (last_cpu) { + + /* Remove the cluster from the Dickens coherency domain */ + pm_dickens_logical_shutdown(pm_request->cluster); + + /* Power down the cpu */ + pm_cpu_physical_isolation_and_power_down(pm_request->cpu); + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return; + +#if 0 + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_TS, cluster_mask); + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_TS, pm_request->cluster); + if (!success) { + pr_err( + "Failed to keep other cluster TS going on cluster %d: %s-%d\n", + pm_request->cluster, __FILE__, __LINE__); + iounmap(syscon); + return; + } + + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_ATB, cluster_mask); + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_ATB, pm_request->cluster); + if (!success) { + pr_err( + "Failed to keep other cluster ATB going on cluster %d: %s-%d\n", + pm_request->cluster, __FILE__, __LINE__); + iounmap(syscon); + return; + } + + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_APB, cluster_mask); + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_APB, pm_request->cluster); + if (!success) { + pr_err( + "Failed to keep other cluster APB going on cluster %d: %s-%d\n", + pm_request->cluster, __FILE__, __LINE__); + iounmap(syscon); + return; + } +#endif + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_CNT, cluster_mask); + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_CNT, pm_request->cluster); + if (!success) { + pr_err( + "Failed to keep other cluster count going on cluster %d: %s-%d\n", + pm_request->cluster, __FILE__, __LINE__); + iounmap(syscon); + return; + } + + /* Turn off the ACE */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACEPWRDNRQ, cluster_mask); + + /* Wait for ACE to complete power off */ + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NACEPWRDNACK, pm_request->cluster); + if (!success) { + pr_err("Failed to power off ACE on cluster %d: %s-%d\n", + pm_request->cluster, __FILE__, __LINE__); + iounmap(syscon); + return; + } + + /* Isolate the cluster */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATEL2MISC, cluster_mask); + + /* Wait for WFI L2 to go to standby */ + success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFIL2, pm_request->cluster); + if (!success) { + pr_err("Failed to enter L2 WFI on cluster %d: %s-%d\n", + pm_request->cluster, __FILE__, __LINE__); + iounmap(syscon); + return; + } + + iounmap(syscon); + + /* Power off the L2 */ + pm_L2_isolation_and_power_down(pm_request->cluster); + if (rval == 0) { + pr_info("CPU %d is powered down with cluster: %d\n", pm_request->cpu, pm_request->cluster); + pm_cpu_powered_down |= (1 << pm_request->cpu); + } else + pr_err("CPU %d failed to power down\n", pm_request->cpu); + + + } else { + + rval = pm_cpu_physical_isolation_and_power_down(pm_request->cpu); + if (rval == 0) + pm_cpu_powered_down |= (1 << pm_request->cpu); + else + pr_err("CPU %d failed to power down\n", pm_request->cpu); + } + + return; +} + + +int pm_cpu_logical_die(pm_data *pm_request) +{ + void __iomem *syscon; + bool success; + + smp_call_function_single(pm_request->cpu, pm_cpu_logical_shutdown, (void *)pm_request, 1); + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* Wait for the cpu to enter wfi */ + success = 
pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFI, pm_request->cpu); + if (!success) { + pr_err("Failed to enter WFI mode on cpu %d: %s-%d\n", + pm_request->cpu, __FILE__, __LINE__); + iounmap(syscon); + return -EINVAL; + } + + iounmap(syscon); + return 0; +} + +int pm_cpul2_logical_die(pm_data *pm_request) +{ + void __iomem *syscon; + bool success; + + smp_call_function_single(pm_request->cpu, pm_L2_logical_shutdown, (void *)pm_request, 1); + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* Wait for the cpu to enter wfi */ + success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFI, pm_request->cpu); + if (!success) { + pr_err("Failed to enter WFI mode on cpu %d: %s-%d\n", + pm_request->cpu, __FILE__, __LINE__); + iounmap(syscon); + return -EINVAL; + } + + iounmap(syscon); + return 0; +} + +void pm_cpu_shutdown(u32 cpu) +{ + + pm_data pm_request; + + u32 pcpu = cpu_logical_map(smp_processor_id()); + u32 rcpu = cpumask_any_and(cpu_present_mask, cpu_online_mask); + u32 reqcpu = cpu_logical_map(cpu); + + /* Check to see if the cpu is powered up */ + if (pm_cpu_powered_down & (1 << cpu)) { + pr_err("CPU %d is already powered off - %s:%d\n", cpu, __FILE__, __LINE__); + return; + } + /* + * Is this the last cpu to be powered off, then don't + * allow the power to be shut off. + */ + if (cpu == 0) { + pr_err("Cannot turn off cpu 0 - %s:%d\n", __FILE__, __LINE__); + return; + } + + /* + * Is this process on the requested cpu to power down + * then send it to another cpu for processing + */ + pm_request.cpu = cpu; + pm_request.cluster = reqcpu / CORES_PER_CLUSTER; + + if (pcpu == cpu) + smp_call_function_single(rcpu, __pm_cpu_shutdown, (void *)&pm_request, 0); + else + __pm_cpu_shutdown(&pm_request); + +} + +int pm_cpu_powerup(u32 cpu) +{ + + bool first_cpu; + int rval = 0; + void __iomem *syscon = NULL; + u32 cpu_mask = (0x01 << cpu); + + u32 reqcpu = cpu_logical_map(cpu); + u32 cluster = reqcpu / CORES_PER_CLUSTER; + + /* Hold the CPU in reset */ + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* + * The key value has to be written before the CPU RST can be written. + */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask); + + iounmap(syscon); + + /* + * Is this the first cpu of a cluster to come back on? + * Then power up the L2 cache. + */ + first_cpu = pm_first_cpu_of_cluster(cpu); + if (first_cpu) { + + + rval = pm_L2_logical_powerup(cluster, cpu); + if (rval) { + pr_err("CPU: Failed the logical L2 power up\n"); + return rval; + } + cluster_power_up[cluster] = true; + + } + + + /* + * Power up the CPU + */ + rval = pm_cpu_physical_connection_and_power_up(cpu); + if (rval) { + pr_err("Failed to power up physical connection of cpu: %d\n", cpu); + goto pm_power_up; + } + + udelay(16); + + /* Clear the CPU from reset and let it go */ + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* + * The key value must be written before the CPU RST can be written. + */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask); + + /* + * The key value must be written before HOLD CPU can be written. 
+ */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_CPU, cpu_mask); + + /* + * Clear the powered down mask + */ + pm_cpu_powered_down &= ~(1 << cpu); + + +pm_power_up: + iounmap(syscon); + return rval; +} + +unsigned long pm_get_powered_down_cpu(void) +{ + return pm_cpu_powered_down; +} + +void pm_cpu_logical_shutdown(void *data) +{ + u32 val; + + asm volatile( + " mrc p15, 1, %0, c9, c0, 2\n" + : "=&r" (val) + : "Ir" (0x1) + : "cc"); + + asm volatile( + " mrc p15, 0, %0, c1, c0, 0\n" + " bic %0, %0, %1\n" + " mcr p15, 0, %0, c1, c0, 0\n" + : "=&r" (val) + : "Ir" (CR_C) + : "cc"); + + /* Clear and invalidate all date from L1 data cache */ + flush_cache_all(); + + /* Switch the processor over to AMP mode out of SMP */ + asm volatile( + " mrc p15, 0, %0, c1, c0, 1\n" + " bic %0, %0, %1\n" + " mcr p15, 0, %0, c1, c0, 1\n" + : "=&r" (val) + : "Ir" (0x40) + : "cc"); + + isb(); + dsb(); + + wfi(); + + return; + +} + +void pm_cpu_logical_powerup(void) +{ + unsigned int v; + + asm volatile( + " mrc p15, 0, %0, c1, c0, 0\n" + " orr %0, %0, %1\n" + " mcr p15, 0, %0, c1, c0, 0\n" + " mrc p15, 0, %0, c1, c0, 0\n" + " orr %0, %0, %2\n" + " mcr p15, 0, %0, c1, c0, 0\n" + " mrc p15, 0, %0, c1, c0, 1\n" + " orr %0, %0, %3\n" + " mcr p15, 0, %0, c1, c0, 1\n" + : "=&r" (v) + : "Ir" (CR_C), "Ir" (CR_I), "Ir" (0x40) + : "cc"); + + asm volatile( + " mrc p15, 1, %0, c9, c0, 2\n" + : "=&r" (v) + : "Ir" (0x1) + : "cc"); + +} + +static int pm_cpu_physical_isolation_and_power_down(int cpu) +{ + void __iomem *syscon; + int rval = 0; + + bool success; + u32 mask = (0x01 << cpu); + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* Initiate power down of the CPU's HS Rams */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPURAM, mask); + + /* Wait until the RAM power down is complete */ + success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu); + if (!success) { + rval = -EINVAL; + pr_err("CPU: Failed to power down CPU RAM\n"); + goto power_down_cleanup; + } + + /* Activate the CPU's isolation clamps */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATECPU, mask); + + /* Initiate power down of the CPU logic */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG2, mask); + + udelay(10); + + /* Continue power down of the CPU logic */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG1, mask); + + success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu); + if (!success) { + rval = -EINVAL; + pr_err("CPU: Failed to power down stage 1 cpu\n"); + goto power_down_cleanup; + } + +power_down_cleanup: + iounmap(syscon); + return rval; +} + +static int pm_cpu_physical_connection_and_power_up(int cpu) +{ + int rval = 0; + void __iomem *syscon; + bool success; + u32 mask = (0x01 << cpu); + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* Initiate power up of the CPU */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG1, mask); + + /* Wait until CPU logic power is compete */ + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu); + if (!success) { + rval = -EINVAL; + pr_err("CPU: Failed to get ACK from power down stage 1\n"); + goto power_up_cleanup; + } + + /* Continue stage 2 power up of the CPU*/ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG2, mask); + + udelay(10); + + /* 
Initiate power up of HS Rams */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPURAM, mask); + + /* Wait until the RAM power up is complete */ + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu); + if (!success) { + rval = -EINVAL; + pr_err("CPU: Failed to get ACK of power power up\n"); + goto power_up_cleanup; + } + + /* Release the CPU's isolation clamps */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATECPU, mask); + +power_up_cleanup: + iounmap(syscon); + + return rval; + +} +/*========================================== L2 FUNCTIONS ========================================*/ + +void pm_L2_logical_shutdown(void *data) +{ + u32 val; + + + asm volatile( + " mrc p15, 0, %0, c1, c0, 0\n" + " bic %0, %0, %1\n" + " mcr p15, 0, %0, c1, c0, 0\n" + : "=&r" (val) + : "Ir" (CR_C) + : "cc"); + + + asm volatile( + /* + * Disable L2 prefetch + */ + " mrc p15, 1, %0, c15, c0, 3\n" + " orr %0, %0, %1\n" + " mcr p15, 1, %0, c15, c0, 3\n" + : "=&r" (val) + : "Ir" (0x400) + : "cc"); + + isb(); + dsb(); + + /* Clear and invalidate all L1 and L2 data cache */ + flush_cache_all(); + + + /* Turn the DBG Double Lock quiet */ + asm volatile( + /* + * Turn Off the DBGOSDLR.DLK bit + */ + " mrc p14, 0, %0, c1, c3, 4\n" + " orr %0, %0, %1\n" + " mcr p14, 0, %0, c1, c3, 4\n" + : "=&r" (val) + : "Ir" (0x1) + : "cc"); + + /* Switch the processor over to AMP mode out of SMP */ + asm volatile( + " mrc p15, 0, %0, c1, c0, 1\n" + " bic %0, %0, %1\n" + " mcr p15, 0, %0, c1, c0, 1\n" + : "=&r" (val) + : "Ir" (0x40) + : "cc"); + + isb(); + dsb(); + + wfi(); + return; +} + +static void pm_L2_isolation_and_power_down(int cluster) +{ + void __iomem *syscon; + u32 mask = (0x1 << cluster); + + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return; + + /* Enable the chip select for the cluster */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, mask); + + /* Disable the hsram */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2HSRAM, mask); + + switch (cluster) { + case (0): + +#ifdef PM_POWER_OFF_ONLY_DATARAM + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_BANK0_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK2_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK3_MASK); + udelay(20); +#else + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK); + udelay(20); + +#endif + break; + case (1): + +#ifdef PM_POWER_OFF_ONLY_DATARAM + + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK0_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK2_MASK); + udelay(20); + 
pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK3_MASK); + udelay(20); +#else + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK); + udelay(20); +#endif + break; + case (2): + +#ifdef PM_POWER_OFF_ONLY_DATARAM + + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_BANK0_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK2_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK3_MASK); + udelay(20); +#else + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK); + udelay(20); +#endif + break; + case (3): + +#ifdef PM_POWER_OFF_ONLY_DATARAM + + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_BANK0_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK2_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK3_MASK); + udelay(20); +#else + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK); + udelay(20); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK); + udelay(20); +#endif + break; + default: + pr_err("Illegal cluster: %d > 3\n", cluster); + break; + } + + /* Power down stage 2 */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask); + + /* Power down stage 1 */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask); + + + iounmap(syscon); +} + +static int pm_L2_physical_connection_and_power_up(u32 cluster) +{ + void __iomem *syscon; + bool success; + u32 mask = (0x1 << cluster); + int rval = 0; + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* Power up stage 1 */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask); + + /* Wait for the stage 1 power up to complete */ + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK, cluster); + if (!success) { + pr_err("CPU: Failed to ack the L2 Stage 1 Power up\n"); + rval = -EINVAL; + goto power_up_l2_cleanup; + } + + /* Power on stage 2 */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask); + + /* Set the chip select */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, mask); + + /* Power up the snoop 
ramram */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2HSRAM, mask); + + /* Wait for the stage 1 power up to complete */ + success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK, cluster); + if (!success) { + pr_err("CPU: failed to get the HSRAM power up ACK\n"); + rval = -EINVAL; + goto power_up_l2_cleanup; + } + + switch (cluster) { + case (0): + +#ifdef PM_POWER_OFF_ONLY_DATARAM + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_BANK0_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK2_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK3_MASK); + udelay(20); +#else + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK); + udelay(20); + +#endif + break; + case (1): + +#ifdef PM_POWER_OFF_ONLY_DATARAM + + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK0_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK2_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK3_MASK); + udelay(20); +#else + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK); + udelay(20); +#endif + break; + case (2): + +#ifdef PM_POWER_OFF_ONLY_DATARAM + + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_BANK0_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK2_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK3_MASK); + udelay(20); +#else + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK); + udelay(20); +#endif + break; + case (3): + +#ifdef PM_POWER_OFF_ONLY_DATARAM + + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_BANK0_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, 
RAM_BANK1_MS_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK2_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK3_MASK); + udelay(20); +#else + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK); + udelay(20); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK); + udelay(20); +#endif + break; + default: + pr_err("Illegal cluster: %d > 3\n", cluster); + break; + } + + /* Clear the chip select */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, mask); + + /* Release the isolation clamps */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATEL2MISC, mask); + + /* Turn the ACE bridge power on*/ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACEPWRDNRQ, mask); + +power_up_l2_cleanup: + + iounmap(syscon); + + return rval; +} + +static int pm_L2_logical_powerup(u32 cluster, u32 cpu) +{ + + void __iomem *syscon; + u32 mask = (0x1 << cluster); + u32 cpu_mask = (0x1 << cpu); + int rval = 0; + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* put the cluster into a cpu hold */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_RESET_AXIS, + cluster_to_poreset[cluster]); + + /* + * Write the key so the reset cpu register can be written to. + */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask); + + /* Hold the chip debug cluster */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_HOLD_DBG, mask); + + /* Hold the L2 cluster */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE); + pm_set_bits_syscon_register(syscon, NCP_SYSCON_HOLD_L2, mask); + + iounmap(syscon); + + /* Cluster physical power up */ + rval = pm_L2_physical_connection_and_power_up(cluster); + + udelay(16); + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return -EINVAL; + + /* take the cluster out of a cpu hold */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_RESET_AXIS, + cluster_to_poreset[cluster]); + + udelay(64); + + /* Enable the system counter */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_CNT, mask); + + /* Release the L2 cluster */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_L2, mask); + + /* Release the chip debug cluster */ + pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE); + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_DBG, mask); + + + rval = pm_dickens_logical_powerup(cluster); + + /* start L2 */ + pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACINACTM, mask); + + iounmap(syscon); + + return rval; + +} + +#ifdef DEBUG_CPU_PM + +void pm_debug_read_pwr_registers(void) +{ + u32 reg; + + void __iomem *syscon; + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return; + + reg = readl(syscon + 0x1400); + pr_err("NCP_SYSCON_PWR_CLKEN: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_PWR_ACINACTM); + pr_err("NCP_SYSCON_PWR_ACINACTM: 0x%x\n", reg); + reg = readl(syscon + 0x140c); + pr_err("NCP_SYSCON_PWR_CHIPSELECTEN: 0x%x\n", reg); + reg = readl(syscon + 0x1410); + 
pr_err("NCP_SYSCON_PWR_CSYSREQ_TS: 0x%x\n", reg); + reg = readl(syscon + 0x1414); + pr_err("NCP_SYSCON_PWR_CSYSREQ_CNT: 0x%x\n", reg); + reg = readl(syscon + 0x1418); + pr_err("NCP_SYSCON_PWR_CSYSREQ_ATB: 0x%x\n", reg); + reg = readl(syscon + 0x141c); + pr_err("NCP_SYSCON_PWR_CSYSREQ_APB: 0x%x\n", reg); + reg = readl(syscon + 0x1420); + pr_err("NCP_SYSCON_PWR_PWRUPL2LGCSTG1: 0x%x\n", reg); + reg = readl(syscon + 0x1424); + pr_err("NCP_SYSCON_PWR_PWRUPL2LGCSTG2: 0x%x\n", reg); + reg = readl(syscon + 0x1428); + pr_err("NCP_SYSCON_PWR_PWRUPL2HSRAM: 0x%x\n", reg); + reg = readl(syscon + 0x142c); + pr_err("NCP_SYSCON_PWR_ACEPWRDNRQ: 0x%x\n", reg); + reg = readl(syscon + 0x1430); + pr_err("NCP_SYSCON_PWR_ISOLATEL2MIS: 0x%x\n", reg); + reg = readl(syscon + 0x1438); + pr_err("NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK: 0x%x\n", reg); + reg = readl(syscon + 0x143c); + pr_err("NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK: 0x%x\n", reg); + reg = readl(syscon + 0x1440); + pr_err("NCP_SYSCON_PWR_STANDBYWFIL2: 0x%x\n", reg); + reg = readl(syscon + 0x1444); + pr_err("NCP_SYSCON_PWR_CSYSACK_TS: 0x%x\n", reg); + reg = readl(syscon + 0x1448); + pr_err("NCP_SYSCON_PWR_CACTIVE_TS: 0x%x\n", reg); + reg = readl(syscon + 0x144c); + pr_err("NCP_SYSCON_PWR_CSYSACK_CNT: 0x%x\n", reg); + reg = readl(syscon + 0x1450); + pr_err("NCP_SYSCON_PWR_CACTIVE_CNT: 0x%x\n", reg); + reg = readl(syscon + 0x1454); + pr_err("NCP_SYSCON_PWR_CSYSACK_ATB: 0x%x\n", reg); + reg = readl(syscon + 0x1458); + pr_err("NCP_SYSCON_PWR_CACTIVE_ATB: 0x%x\n", reg); + reg = readl(syscon + 0x145c); + pr_err("NCP_SYSCON_PWR_CSYSACK_APB: 0x%x\n", reg); + reg = readl(syscon + 0x1460); + pr_err("NCP_SYSCON_PWR_CACTIVE_APB: 0x%x\n", reg); + reg = readl(syscon + 0x1464); + pr_err("NCP_SYSCON_PWR_NACEPWRDNACK: 0x%x\n", reg); + reg = readl(syscon + 0x1468); + pr_err("NCP_SYSCON_PWR_CACTIVEM_EAGM: 0x%x\n", reg); + reg = readl(syscon + 0x146c); + pr_err("NCP_SYSCON_PWR_CACTIVEM_EAGS: 0x%x\n", reg); + reg = readl(syscon + 0x1470); + pr_err("NCP_SYSCON_PWR_CACTIVES_EAGM: 0x%x\n", reg); + reg = readl(syscon + 0x1474); + pr_err("NCP_SYSCON_PWR_CACTIVES_EAGS: 0x%x\n", reg); + reg = readl(syscon + 0x1480); + pr_err("NCP_SYSCON_PWR_PWRUPCPUSTG1: 0x%x\n", reg); + reg = readl(syscon + 0x1484); + pr_err("NCP_SYSCON_PWR_PWRUPCPUSTG2: 0x%x\n", reg); + reg = readl(syscon + 0x1488); + pr_err("NCP_SYSCON_PWR_PWRUPCPURAM: 0x%x\n", reg); + reg = readl(syscon + 0x148c); + pr_err("NCP_SYSCON_PWR_ISOLATECPU: 0x%x\n", reg); + reg = readl(syscon + 0x1490); + pr_err("NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK: 0x%x\n", reg); + reg = readl(syscon + 0x1494); + pr_err("NCP_SYSCON_PWR_NPWRUPCPURAM_ACK: 0x%x\n", reg); + reg = readl(syscon + 0x1498); + pr_err("NCP_SYSCON_PWR_QACTIVE: 0x%x\n", reg); + reg = readl(syscon + 0x149C); + pr_err("NCP_SYSCON_PWR_STANDBYWFI: 0x%x\n", reg); + reg = readl(syscon + 0x14A0); + pr_err("NCP_SYSCON_PWR_STANDBYWFE: 0x%x\n", reg); + reg = readl(syscon + 0x14A4); + pr_err("NCP_SYSCON_PWR_DBGNOPWRDWN: 0x%x\n", reg); + reg = readl(syscon + 0x14A8); + pr_err("NCP_SYSCON_PWR_DBGPWRUPREQ: 0x%x\n", reg); + reg = readl(syscon + 0x1040); + pr_err("NCP_SYSCON_RESET_AXIS: 0x%x\n", reg); + reg = readl(syscon + 0x1044); + pr_err("NCP_SYSCON_RESET_AXIS-WORD1: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_RESET_CPU); + pr_err("NCP_SYSCON_RESET_CPU: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_HOLD_DBG); + pr_err("NCP_SYSCON_HOLD_DBG: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_HOLD_L2); + pr_err("NCP_SYSCON_HOLD_L2: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_HOLD_CPU); + 
pr_err("NCP_SYSCON_HOLD_CPU: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_PWRUP_CPU_RST); + pr_err("NCP_SYSCON_PWRUP_CPU_RST: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_RESET_STATUS); + pr_err("NCP_SYSCON_RESET_STATUS: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_RESET_CORE_STATUS); + pr_err("NCP_SYSCON_RESET_CORE_STATUS: 0x%x\n", reg); + + +#if 0 + reg = readl(syscon + NCP_SYSCON_MCG_CSW_CPU); + pr_err("NCP_SYSCON_MCG_CSW_CPU: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MCG_CSW_SYS); + pr_err("NCP_SYSCON_MCG_CSW_SYS: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MCG_DIV_CPU); + pr_err("NCP_SYSCON_MCG_DIV_CPU: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MCG_DIV_SYS); + pr_err("NCP_SYSCON_MCG_DIV_SYS: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_CLKDEBUG); + pr_err("NCP_SYSCON_CLKDEBUG: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_EVENT_ENB); + pr_err("NCP_SYSCON_EVENT_ENB: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_CPU_FAST_INT); + pr_err("NCP_SYSCON_CPU_FAST_INT: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_GIC_DISABLE); + pr_err("NCP_SYSCON_GIC_DISABLE: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_CP15SDISABLE); + pr_err("NCP_SYSCON_CP15SDISABLE: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_LDO_CTL); + pr_err("NCP_SYSCON_LDO_CTL: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_SHWK_QOS); + pr_err("NCP_SYSCON_SHWK_QOS: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_FUSE_RTO); + pr_err("NCP_SYSCON_FUSE_RTO: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_PFUSE); + pr_err("NCP_SYSCON_PFUSE: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_FUSE_STAT); + pr_err("NCP_SYSCON_FUSE_STAT: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_SCRATCH); + pr_err("NCP_SYSCON_SCRATCH: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI0); + pr_err("NCP_SYSCON_MASK_IPI0: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI1); + pr_err("NCP_SYSCON_MASK_IPI1: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI2); + pr_err("NCP_SYSCON_MASK_IPI2: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI3); + pr_err("NCP_SYSCON_MASK_IPI3: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI4); + pr_err("NCP_SYSCON_MASK_IPI4: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI5); + pr_err("NCP_SYSCON_MASK_IPI5: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI6); + pr_err("NCP_SYSCON_MASK_IPI6: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI7); + pr_err("NCP_SYSCON_MASK_IPI7: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI8); + pr_err("NCP_SYSCON_MASK_IPI8: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI9); + pr_err("NCP_SYSCON_MASK_IPI9: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI10); + pr_err("NCP_SYSCON_MASK_IPI10: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI11); + pr_err("NCP_SYSCON_MASK_IPI11: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI12); + pr_err("NCP_SYSCON_MASK_IPI12: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI13); + pr_err("NCP_SYSCON_MASK_IPI13: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI14); + pr_err("NCP_SYSCON_MASK_IPI14: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_MASK_IPI15); + pr_err("NCP_SYSCON_MASK_IPI15: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_SPARE0); + pr_err("NCP_SYSCON_SPARE0: 0x%x\n", reg); + reg = readl(syscon + NCP_SYSCON_STOP_CLK_CPU); + pr_err("NCP_SYSCON_STOP_CLK_CPU: 0x%x\n", reg); +#endif + + + iounmap(syscon); +} + + +void pm_dump_L2_registers(void) +{ + u32 reg; + + void 
__iomem *syscon; + + syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K); + if (WARN_ON(!syscon)) + return; + reg = readl(syscon + 0x1580); + pr_err("NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2: 0x%x\n", reg); + reg = readl(syscon + 0x1584); + pr_err("NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1: 0x%x\n", reg); + reg = readl(syscon + 0x1588); + pr_err("NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0: 0x%x\n", reg); + reg = readl(syscon + 0x158c); + pr_err("NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2: 0x%x\n", reg); + reg = readl(syscon + 0x1590); + pr_err("NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1: 0x%x\n", reg); + reg = readl(syscon + 0x1594); + pr_err("NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0: 0x%x\n", reg); + reg = readl(syscon + 0x1598); + pr_err("NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2: 0x%x\n", reg); + reg = readl(syscon + 0x159c); + pr_err("NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1: 0x%x\n", reg); + reg = readl(syscon + 0x15a0); + pr_err("NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0: 0x%x\n", reg); + reg = readl(syscon + 0x15a4); + pr_err("NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2: 0x%x\n", reg); + reg = readl(syscon + 0x15a8); + pr_err("NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1: 0x%x\n", reg); + reg = readl(syscon + 0x15ac); + pr_err("NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0: 0x%x\n", reg); + + iounmap(syscon); +} + + +void pm_dump_dickens(void) +{ + + void __iomem *dickens; + u32 status; + u32 i; + + dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M); + if (dickens == NULL) { + pr_err("DICKENS: Failed to map the dickens registers\n"); + return; + } + + for (i = 0; i < DKN_HNF_TOTAL_NODES; ++i) { + status = readl( + dickens + (0x10000 * (DKN_HNF_NODE_ID + i)) + + DKN_HNF_SNOOP_DOMAIN_CTL); + udelay(1); + pr_err("DKN_HNF_SNOOP_DOMAIN_CTL[%d]: 0x%x\n", i, status); + } + + status = readl( + dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET) + + DKN_MN_DVM_DOMAIN_CTL); + + pr_err("DKN_MN_DVM_DOMAIN_CTL: 0x%x\n", status); + + + iounmap(dickens); + + +} + +#endif diff --git a/arch/arm/mach-axxia/lsi_power_management.h b/arch/arm/mach-axxia/lsi_power_management.h new file mode 100644 index 0000000..4cb6d1f --- /dev/null +++ b/arch/arm/mach-axxia/lsi_power_management.h @@ -0,0 +1,183 @@ +/* + * lsi_power_management.h + * + * Created on: Jun 23, 2014 + * Author: z8cpaul + */ + +#ifndef LSI_POWER_MANAGEMENT_H_ +#define LSI_POWER_MANAGEMENT_H_ + + +#define NCP_SYSCON_MCG_CSW_CPU (0x00000000) +#define NCP_SYSCON_MCG_CSW_SYS (0x00000004) +#define NCP_SYSCON_MCG_DIV_CPU (0x00000008) +#define NCP_SYSCON_MCG_DIV_SYS (0x0000000c) +#define NCP_SYSCON_CLKDEBUG (0x00000010) +#define NCP_SYSCON_EVENT_ENB (0x00000014) +#define NCP_SYSCON_CPU_FAST_INT (0x00000018) +#define NCP_SYSCON_GIC_DISABLE (0x0000001c) +#define NCP_SYSCON_CP15SDISABLE (0x00000020) +#define NCP_SYSCON_LRSTDISABLE (0x00000024) +#define NCP_SYSCON_LDO_CTL (0x00000028) +#define NCP_SYSCON_SHWK_QOS (0x0000002c) +#define NCP_SYSCON_FUSE_RTO (0x00000030) +#define NCP_SYSCON_PFUSE (0x00000034) +#define NCP_SYSCON_FUSE_STAT (0x00000038) +#define NCP_SYSCON_SCRATCH (0x0000003c) +#define NCP_SYSCON_MASK_IPI0 (0x00000040) +#define NCP_SYSCON_MASK_IPI1 (0x00000044) +#define NCP_SYSCON_MASK_IPI2 (0x00000048) +#define NCP_SYSCON_MASK_IPI3 (0x0000004c) +#define NCP_SYSCON_MASK_IPI4 (0x00000050) +#define NCP_SYSCON_MASK_IPI5 (0x00000054) +#define NCP_SYSCON_MASK_IPI6 (0x00000058) +#define NCP_SYSCON_MASK_IPI7 (0x0000005c) +#define NCP_SYSCON_MASK_IPI8 (0x00000060) +#define NCP_SYSCON_MASK_IPI9 (0x00000064) +#define NCP_SYSCON_MASK_IPI10 (0x00000068) +#define NCP_SYSCON_MASK_IPI11 (0x0000006c) +#define 
NCP_SYSCON_MASK_IPI12 (0x00000070) +#define NCP_SYSCON_MASK_IPI13 (0x00000074) +#define NCP_SYSCON_MASK_IPI14 (0x00000078) +#define NCP_SYSCON_MASK_IPI15 (0x0000007c) +#define NCP_SYSCON_MASK_IPI16 (0x00000080) +#define NCP_SYSCON_MASK_IPI17 (0x00000084) +#define NCP_SYSCON_MASK_IPI18 (0x00000088) +#define NCP_SYSCON_SPARE0 (0x0000008c) +#define NCP_SYSCON_STOP_CLK_CPU (0x00000090) + + +#define NCP_SYSCON_RESET_STATUS (0x00000100) +#define NCP_SYSCON_RESET_CORE_STATUS (0x00000108) + +#define NCP_SYSCON_KEY (0x00001000) +#define NCP_SYSCON_RESET_CTL (0x00001008) +#define NCP_SYSCON_RESET_CPU (0x0000100c) +#define NCP_SYSCON_HOLD_CPU (0x00001010) +#define NCP_SYSCON_HOLD_PTM (0x00001014) +#define NCP_SYSCON_HOLD_L2 (0x00001018) +#define NCP_SYSCON_HOLD_DBG (0x0000101c) + +#define NCP_SYSCON_PWRUP_CPU_RST (0x00001030) + +#define NCP_SYSCON_RESET_AXIS (0x00001040) +#define NCP_SYSCON_RESET_AXIS_ACCESS_SIZE (0x00000008) + +#define NCP_SYSCON_PWR_CLKEN (0x00001400) +#define NCP_SYSCON_ENABLE_CLKEN_SET (0x00001404) +#define NCP_SYSCON_PWR_ACINACTM (0x00001408) +#define NCP_SYSCON_PWR_CHIPSELECTEN (0x0000140c) +#define NCP_SYSCON_PWR_CSYSREQ_TS (0x00001410) +#define NCP_SYSCON_PWR_CSYSREQ_CNT (0x00001414) +#define NCP_SYSCON_PWR_CSYSREQ_ATB (0x00001418) +#define NCP_SYSCON_PWR_CSYSREQ_APB (0x0000141c) +#define NCP_SYSCON_PWR_PWRUPL2LGCSTG1 (0x00001420) +#define NCP_SYSCON_PWR_PWRUPL2LGCSTG2 (0x00001424) +#define NCP_SYSCON_PWR_PWRUPL2HSRAM (0x00001428) +#define NCP_SYSCON_PWR_ACEPWRDNRQ (0x0000142c) +#define NCP_SYSCON_PWR_ISOLATEL2MISC (0x00001430) +#define NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK (0x00001438) +#define NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK (0x0000143c) +#define NCP_SYSCON_PWR_STANDBYWFIL2 (0x00001440) +#define NCP_SYSCON_PWR_CSYSACK_TS (0x00001444) +#define NCP_SYSCON_PWR_CACTIVE_TS (0x00001448) +#define NCP_SYSCON_PWR_CSYSACK_CNT (0x0000144c) +#define NCP_SYSCON_PWR_CACTIVE_CNT (0x00001450) +#define NCP_SYSCON_PWR_CSYSACK_ATB (0x00001454) +#define NCP_SYSCON_PWR_CACTIVE_ATB (0x00001458) +#define NCP_SYSCON_PWR_CSYSACK_APB (0x0000145c) +#define NCP_SYSCON_PWR_CACTIVE_APB (0x00001460) +#define NCP_SYSCON_PWR_NACEPWRDNACK (0x00001464) +#define NCP_SYSCON_PWR_CACTIVEM_EAGM (0x00001468) +#define NCP_SYSCON_PWR_CACTIVEM_EAGS (0x0000146c) +#define NCP_SYSCON_PWR_CACTIVES_EAGM (0x00001470) +#define NCP_SYSCON_PWR_CACTIVES_EAGS (0x00001474) +#define NCP_SYSCON_PWR_PWRUPCPUSTG1 (0x00001480) +#define NCP_SYSCON_PWR_PWRUPCPUSTG2 (0x00001484) +#define NCP_SYSCON_PWR_PWRUPCPURAM (0x00001488) +#define NCP_SYSCON_PWR_ISOLATECPU (0x0000148c) +#define NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK (0x00001490) +#define NCP_SYSCON_PWR_NPWRUPCPURAM_ACK (0x00001494) +#define NCP_SYSCON_PWR_QACTIVE (0x00001498) +#define NCP_SYSCON_PWR_STANDBYWFI (0x0000149c) +#define NCP_SYSCON_PWR_STANDBYWFE (0x000014a0) +#define NCP_SYSCON_PWR_DBGNOPWRDWN (0x000014a4) +#define NCP_SYSCON_PWR_DBGPWRUPREQ (0x000014a8) +#define NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2 (0x00001580) +#define NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1 (0x00001584) +#define NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0 (0x00001588) +#define NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2 (0x0000158c) +#define NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1 (0x00001590) +#define NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0 (0x00001594) +#define NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2 (0x00001598) +#define NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1 (0x0000159c) +#define NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0 (0x000015a0) +#define NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2 (0x000015a4) +#define 
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1 (0x000015a8) +#define NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0 (0x000015ac) + +#define RAM_BANK0_MASK (0x0FFF0000) +#define RAM_BANK1_LS_MASK (0xF0000000) +#define RAM_BANK1_MS_MASK (0x000000FF) +#define RAM_BANK2_MASK (0x000FFF00) +#define RAM_BANK3_MASK (0xFFF00000) +#define RAM_ALL_MASK (0xFFFFFFFF) + +/* DICKENS REGISTERS (Miscelaneous Node) */ +#define DKN_MN_NODE_ID (0x0) +#define DKN_DVM_DOMAIN_OFFSET (0x0) +#define DKN_MN_DVM_DOMAIN_CTL (0x200) +#define DKN_MN_DVM_DOMAIN_CTL_SET (0x210) +#define DKN_MN_DVM_DOMAIN_CTL_CLR (0x220) + +/* DICKENS HN-F (Fully-coherent Home Node) */ +#define DKN_HNF_NODE_ID (0x20) +#define DKN_HNF_TOTAL_NODES (0x8) +#define DKN_HNF_SNOOP_DOMAIN_CTL (0x200) +#define DKN_HNF_SNOOP_DOMAIN_CTL_SET (0x210) +#define DKN_HNF_SNOOP_DOMAIN_CTL_CLR (0x220) + +/* DICKENS clustid to Node */ +#define DKN_CLUSTER0_NODE (1) +#define DKN_CLUSTER1_NODE (9) +#define DKN_CLUSTER2_NODE (11) +#define DKN_CLUSTER3_NODE (19) + +/* PO RESET cluster id to bit */ +#define PORESET_CLUSTER0 (0x10000) +#define PORESET_CLUSTER1 (0x20000) +#define PORESET_CLUSTER2 (0x40000) +#define PORESET_CLUSTER3 (0x80000) + +/* SYSCON KEY Value */ +#define VALID_KEY_VALUE (0xAB) + +#define MAX_NUM_CLUSTERS (4) +#define CORES_PER_CLUSTER (4) + +typedef struct { + u32 cpu; + u32 cluster; +} pm_data; + + +void pm_cpu_shutdown(u32 cpu); +int pm_cpu_powerup(u32 cpu); +void pm_debug_read_pwr_registers(void); +void pm_dump_L2_registers(void); +void pm_cpu_logical_shutdown(void *data); +int pm_cpu_logical_die(pm_data *pm_request); +int pm_cpul2_logical_die(pm_data *pm_request); +unsigned long pm_get_powered_down_cpu(void); +bool pm_cpu_last_of_cluster(u32 cpu); +void pm_L2_logical_shutdown(void *data); +void pm_dump_dickens(void); +void pm_init_cpu(u32 cpu); +void pm_cpu_logical_powerup(void); + +extern bool pm_in_progress[]; +extern bool cluster_power_up[]; + + +#endif /* LSI_POWER_MANAGEMENT_H_ */ diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c index 44cde07..2d4d0e9 100644 --- a/arch/arm/mach-axxia/platsmp.c +++ b/arch/arm/mach-axxia/platsmp.c @@ -23,6 +23,7 @@ #include <asm/virt.h> #include "axxia.h" +#include "lsi_power_management.h" #include <mach/axxia-gic.h> extern void axxia_secondary_startup(void); @@ -88,10 +89,25 @@ static DEFINE_RAW_SPINLOCK(boot_lock); void __cpuinit axxia_secondary_init(unsigned int cpu) { - /* Fixup for cross-cluster SEV */ - do_fixup_sev(); + int phys_cpu, cluster; + + phys_cpu = cpu_logical_map(cpu); + cluster = (phys_cpu / 4) << 8; - axxia_gic_secondary_init(); + /* + * Only execute this when powering up a cpu for hotplug. 
+ */ + if (!pm_in_progress[cpu]) { + /* Fixup for cross-cluster SEV */ + do_fixup_sev(); + + axxia_gic_secondary_init(); + } else { + axxia_gic_secondary_init(); + pm_cpu_logical_powerup(); + pm_in_progress[cpu] = false; + cluster_power_up[cluster] = false; + } /* * Let the primary processor know we're out of the @@ -108,8 +124,12 @@ void __cpuinit axxia_secondary_init(unsigned int cpu) int __cpuinit axxia_boot_secondary(unsigned int cpu, struct task_struct *idle) { - unsigned long timeout; + int phys_cpu, cluster; + unsigned long timeout; + unsigned long powered_down_cpu; + int rVal = 0; + /* * Set synchronisation state between this boot processor @@ -117,6 +137,21 @@ int __cpuinit axxia_boot_secondary(unsigned int cpu, struct task_struct *idle) */ _raw_spin_lock(&boot_lock); + phys_cpu = cpu_logical_map(cpu); + + powered_down_cpu = pm_get_powered_down_cpu(); + + if (powered_down_cpu & (1 << phys_cpu)) { + pm_in_progress[cpu] = true; + + rVal = pm_cpu_powerup(phys_cpu); + if (rVal) { + _raw_spin_unlock(&boot_lock); + return rVal; + } + + } + /* * In the Axxia, the bootloader does not put the secondary cores * into a wait-for-event (wfe) or wait-for-interrupt (wfi) state @@ -260,6 +295,7 @@ struct smp_operations axxia_smp_ops __initdata = { .smp_boot_secondary = axxia_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = axxia_platform_cpu_die, + .cpu_kill = axxia_platform_cpu_kill, #endif }; -- 1.8.1.4
