author	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-22 09:08:39 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-22 09:08:39 -0700
commit	e6c81cce5699ec6be3a7533b5ad7a062ab3357f2 (patch)
tree	4592735bbfb17f163217d0bb80877dcc22a869c0 /arch/arm/mach-exynos
parent	d0440c59f52d31aa7f74ba8e35cc22ee96acea84 (diff)
parent	a018bb2ff95868ea592212b6735b35836fda268b (diff)
Merge tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC platform updates from Olof Johansson:
 "Our SoC branch usually contains expanded support for new SoCs and
  other core platform code. In this case, that includes:

   - support for the new Annapurna Labs "Alpine" platform

   - a rework greatly simplifying adding new platform support to the
     MCPM subsystem (Multi-cluster power management)

   - cpuidle and PM improvements for Exynos3250

   - misc updates for Renesas, OMAP, Meson, i.MX. Some of these could
     have gone in other branches but ended up here for various reasons"

* tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (53 commits)
  ARM: alpine: add support for generic pci
  ARM: Exynos: migrate DCSCB to the new MCPM backend abstraction
  ARM: vexpress: migrate DCSCB to the new MCPM backend abstraction
  ARM: vexpress: DCSCB: tighten CPU validity assertion
  ARM: vexpress: migrate TC2 to the new MCPM backend abstraction
  ARM: MCPM: move the algorithmic complexity to the core code
  ARM: EXYNOS: allow cpuidle driver usage on Exynos3250 SoC
  ARM: EXYNOS: add AFTR mode support for Exynos3250
  ARM: EXYNOS: add code for setting/clearing boot flag
  ARM: EXYNOS: fix CPU1 hotplug on Exynos3250
  ARM: S3C64XX: Use fixed IRQ bases to avoid conflicts on Cragganmore
  ARM: cygnus: fix const declaration bcm_cygnus_dt_compat
  ARM: DRA7: hwmod: Fix the hwmod class for GPTimer4
  ARM: DRA7: hwmod: Add data for GPTimers 13 through 16
  ARM: EXYNOS: Remove left over 'extra_save'
  ARM: EXYNOS: Constify exynos_pm_data array
  ARM: EXYNOS: use static in suspend.c
  ARM: EXYNOS: Use platform device name as power domain name
  ARM: EXYNOS: add support for async-bridge clocks for pm_domains
  ARM: omap-device: add missed callback for suspend-to-disk
  ...
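For orientation, the MCPM rework mentioned above replaces the old power_up/power_down hooks (and per-backend use counting) with per-step callbacks handled by the core code. Below is a minimal, hypothetical sketch of a backend written against that new interface; the ops field names and the mcpm_platform_register()/early_initcall() usage mirror mcpm-exynos.c in the diff further down, while everything prefixed "myplat_" is a placeholder and not code from this merge.

/* Hypothetical example only -- myplat_* names are placeholders. */
#include <linux/init.h>
#include <asm/mcpm.h>

static int myplat_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	/* Flip the per-CPU power-enable bit; the MCPM core now does
	 * all use counting and locking on our behalf. */
	return 0;
}

static int myplat_cluster_powerup(unsigned int cluster)
{
	/* Power on cluster-level resources (L2/COMMON block). */
	return 0;
}

static void myplat_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	/* Arm the hardware so power is cut once this CPU executes WFI. */
}

static void myplat_cluster_powerdown_prepare(unsigned int cluster)
{
	/* Arm cluster-level power-down (called only for the last CPU). */
}

static void myplat_cpu_cache_disable(void)
{
	/* Disable and flush the local CPU cache. */
}

static void myplat_cluster_cache_disable(void)
{
	/* Flush all cache levels and leave cluster coherency (CCI). */
}

static const struct mcpm_platform_ops myplat_power_ops = {
	.cpu_powerup		= myplat_cpu_powerup,
	.cluster_powerup	= myplat_cluster_powerup,
	.cpu_powerdown_prepare	= myplat_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = myplat_cluster_powerdown_prepare,
	.cpu_cache_disable	= myplat_cpu_cache_disable,
	.cluster_cache_disable	= myplat_cluster_cache_disable,
};

static int __init myplat_mcpm_init(void)
{
	return mcpm_platform_register(&myplat_power_ops);
}
early_initcall(myplat_mcpm_init);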
Diffstat (limited to 'arch/arm/mach-exynos')
-rw-r--r--  arch/arm/mach-exynos/common.h        6
-rw-r--r--  arch/arm/mach-exynos/exynos.c        1
-rw-r--r--  arch/arm/mach-exynos/firmware.c     33
-rw-r--r--  arch/arm/mach-exynos/mcpm-exynos.c 247
-rw-r--r--  arch/arm/mach-exynos/platsmp.c      23
-rw-r--r--  arch/arm/mach-exynos/pm.c           12
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c   29
-rw-r--r--  arch/arm/mach-exynos/regs-pmu.h      3
-rw-r--r--  arch/arm/mach-exynos/smc.h           9
-rw-r--r--  arch/arm/mach-exynos/suspend.c      22
10 files changed, 167 insertions, 218 deletions
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index f70eca7..acd5b56 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -126,6 +126,12 @@ enum {
void exynos_firmware_init(void);
+/* CPU BOOT mode flag for Exynos3250 SoC bootloader */
+#define C2_STATE (1 << 3)
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode);
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode);
+
extern u32 exynos_get_eint_wake_mask(void);
#ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 8576a9f..bcde0dd 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -214,6 +214,7 @@ static void __init exynos_dt_machine_init(void)
of_machine_is_compatible("samsung,exynos4212") ||
(of_machine_is_compatible("samsung,exynos4412") &&
of_machine_is_compatible("samsung,trats2")) ||
+ of_machine_is_compatible("samsung,exynos3250") ||
of_machine_is_compatible("samsung,exynos5250"))
platform_device_register(&exynos_cpuidle);
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 4791a3c..1bd3576 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -48,7 +48,13 @@ static int exynos_do_idle(unsigned long mode)
__raw_writel(virt_to_phys(exynos_cpu_resume_ns),
sysram_ns_base_addr + 0x24);
__raw_writel(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
- exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
+ if (soc_is_exynos3250()) {
+ exynos_smc(SMC_CMD_SAVE, OP_TYPE_CORE,
+ SMC_POWERSTATE_IDLE, 0);
+ exynos_smc(SMC_CMD_SHUTDOWN, OP_TYPE_CLUSTER,
+ SMC_POWERSTATE_IDLE, 0);
+ } else
+ exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
break;
case FW_DO_IDLE_SLEEP:
exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);
@@ -206,3 +212,28 @@ void __init exynos_firmware_init(void)
outer_cache.configure = exynos_l2_configure;
}
}
+
+#define REG_CPU_STATE_ADDR (sysram_ns_base_addr + 0x28)
+#define BOOT_MODE_MASK 0x1f
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+
+ if (mode & BOOT_MODE_MASK)
+ tmp &= ~BOOT_MODE_MASK;
+
+ tmp |= mode;
+ __raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
+
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+ tmp &= ~mode;
+ __raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index b0d3c2e..9bdf547 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -61,25 +61,7 @@ static void __iomem *ns_sram_base_addr;
: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
"r9", "r10", "lr", "memory")
-/*
- * We can't use regular spinlocks. In the switcher case, it is possible
- * for an outbound CPU to call power_down() after its inbound counterpart
- * is already live using the same logical CPU number which trips lockdep
- * debugging.
- */
-static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-static int
-cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
-
-#define exynos_cluster_usecnt(cluster) \
- (cpu_use_count[0][cluster] + \
- cpu_use_count[1][cluster] + \
- cpu_use_count[2][cluster] + \
- cpu_use_count[3][cluster])
-
-#define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)
-
-static int exynos_power_up(unsigned int cpu, unsigned int cluster)
+static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
@@ -88,127 +70,65 @@ static int exynos_power_up(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;
- /*
- * Since this is called with IRQs enabled, and no arch_spin_lock_irq
- * variant exists, we need to disable IRQs manually here.
- */
- local_irq_disable();
- arch_spin_lock(&exynos_mcpm_lock);
-
- cpu_use_count[cpu][cluster]++;
- if (cpu_use_count[cpu][cluster] == 1) {
- bool was_cluster_down =
- (exynos_cluster_usecnt(cluster) == 1);
-
- /*
- * Turn on the cluster (L2/COMMON) and then power on the
- * cores.
- */
- if (was_cluster_down)
- exynos_cluster_power_up(cluster);
-
- exynos_cpu_power_up(cpunr);
- } else if (cpu_use_count[cpu][cluster] != 2) {
- /*
- * The only possible values are:
- * 0 = CPU down
- * 1 = CPU (still) up
- * 2 = CPU requested to be up before it had a chance
- * to actually make itself down.
- * Any other value is a bug.
- */
- BUG();
- }
+ exynos_cpu_power_up(cpunr);
+ return 0;
+}
- arch_spin_unlock(&exynos_mcpm_lock);
- local_irq_enable();
+static int exynos_cluster_powerup(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ if (cluster >= EXYNOS5420_NR_CLUSTERS)
+ return -EINVAL;
+ exynos_cluster_power_up(cluster);
return 0;
}
-/*
- * NOTE: This function requires the stack data to be visible through power down
- * and can only be executed on processors like A15 and A7 that hit the cache
- * with the C bit clear in the SCTLR register.
- */
-static void exynos_power_down(void)
+static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpu, cluster;
- bool last_man = false, skip_wfi = false;
- unsigned int cpunr;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+ unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
cluster >= EXYNOS5420_NR_CLUSTERS);
+ exynos_cpu_power_down(cpunr);
+}
- __mcpm_cpu_going_down(cpu, cluster);
-
- arch_spin_lock(&exynos_mcpm_lock);
- BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
- cpu_use_count[cpu][cluster]--;
- if (cpu_use_count[cpu][cluster] == 0) {
- exynos_cpu_power_down(cpunr);
-
- if (exynos_cluster_unused(cluster)) {
- exynos_cluster_power_down(cluster);
- last_man = true;
- }
- } else if (cpu_use_count[cpu][cluster] == 1) {
- /*
- * A power_up request went ahead of us.
- * Even if we do not want to shut this CPU down,
- * the caller expects a certain state as if the WFI
- * was aborted. So let's continue with cache cleaning.
- */
- skip_wfi = true;
- } else {
- BUG();
- }
-
- if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
- arch_spin_unlock(&exynos_mcpm_lock);
-
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
- /*
- * On the Cortex-A15 we need to disable
- * L2 prefetching before flushing the cache.
- */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3\n\t"
- "isb\n\t"
- "dsb"
- : : "r" (0x400));
- }
+static void exynos_cluster_powerdown_prepare(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= EXYNOS5420_NR_CLUSTERS);
+ exynos_cluster_power_down(cluster);
+}
- /* Flush all cache levels for this cluster. */
- exynos_v7_exit_coherency_flush(all);
+static void exynos_cpu_cache_disable(void)
+{
+ /* Disable and flush the local CPU cache. */
+ exynos_v7_exit_coherency_flush(louis);
+}
+static void exynos_cluster_cache_disable(void)
+{
+ if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
/*
- * Disable cluster-level coherency by masking
- * incoming snoops and DVM messages:
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
*/
- cci_disable_port_by_cpu(mpidr);
-
- __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
- } else {
- arch_spin_unlock(&exynos_mcpm_lock);
-
- /* Disable and flush the local CPU cache. */
- exynos_v7_exit_coherency_flush(louis);
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3\n\t"
+ "isb\n\t"
+ "dsb"
+ : : "r" (0x400));
}
- __mcpm_cpu_down(cpu, cluster);
-
- /* Now we are prepared for power-down, do it: */
- if (!skip_wfi)
- wfi();
+ /* Flush all cache levels for this cluster. */
+ exynos_v7_exit_coherency_flush(all);
- /* Not dead at this point? Let our caller cope. */
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ cci_disable_port_by_cpu(read_cpuid_mpidr());
}
static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
@@ -222,10 +142,8 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
/* Wait for the core state to be OFF */
while (tries--) {
- if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
- if ((exynos_cpu_power_state(cpunr) == 0))
- return 0; /* success: the CPU is halted */
- }
+ if ((exynos_cpu_power_state(cpunr) == 0))
+ return 0; /* success: the CPU is halted */
/* Otherwise, wait and retry: */
msleep(1);
@@ -234,63 +152,23 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
return -ETIMEDOUT; /* timeout */
}
-static void exynos_powered_up(void)
-{
- unsigned int mpidr, cpu, cluster;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
- arch_spin_lock(&exynos_mcpm_lock);
- if (cpu_use_count[cpu][cluster] == 0)
- cpu_use_count[cpu][cluster] = 1;
- arch_spin_unlock(&exynos_mcpm_lock);
-}
-
-static void exynos_suspend(u64 residency)
+static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpunr;
-
- exynos_power_down();
-
- /*
- * Execution reaches here only if cpu did not power down.
- * Hence roll back the changes done in exynos_power_down function.
- *
- * CAUTION: "This function requires the stack data to be visible through
- * power down and can only be executed on processors like A15 and A7
- * that hit the cache with the C bit clear in the SCTLR register."
- */
- mpidr = read_cpuid_mpidr();
- cpunr = exynos_pmu_cpunr(mpidr);
-
- exynos_cpu_power_up(cpunr);
+ /* especially when resuming: make sure power control is set */
+ exynos_cpu_powerup(cpu, cluster);
}
static const struct mcpm_platform_ops exynos_power_ops = {
- .power_up = exynos_power_up,
- .power_down = exynos_power_down,
+ .cpu_powerup = exynos_cpu_powerup,
+ .cluster_powerup = exynos_cluster_powerup,
+ .cpu_powerdown_prepare = exynos_cpu_powerdown_prepare,
+ .cluster_powerdown_prepare = exynos_cluster_powerdown_prepare,
+ .cpu_cache_disable = exynos_cpu_cache_disable,
+ .cluster_cache_disable = exynos_cluster_cache_disable,
.wait_for_powerdown = exynos_wait_for_powerdown,
- .suspend = exynos_suspend,
- .powered_up = exynos_powered_up,
+ .cpu_is_up = exynos_cpu_is_up,
};
-static void __init exynos_mcpm_usage_count_init(void)
-{
- unsigned int mpidr, cpu, cluster;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
- cluster >= EXYNOS5420_NR_CLUSTERS);
-
- cpu_use_count[cpu][cluster] = 1;
-}
-
/*
* Enable cluster-level coherency, in preparation for turning on the MMU.
*/
@@ -302,19 +180,6 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
"b cci_enable_port_for_self");
}
-static void __init exynos_cache_off(void)
-{
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
- /* disable L2 prefetching on the Cortex-A15 */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3\n\t"
- "isb\n\t"
- "dsb"
- : : "r" (0x400));
- }
- exynos_v7_exit_coherency_flush(all);
-}
-
static const struct of_device_id exynos_dt_mcpm_match[] = {
{ .compatible = "samsung,exynos5420" },
{ .compatible = "samsung,exynos5800" },
@@ -370,13 +235,11 @@ static int __init exynos_mcpm_init(void)
*/
pmu_raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);
- exynos_mcpm_usage_count_init();
-
ret = mcpm_platform_register(&exynos_power_ops);
if (!ret)
ret = mcpm_sync_init(exynos_pm_power_up_setup);
if (!ret)
- ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
+ ret = mcpm_loopback(exynos_cluster_cache_disable); /* turn on the CCI */
if (ret) {
iounmap(ns_sram_base_addr);
return ret;
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index d2e9f12..ebd135b 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -126,6 +126,8 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
*/
void exynos_cpu_power_down(int cpu)
{
+ u32 core_conf;
+
if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
/*
* Bypass power down for CPU0 during suspend. Check for
@@ -137,7 +139,10 @@ void exynos_cpu_power_down(int cpu)
if (!(val & S5P_CORE_LOCAL_PWR_EN))
return;
}
- pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+
+ core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+ core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
+ pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
/**
@@ -148,7 +153,12 @@ void exynos_cpu_power_down(int cpu)
*/
void exynos_cpu_power_up(int cpu)
{
- pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+ u32 core_conf = S5P_CORE_LOCAL_PWR_EN;
+
+ if (soc_is_exynos3250())
+ core_conf |= S5P_CORE_AUTOWAKEUP_EN;
+
+ pmu_raw_writel(core_conf,
EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
@@ -226,6 +236,10 @@ static void exynos_core_restart(u32 core_id)
if (!of_machine_is_compatible("samsung,exynos3250"))
return;
+ while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ udelay(10);
+ udelay(10);
+
val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));
@@ -346,7 +360,10 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
call_firmware_op(cpu_boot, core_id);
- arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+ if (soc_is_exynos3250())
+ dsb_sev();
+ else
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
if (pen_release == -1)
break;
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 5685250..cc75ab4 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -127,6 +127,8 @@ int exynos_pm_central_resume(void)
static void exynos_set_wakeupmask(long mask)
{
pmu_raw_writel(mask, S5P_WAKEUP_MASK);
+ if (soc_is_exynos3250())
+ pmu_raw_writel(0x0, S5P_WAKEUP_MASK2);
}
static void exynos_cpu_set_boot_vector(long flags)
@@ -140,7 +142,7 @@ static int exynos_aftr_finisher(unsigned long flags)
{
int ret;
- exynos_set_wakeupmask(0x0000ff3e);
+ exynos_set_wakeupmask(soc_is_exynos3250() ? 0x40003ffe : 0x0000ff3e);
/* Set value of power down register for aftr mode */
exynos_sys_powerdown_conf(SYS_AFTR);
@@ -157,8 +159,13 @@ static int exynos_aftr_finisher(unsigned long flags)
void exynos_enter_aftr(void)
{
+ unsigned int cpuid = smp_processor_id();
+
cpu_pm_enter();
+ if (soc_is_exynos3250())
+ exynos_set_boot_flag(cpuid, C2_STATE);
+
exynos_pm_central_suspend();
if (of_machine_is_compatible("samsung,exynos4212") ||
@@ -178,6 +185,9 @@ void exynos_enter_aftr(void)
exynos_pm_central_resume();
+ if (soc_is_exynos3250())
+ exynos_clear_boot_flag(cpuid, C2_STATE);
+
cpu_pm_exit();
}
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 37266a8..cbe56b3 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -37,6 +37,7 @@ struct exynos_pm_domain {
struct clk *oscclk;
struct clk *clk[MAX_CLK_PER_DOMAIN];
struct clk *pclk[MAX_CLK_PER_DOMAIN];
+ struct clk *asb_clk[MAX_CLK_PER_DOMAIN];
};
static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -45,14 +46,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
void __iomem *base;
u32 timeout, pwr;
char *op;
+ int i;
pd = container_of(domain, struct exynos_pm_domain, pd);
base = pd->base;
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ clk_prepare_enable(pd->asb_clk[i]);
+ }
+
/* Set oscclk before powering off a domain*/
if (!power_on) {
- int i;
-
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
if (IS_ERR(pd->clk[i]))
break;
@@ -81,8 +87,6 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
/* Restore clocks after powering on a domain*/
if (power_on) {
- int i;
-
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
if (IS_ERR(pd->clk[i]))
break;
@@ -92,6 +96,12 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
}
}
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ clk_disable_unprepare(pd->asb_clk[i]);
+ }
+
return 0;
}
@@ -125,12 +135,21 @@ static __init int exynos4_pm_init_power_domain(void)
return -ENOMEM;
}
- pd->pd.name = kstrdup(np->name, GFP_KERNEL);
+ pd->pd.name = kstrdup(dev_name(dev), GFP_KERNEL);
pd->name = pd->pd.name;
pd->base = of_iomap(np, 0);
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ char clk_name[8];
+
+ snprintf(clk_name, sizeof(clk_name), "asb%d", i);
+ pd->asb_clk[i] = clk_get(dev, clk_name);
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ }
+
pd->oscclk = clk_get(dev, "oscclk");
if (IS_ERR(pd->oscclk))
goto no_clk;
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index eb461e1..b761433 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -43,12 +43,14 @@
#define S5P_WAKEUP_STAT 0x0600
#define S5P_EINT_WAKEUP_MASK 0x0604
#define S5P_WAKEUP_MASK 0x0608
+#define S5P_WAKEUP_MASK2 0x0614
#define S5P_INFORM0 0x0800
#define S5P_INFORM1 0x0804
#define S5P_INFORM5 0x0814
#define S5P_INFORM6 0x0818
#define S5P_INFORM7 0x081C
+#define S5P_PMU_SPARE2 0x0908
#define S5P_PMU_SPARE3 0x090C
#define EXYNOS_IROM_DATA2 0x0988
@@ -182,6 +184,7 @@
#define S5P_CORE_LOCAL_PWR_EN 0x3
#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
+#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
/* Only for EXYNOS4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
diff --git a/arch/arm/mach-exynos/smc.h b/arch/arm/mach-exynos/smc.h
index f7b82f9..c284571 100644
--- a/arch/arm/mach-exynos/smc.h
+++ b/arch/arm/mach-exynos/smc.h
@@ -17,6 +17,8 @@
#define SMC_CMD_SLEEP (-3)
#define SMC_CMD_CPU1BOOT (-4)
#define SMC_CMD_CPU0AFTR (-5)
+#define SMC_CMD_SAVE (-6)
+#define SMC_CMD_SHUTDOWN (-7)
/* For CP15 Access */
#define SMC_CMD_C15RESUME (-11)
/* For L2 Cache Access */
@@ -32,4 +34,11 @@ extern void exynos_smc(u32 cmd, u32 arg1, u32 arg2, u32 arg3);
#endif /* __ASSEMBLY__ */
+/* op type for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define OP_TYPE_CORE 0x0
+#define OP_TYPE_CLUSTER 0x1
+
+/* Power State required for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define SMC_POWERSTATE_IDLE 0x1
+
#endif
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 2146d91..3e6aea7 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -65,8 +65,6 @@ static struct sleep_save exynos_core_save[] = {
struct exynos_pm_data {
const struct exynos_wkup_irq *wkup_irq;
- struct sleep_save *extra_save;
- int num_extra_save;
unsigned int wake_disable_mask;
unsigned int *release_ret_regs;
@@ -77,7 +75,7 @@ struct exynos_pm_data {
int (*cpu_suspend)(unsigned long);
};
-struct exynos_pm_data *pm_data;
+static const struct exynos_pm_data *pm_data;
static int exynos5420_cpu_state;
static unsigned int exynos_pmu_spare3;
@@ -106,7 +104,7 @@ static const struct exynos_wkup_irq exynos5250_wkup_irq[] = {
{ /* sentinel */ },
};
-unsigned int exynos_release_ret_regs[] = {
+static unsigned int exynos_release_ret_regs[] = {
S5P_PAD_RET_MAUDIO_OPTION,
S5P_PAD_RET_GPIO_OPTION,
S5P_PAD_RET_UART_OPTION,
@@ -117,7 +115,7 @@ unsigned int exynos_release_ret_regs[] = {
REG_TABLE_END,
};
-unsigned int exynos3250_release_ret_regs[] = {
+static unsigned int exynos3250_release_ret_regs[] = {
S5P_PAD_RET_MAUDIO_OPTION,
S5P_PAD_RET_GPIO_OPTION,
S5P_PAD_RET_UART_OPTION,
@@ -130,7 +128,7 @@ unsigned int exynos3250_release_ret_regs[] = {
REG_TABLE_END,
};
-unsigned int exynos5420_release_ret_regs[] = {
+static unsigned int exynos5420_release_ret_regs[] = {
EXYNOS_PAD_RET_DRAM_OPTION,
EXYNOS_PAD_RET_MAUDIO_OPTION,
EXYNOS_PAD_RET_JTAG_OPTION,
@@ -349,10 +347,6 @@ static void exynos_pm_prepare(void)
s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
- if (pm_data->extra_save)
- s3c_pm_do_save(pm_data->extra_save,
- pm_data->num_extra_save);
-
exynos_pm_enter_sleep_mode();
/* ensure at least INFORM0 has the resume address */
@@ -475,10 +469,6 @@ static void exynos_pm_resume(void)
/* For release retention */
exynos_pm_release_retention();
- if (pm_data->extra_save)
- s3c_pm_do_restore_core(pm_data->extra_save,
- pm_data->num_extra_save);
-
s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
if (cpuid == ARM_CPU_PART_CORTEX_A9)
@@ -685,7 +675,7 @@ static const struct exynos_pm_data exynos5250_pm_data = {
.cpu_suspend = exynos_cpu_suspend,
};
-static struct exynos_pm_data exynos5420_pm_data = {
+static const struct exynos_pm_data exynos5420_pm_data = {
.wkup_irq = exynos5250_wkup_irq,
.wake_disable_mask = (0x7F << 7) | (0x1F << 1),
.release_ret_regs = exynos5420_release_ret_regs,
@@ -736,7 +726,7 @@ void __init exynos_pm_init(void)
if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
- pm_data = (struct exynos_pm_data *) match->data;
+ pm_data = (const struct exynos_pm_data *) match->data;
/* All wakeup disable */
tmp = pmu_raw_readl(S5P_WAKEUP_MASK);