author	Nicolas Pitre <nicolas.pitre@linaro.org>	2014-06-24 18:32:51 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-07-18 11:58:00 +0100
commit	3721924c81541d828d73d0e36dcbae8fd93f0885 (patch)
tree	ab7e046674055bd4fb0f0c1b321e792201c1f0b2 /arch/arm/common/mcpm_entry.c
parent	731542ef44a3dea7726a03f906111700847cf777 (diff)
ARM: 8081/1: MCPM: provide infrastructure to allow for MCPM loopback
The kernel already has the responsibility to handle resources such as the CCI when hotplugging CPUs, during the booting of secondary CPUs, and when resuming from suspend/idle. It would be more coherent and less confusing if the CCI for the boot CPU (or cluster) were also initialized by the kernel, rather than expecting the firmware/bootloader to do it in that one case only. After all, the kernel already has all the necessary code and the bootloader shouldn't have to care at all.

The CCI may be turned on only when the cache is off. Leveraging the CPU suspend code to loop back through the low-level MCPM entry point is all that is needed to properly turn on the CCI from the kernel, using the same code path as during secondary boot.

Let's provide a generic MCPM loopback function that can be invoked by backend initialization code to set up things (the CCI or similar) on the boot CPU just as is done for the other CPUs. A hypothetical usage sketch follows the diff below.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Kevin Hilman <khilman@linaro.org>
Tested-by: Kevin Hilman <khilman@linaro.org>
Tested-by: Doug Anderson <dianders@chromium.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/common/mcpm_entry.c')
-rw-r--r--	arch/arm/common/mcpm_entry.c	52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index f91136a..3c165fc 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -12,11 +12,13 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
+#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
+#include <asm/suspend.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
@@ -146,6 +148,56 @@ int mcpm_cpu_powered_up(void)
return 0;
}

+#ifdef CONFIG_ARM_CPU_SUSPEND
+
+static int __init nocache_trampoline(unsigned long _arg)
+{
+ void (*cache_disable)(void) = (void *)_arg;
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ phys_reset_t phys_reset;
+
+ mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+ setup_mm_for_reboot();
+
+ __mcpm_cpu_going_down(cpu, cluster);
+ BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
+ cache_disable();
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ __mcpm_cpu_down(cpu, cluster);
+
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+ BUG();
+}
+
+int __init mcpm_loopback(void (*cache_disable)(void))
+{
+ int ret;
+
+ /*
+ * We're going to soft-restart the current CPU through the
+ * low-level MCPM code by leveraging the suspend/resume
+ * infrastructure. Let's play it safe by using cpu_pm_enter()
+ * in case the CPU init code path resets the VFP or similar.
+ */
+ local_irq_disable();
+ local_fiq_disable();
+ ret = cpu_pm_enter();
+ if (!ret) {
+ ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
+ cpu_pm_exit();
+ }
+ local_fiq_enable();
+ local_irq_enable();
+ if (ret)
+ pr_err("%s returned %d\n", __func__, ret);
+ return ret;
+}
+
+#endif
+
struct sync_struct mcpm_sync;

/*
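
To show how a backend is expected to use this, here is a minimal, hypothetical sketch of a platform's MCPM init. The names platform_power_ops, platform_cache_off() and platform_mcpm_init() are invented for illustration and are not part of this patch; mcpm_platform_register() and the v7_exit_coherency_flush() macro are existing kernel helpers, and the ops table is assumed to be defined elsewhere in the backend:

	#include <linux/init.h>
	#include <asm/cacheflush.h>
	#include <asm/mcpm.h>

	/* Platform power ops, assumed to be defined elsewhere in the backend. */
	extern const struct mcpm_platform_ops platform_power_ops;

	static void platform_cache_off(void)
	{
		/*
		 * Called back by nocache_trampoline(): flush and disable
		 * the local cache and exit SMP coherency, which is the
		 * state required before the CCI may be enabled.
		 */
		v7_exit_coherency_flush(all);
	}

	static int __init platform_mcpm_init(void)
	{
		int ret;

		ret = mcpm_platform_register(&platform_power_ops);
		if (!ret)
			/*
			 * Soft-restart the boot CPU through the MCPM
			 * low-level entry point so the kernel enables the
			 * CCI for the boot cluster just as it does for
			 * secondary ones.
			 */
			ret = mcpm_loopback(platform_cache_off);
		return ret;
	}
	early_initcall(platform_mcpm_init);

The callback must leave the CPU with caches off and out of coherency before returning, since, as the commit message notes, the CCI may only be turned on when the cache is off; mcpm_loopback() then resumes the CPU through mcpm_entry_point and cpu_resume.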