author     chenhui zhao <chenhui.zhao@freescale.com>	2015-11-20 17:14:01 +0800
committer  Scott Wood <oss@buserror.net>	2016-03-04 23:56:31 -0600
commit     2f4f1f815bc6d03ea42d4f67dd1e284525e7524e
tree       3cbc7c754e6f206ffcbc776f2c9e23a8edf05177
parent     56f1ba280719469bffc870b6b2d935f3a3019ea4
powerpc/mpc85xx: Add hotplug support on E5500 and E500MC cores
Freescale E500MC and E5500 core-based platforms, such as the P4080 and
T1040, support disabling and enabling individual CPUs dynamically.
This patch adds that support on those platforms.
Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@feescale.com>
[scottwood: removed unused pr_fmt]
Signed-off-by: Scott Wood <oss@buserror.net>
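With this patch applied and CONFIG_HOTPLUG_CPU enabled, the new paths can be exercised from userspace through the kernel's standard sysfs CPU hotplug interface (which predates this patch). Below is a minimal test sketch; the file name and the choice of CPU 1 are arbitrary. Offlining a core lands in smp_85xx_mach_cpu_die()/qoriq_cpu_kill(); bringing it back goes through smp_85xx_kick_cpu():

/* hotplug_test.c - offline and re-online CPU 1 via sysfs. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_cpu_online(int cpu, int online)
{
	char path[64];
	int fd, ret;

	/* Standard Linux hotplug control file for this CPU. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = (write(fd, online ? "1" : "0", 1) == 1) ? 0 : -1;
	close(fd);
	return ret;
}

int main(void)
{
	if (set_cpu_online(1, 0))	/* triggers the cpu_die path */
		perror("offline cpu1");
	sleep(1);
	if (set_cpu_online(1, 1))	/* triggers the kick_cpu path */
		perror("online cpu1");
	return 0;
}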
-rw-r--r--  arch/powerpc/Kconfig              |   2
-rw-r--r--  arch/powerpc/include/asm/smp.h    |   3
-rw-r--r--  arch/powerpc/kernel/smp.c         |   7
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 196

4 files changed, 118 insertions(+), 90 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7efddd1..d500772 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -389,7 +389,7 @@ config SWIOTLB
 config HOTPLUG_CPU
 	bool "Support for enabling/disabling CPUs"
 	depends on SMP && (PPC_PSERIES || \
-	PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
+	PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
 	---help---
 	  Say Y here to be able to disable and re-enable individual
 	  CPUs at runtime on SMP machines.
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 825663c..bdb8111 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -67,6 +67,9 @@ void generic_cpu_die(unsigned int cpu);
 void generic_set_cpu_dead(unsigned int cpu);
 void generic_set_cpu_up(unsigned int cpu);
 int generic_check_cpu_restart(unsigned int cpu);
+int is_cpu_dead(unsigned int cpu);
+#else
+#define generic_set_cpu_up(i)	do { } while (0)
 #endif
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ec9ec20..8575d04 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -427,7 +427,7 @@ void generic_cpu_die(unsigned int cpu)
 
 	for (i = 0; i < 100; i++) {
 		smp_rmb();
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+		if (is_cpu_dead(cpu))
 			return;
 		msleep(100);
 	}
@@ -454,6 +454,11 @@ int generic_check_cpu_restart(unsigned int cpu)
 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 }
 
+int is_cpu_dead(unsigned int cpu)
+{
+	return per_cpu(cpu_state, cpu) == CPU_DEAD;
+}
+
 static bool secondaries_inhibited(void)
 {
 	return kvm_hv_mode_active();
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index ab0459d..d7cc538 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -53,6 +53,7 @@ static void mpc85xx_give_timebase(void)
 	unsigned long flags;
 
 	local_irq_save(flags);
+	hard_irq_disable();
 
 	while (!tb_req)
 		barrier();
@@ -101,6 +102,7 @@ static void mpc85xx_take_timebase(void)
 	unsigned long flags;
 
 	local_irq_save(flags);
+	hard_irq_disable();
 
 	tb_req = 1;
 	while (!tb_valid)
@@ -136,8 +138,31 @@ static void smp_85xx_mach_cpu_die(void)
 	while (1)
 		;
 }
+
+static void qoriq_cpu_kill(unsigned int cpu)
+{
+	int i;
+
+	for (i = 0; i < 500; i++) {
+		if (is_cpu_dead(cpu)) {
+#ifdef CONFIG_PPC64
+			paca[cpu].cpu_start = 0;
+#endif
+			return;
+		}
+		msleep(20);
+	}
+	pr_err("CPU%d didn't die...\n", cpu);
+}
 #endif
 
+/*
+ * To keep it compatible with old boot program which uses
+ * cache-inhibit spin table, we need to flush the cache
+ * before accessing spin table to invalidate any staled data.
+ * We also need to flush the cache after writing to spin
+ * table to push data out.
+ */
 static inline void flush_spin_table(void *spin_table)
 {
 	flush_dcache_range((ulong)spin_table,
@@ -176,57 +201,20 @@ static void wake_hw_thread(void *info)
 }
 #endif
 
-static int smp_85xx_kick_cpu(int nr)
+static int smp_85xx_start_cpu(int cpu)
 {
-	unsigned long flags;
-	const u64 *cpu_rel_addr;
-	__iomem struct epapr_spin_table *spin_table;
+	int ret = 0;
 	struct device_node *np;
-	int hw_cpu = get_hard_smp_processor_id(nr);
+	const u64 *cpu_rel_addr;
+	unsigned long flags;
 	int ioremappable;
-	int ret = 0;
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	struct epapr_spin_table __iomem *spin_table;
 
-	WARN_ON(nr < 0 || nr >= NR_CPUS);
-	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
-
-	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
-
-#ifdef CONFIG_PPC64
-	/* Threads don't use the spin table */
-	if (cpu_thread_in_core(nr) != 0) {
-		int primary = cpu_first_thread_sibling(nr);
-
-		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
-			return -ENOENT;
-
-		if (cpu_thread_in_core(nr) != 1) {
-			pr_err("%s: cpu %d: invalid hw thread %d\n",
-			       __func__, nr, cpu_thread_in_core(nr));
-			return -ENOENT;
-		}
-
-		if (!cpu_online(primary)) {
-			pr_err("%s: cpu %d: primary %d not online\n",
-			       __func__, nr, primary);
-			return -ENOENT;
-		}
-
-		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
-		return 0;
-	} else if (cpu_thread_in_core(boot_cpuid) != 0 &&
-		   cpu_first_thread_sibling(boot_cpuid) == nr) {
-		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
-			return -ENOENT;
-
-		smp_call_function_single(boot_cpuid, wake_hw_thread, &nr, 0);
-	}
-#endif
-
-	np = of_get_cpu_node(nr, NULL);
+	np = of_get_cpu_node(cpu, NULL);
 	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
-
-	if (cpu_rel_addr == NULL) {
-		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
+	if (!cpu_rel_addr) {
+		pr_err("No cpu-release-addr for cpu %d\n", cpu);
 		return -ENOENT;
 	}
 
@@ -246,28 +234,18 @@ static int smp_85xx_kick_cpu(int nr)
 		spin_table = phys_to_virt(*cpu_rel_addr);
 
 	local_irq_save(flags);
-#ifdef CONFIG_PPC32
-#ifdef CONFIG_HOTPLUG_CPU
-	/* Corresponding to generic_set_cpu_dead() */
-	generic_set_cpu_up(nr);
+	hard_irq_disable();
 
-	if (system_state == SYSTEM_RUNNING) {
-		/*
-		 * To keep it compatible with old boot program which uses
-		 * cache-inhibit spin table, we need to flush the cache
-		 * before accessing spin table to invalidate any staled data.
-		 * We also need to flush the cache after writing to spin
-		 * table to push data out.
-		 */
-		flush_spin_table(spin_table);
-		out_be32(&spin_table->addr_l, 0);
-		flush_spin_table(spin_table);
+	if (qoriq_pm_ops)
+		qoriq_pm_ops->cpu_up_prepare(cpu);
 
+	/* if cpu is not spinning, reset it */
+	if (read_spin_table_addr_l(spin_table) != 1) {
 		/*
 		 * We don't set the BPTR register here since it already points
 		 * to the boot page properly.
 		 */
-		mpic_reset_core(nr);
+		mpic_reset_core(cpu);
 
 		/*
 		 * wait until core is ready...
@@ -277,40 +255,23 @@ static int smp_85xx_kick_cpu(int nr)
 		if (!spin_event_timeout(
 				read_spin_table_addr_l(spin_table) == 1,
 				10000, 100)) {
-			pr_err("%s: timeout waiting for core %d to reset\n",
-							__func__, hw_cpu);
-			ret = -ENOENT;
-			goto out;
+			pr_err("timeout waiting for cpu %d to reset\n",
+				hw_cpu);
+			ret = -EAGAIN;
+			goto err;
 		}
-
-		/* clear the acknowledge status */
-		__secondary_hold_acknowledge = -1;
-	}
-#endif
-	flush_spin_table(spin_table);
-	out_be32(&spin_table->pir, hw_cpu);
-	out_be32(&spin_table->addr_l, __pa(__early_start));
-	flush_spin_table(spin_table);
-
-	/* Wait a bit for the CPU to ack. */
-	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
-					10000, 100)) {
-		pr_err("%s: timeout waiting for core %d to ack\n",
-						__func__, hw_cpu);
-		ret = -ENOENT;
-		goto out;
 	}
-out:
-#else
-	smp_generic_kick_cpu(nr);
 
 	flush_spin_table(spin_table);
 	out_be32(&spin_table->pir, hw_cpu);
+#ifdef CONFIG_PPC64
 	out_be64((u64 *)(&spin_table->addr_h),
 		__pa(ppc_function_entry(generic_secondary_smp_init)));
-	flush_spin_table(spin_table);
+#else
+	out_be32(&spin_table->addr_l, __pa(__early_start));
 #endif
-
+	flush_spin_table(spin_table);
+err:
 	local_irq_restore(flags);
 
 	if (ioremappable)
@@ -319,6 +280,60 @@ out:
 	return ret;
 }
 
+static int smp_85xx_kick_cpu(int nr)
+{
+	int ret = 0;
+#ifdef CONFIG_PPC64
+	int primary = nr;
+#endif
+
+	WARN_ON(nr < 0 || nr >= num_possible_cpus());
+
+	pr_debug("kick CPU #%d\n", nr);
+
+#ifdef CONFIG_PPC64
+	/* Threads don't use the spin table */
+	if (cpu_thread_in_core(nr) != 0) {
+		int primary = cpu_first_thread_sibling(nr);
+
+		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
+			return -ENOENT;
+
+		if (cpu_thread_in_core(nr) != 1) {
+			pr_err("%s: cpu %d: invalid hw thread %d\n",
+			       __func__, nr, cpu_thread_in_core(nr));
+			return -ENOENT;
+		}
+
+		if (!cpu_online(primary)) {
+			pr_err("%s: cpu %d: primary %d not online\n",
+			       __func__, nr, primary);
+			return -ENOENT;
+		}
+
+		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
+		return 0;
+	}
+
+	ret = smp_85xx_start_cpu(primary);
+	if (ret)
+		return ret;
+
+	paca[nr].cpu_start = 1;
+	generic_set_cpu_up(nr);
+
+	return ret;
+#else
+	ret = smp_85xx_start_cpu(nr);
+	if (ret)
+		return ret;
+
+	generic_set_cpu_up(nr);
+
+	return ret;
+#endif
+}
+
 struct smp_ops_t smp_85xx_ops = {
 	.kick_cpu = smp_85xx_kick_cpu,
 	.cpu_bootable = smp_generic_cpu_bootable,
@@ -473,6 +488,10 @@ void __init mpc85xx_smp_init(void)
 	}
 
 #ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_FSL_CORENET_RCPM
+	fsl_rcpm_init();
+#endif
+
#ifdef CONFIG_FSL_PMC
 	mpc85xx_setup_pmc();
 #endif
@@ -480,6 +499,7 @@ void __init mpc85xx_smp_init(void)
 		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
 		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
 		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
+		smp_85xx_ops.cpu_die = qoriq_cpu_kill;
 	}
 #endif
 	smp_ops = &smp_85xx_ops;
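For context, smp_85xx_start_cpu() releases a secondary core through an ePAPR spin table. The sketch below mirrors the epapr_spin_table layout defined in arch/powerpc/platforms/85xx/smp.c (the struct itself is not changed by this patch); the comments describing the handshake are added here and are not part of the kernel source:

#include <linux/types.h>

/*
 * Reference sketch of the ePAPR spin-table entry. The boot program
 * parks each secondary core spinning on its entry with addr_l == 1.
 * smp_85xx_start_cpu() writes pir, then the kernel entry point into
 * addr_h/addr_l, flushing the d-cache around each access in case an
 * old boot program placed the table in cache-inhibited memory.
 */
struct epapr_spin_table {
	u32	addr_h;		/* entry point, upper 32 bits */
	u32	addr_l;		/* entry point, lower 32 bits; 1 = spinning */
	u32	r3_h;		/* value handed to the core in r3 */
	u32	r3_l;
	u32	reserved;
	u32	pir;		/* processor id written before release */
};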