From b1d4c6cac02808b1d4e84d0187dc6014bffd2446 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 4 Mar 2014 10:11:39 +0000 Subject: MIPS: PM: Add CPU PM callbacks for general CPU context Add a CPU power management notifier callback for preserving general CPU context. The CPU PM callbacks will be triggered by the powering down of CPU cores, for example by cpuidle drivers & in the future by suspend to RAM implementations. The current state preserved is mostly related to the process context: - FPU - DSP - ASID - UserLocal - Watch registers Signed-off-by: James Hogan Signed-off-by: Paul Burton --- arch/mips/Kconfig | 1 + arch/mips/kernel/Makefile | 2 + arch/mips/kernel/pm.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+) create mode 100644 arch/mips/kernel/pm.c (limited to 'arch/mips') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5cd695f..322bbe1 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -50,6 +50,7 @@ config MIPS select CLONE_BACKWARDS select HAVE_DEBUG_STACKOVERFLOW select HAVE_CC_STACKPROTECTOR + select CPU_PM if CPU_IDLE menu "Machine selection" diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 277dab3..97540a8 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -107,6 +107,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_MIPS_CM) += mips-cm.o obj-$(CONFIG_MIPS_CPC) += mips-cpc.o +obj-$(CONFIG_CPU_PM) += pm.o + # # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c new file mode 100644 index 0000000..112903f --- /dev/null +++ b/arch/mips/kernel/pm.c @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2014 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * CPU PM notifiers for saving/restoring general CPU state. + */ + +#include +#include + +#include +#include +#include +#include + +/** + * mips_cpu_save() - Save general CPU state. + * Ensures that general CPU context is saved, notably FPU and DSP. + */ +static int mips_cpu_save(void) +{ + /* Save FPU state */ + lose_fpu(1); + + /* Save DSP state */ + save_dsp(current); + + return 0; +} + +/** + * mips_cpu_restore() - Restore general CPU state. + * Restores important CPU context. + */ +static void mips_cpu_restore(void) +{ + unsigned int cpu = smp_processor_id(); + + /* Restore ASID */ + if (current->mm) + write_c0_entryhi(cpu_asid(cpu, current->mm)); + + /* Restore DSP state */ + restore_dsp(current); + + /* Restore UserLocal */ + if (cpu_has_userlocal) + write_c0_userlocal(current_thread_info()->tp_value); + + /* Restore watch registers */ + __restore_watch(); +} + +/** + * mips_pm_notifier() - Notifier for preserving general CPU context. + * @self: Notifier block. + * @cmd: CPU PM event. + * @v: Private data (unused). + * + * This is called when a CPU power management event occurs, and is used to + * ensure that important CPU context is preserved across a CPU power down. 
+ */ +static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + int ret; + + switch (cmd) { + case CPU_PM_ENTER: + ret = mips_cpu_save(); + if (ret) + return NOTIFY_STOP; + break; + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + mips_cpu_restore(); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block mips_pm_notifier_block = { + .notifier_call = mips_pm_notifier, +}; + +static int __init mips_pm_init(void) +{ + return cpu_pm_register_notifier(&mips_pm_notifier_block); +} +arch_initcall(mips_pm_init); -- cgit v1.1 From ae4ce45419f908cf884d3fdc37f5706972068d34 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 4 Mar 2014 10:20:43 +0000 Subject: MIPS: traps: Add CPU PM callback for trap configuration Implement a CPU power management callback for restoring trap related CPU configuration after CPU power up from a low power state. The following state is restored: - Status register - HWREna register - Exception vector configuration registers - Context/XContext register Signed-off-by: James Hogan Signed-off-by: Paul Burton --- arch/mips/include/asm/mmu_context.h | 15 ++++-- arch/mips/kernel/traps.c | 93 ++++++++++++++++++++++++++++--------- 2 files changed, 81 insertions(+), 27 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index e277bba..ecae1dc 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -31,11 +31,15 @@ do { \ } while (0) #ifdef CONFIG_MIPS_PGD_C0_CONTEXT + +#define TLBMISS_HANDLER_RESTORE() \ + write_c0_xcontext((unsigned long) smp_processor_id() << \ + SMP_CPUID_REGSHIFT) + #define TLBMISS_HANDLER_SETUP() \ do { \ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \ - write_c0_xcontext((unsigned long) smp_processor_id() << \ - SMP_CPUID_REGSHIFT); \ + TLBMISS_HANDLER_RESTORE(); \ } while (0) #else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ @@ -47,9 +51,12 @@ do { \ */ extern unsigned long pgd_current[]; -#define TLBMISS_HANDLER_SETUP() \ +#define TLBMISS_HANDLER_RESTORE() \ write_c0_context((unsigned long) smp_processor_id() << \ - SMP_CPUID_REGSHIFT); \ + SMP_CPUID_REGSHIFT) + +#define TLBMISS_HANDLER_SETUP() \ + TLBMISS_HANDLER_RESTORE(); \ back_to_back_c0_hazard(); \ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 074e857..9651f68 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -1865,32 +1866,16 @@ static int __init ulri_disable(char *s) } __setup("noulri", ulri_disable); -void per_cpu_trap_init(bool is_boot_cpu) +/* configure STATUS register */ +static void configure_status(void) { - unsigned int cpu = smp_processor_id(); - unsigned int status_set = ST0_CU0; - unsigned int hwrena = cpu_hwrena_impl_bits; -#ifdef CONFIG_MIPS_MT_SMTC - int secondaryTC = 0; - int bootTC = (cpu == 0); - - /* - * Only do per_cpu_trap_init() for first TC of Each VPE. - * Note that this hack assumes that the SMTC init code - * assigns TCs consecutively and in ascending order. - */ - - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && - ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) - secondaryTC = 1; -#endif /* CONFIG_MIPS_MT_SMTC */ - /* * Disable coprocessors and select 32-bit or 64-bit addressing * and the 16/32 or 32/32 FPR register model. 
Reset the BEV * flag that some firmware may have left set and the TS bit (for * IP27). Set XX for ISA IV code to work. */ + unsigned int status_set = ST0_CU0; #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif @@ -1901,6 +1886,12 @@ void per_cpu_trap_init(bool is_boot_cpu) change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, status_set); +} + +/* configure HWRENA register */ +static void configure_hwrena(void) +{ + unsigned int hwrena = cpu_hwrena_impl_bits; if (cpu_has_mips_r2) hwrena |= 0x0000000f; @@ -1910,11 +1901,10 @@ void per_cpu_trap_init(bool is_boot_cpu) if (hwrena) write_c0_hwrena(hwrena); +} -#ifdef CONFIG_MIPS_MT_SMTC - if (!secondaryTC) { -#endif /* CONFIG_MIPS_MT_SMTC */ - +static void configure_exception_vector(void) +{ if (cpu_has_veic || cpu_has_vint) { unsigned long sr = set_c0_status(ST0_BEV); write_c0_ebase(ebase); @@ -1930,6 +1920,34 @@ void per_cpu_trap_init(bool is_boot_cpu) } else set_c0_cause(CAUSEF_IV); } +} + +void per_cpu_trap_init(bool is_boot_cpu) +{ + unsigned int cpu = smp_processor_id(); +#ifdef CONFIG_MIPS_MT_SMTC + int secondaryTC = 0; + int bootTC = (cpu == 0); + + /* + * Only do per_cpu_trap_init() for first TC of Each VPE. + * Note that this hack assumes that the SMTC init code + * assigns TCs consecutively and in ascending order. + */ + + if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && + ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) + secondaryTC = 1; +#endif /* CONFIG_MIPS_MT_SMTC */ + + configure_status(); + configure_hwrena(); + +#ifdef CONFIG_MIPS_MT_SMTC + if (!secondaryTC) { +#endif /* CONFIG_MIPS_MT_SMTC */ + + configure_exception_vector(); /* * Before R2 both interrupt numbers were fixed to 7, so on R2 only: @@ -2185,3 +2203,32 @@ void __init trap_init(void) cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ } + +static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + switch (cmd) { + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + configure_status(); + configure_hwrena(); + configure_exception_vector(); + + /* Restore register with CPU number for TLB handlers */ + TLBMISS_HANDLER_RESTORE(); + + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block trap_pm_notifier_block = { + .notifier_call = trap_pm_notifier, +}; + +static int __init trap_pm_init(void) +{ + return cpu_pm_register_notifier(&trap_pm_notifier_block); +} +arch_initcall(trap_pm_init); -- cgit v1.1 From 61d73044fe4cb8b9b50fa2a612cb4492b9db43cd Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 4 Mar 2014 10:23:57 +0000 Subject: MIPS: c-r4k: Add CPU PM callback for coherency Implement a CPU power management callback for the r4k cache, to set up coherency again after leaving a powered down state. Signed-off-by: James Hogan Signed-off-by: Paul Burton --- arch/mips/mm/c-r4k.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 1c74a6a..a2a71c5 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -7,6 +7,7 @@ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 
*/ +#include #include #include #include @@ -1644,3 +1645,26 @@ void r4k_cache_init(void) coherency_setup(); board_cache_error_setup = r4k_cache_error_setup; } + +static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + switch (cmd) { + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + coherency_setup(); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block r4k_cache_pm_notifier_block = { + .notifier_call = r4k_cache_pm_notifier, +}; + +int __init r4k_cache_init_pm(void) +{ + return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block); +} +arch_initcall(r4k_cache_init_pm); -- cgit v1.1 From eaa38d6343adbb5070c27af29aeeb3df126f47f2 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 28 Feb 2014 17:09:20 +0000 Subject: MIPS: tlb-r4k: Add CPU PM callback to reconfigure TLB Add a CPU power management callback for the r4k TLB which reconfigures it after the CPU leaves a powered down state. Signed-off-by: James Hogan Signed-off-by: Paul Burton --- arch/mips/mm/tlb-r4k.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index eeaf50f..89e3fab 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -8,6 +8,7 @@ * Carsten Langgaard, carstenl@mips.com * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved. */ +#include #include #include #include @@ -421,7 +422,10 @@ static int __init set_ntlb(char *str) __setup("ntlb=", set_ntlb); -void tlb_init(void) +/* + * Configure TLB (for init or after a CPU has been powered off). + */ +static void r4k_tlb_configure(void) { /* * You should never change this register: @@ -453,6 +457,11 @@ void tlb_init(void) local_flush_tlb_all(); /* Did I tell you that ARC SUCKS? */ +} + +void tlb_init(void) +{ + r4k_tlb_configure(); if (ntlb) { if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { @@ -466,3 +475,26 @@ void tlb_init(void) build_tlb_refill_handler(); } + +static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + switch (cmd) { + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + r4k_tlb_configure(); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block r4k_tlb_pm_notifier_block = { + .notifier_call = r4k_tlb_pm_notifier, +}; + +static int __init r4k_tlb_init_pm(void) +{ + return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block); +} +arch_initcall(r4k_tlb_init_pm); -- cgit v1.1 From 74e91335190c628b870c69cff8360d23707b1f53 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 4 Mar 2014 10:25:45 +0000 Subject: MIPS: PM: Implement PM helper macros Implement assembler helper macros in asm/pm.h for platform code to use for saving context across low power states - for example suspend to RAM or powered down cpuidle states. Macros are provided for saving and restoring the main CPU context used by C code and doing important configuration which must be done very early during resume. Notably EVA needs segmentation control registers to be restored before the stack or dynamically allocated memory is accessed, so that state is saved in global data. 
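For illustration, a platform's suspend path built on these macros would typically pair an assembly entry point (using SUSPEND_SAVE before powering down and RESUME_RESTORE_RETURN on the resume side) with a trivial C wrapper. The names below are hypothetical and only sketch the intended shape of such platform code:

  #include <linux/suspend.h>
  #include <linux/linkage.h>
  #include <asm/pm.h>

  /* Implemented in platform assembly using SUSPEND_SAVE /
   * RESUME_RESTORE_RETURN; hypothetical, not part of this series. */
  extern asmlinkage int platform_cpu_suspend(void);

  static int platform_pm_enter(suspend_state_t state)
  {
          /*
           * SUSPEND_SAVE stores the callee-saved GPRs on the stack,
           * records sp (and SegCtl0-2 under EVA) in
           * mips_static_suspend_state, and writes back dirty cache
           * lines so the context survives the power-down.
           */
          return platform_cpu_suspend();
  }
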
Signed-off-by: James Hogan Signed-off-by: Paul Burton --- arch/mips/include/asm/pm.h | 167 +++++++++++++++++++++++++++++++++++++++++ arch/mips/kernel/asm-offsets.c | 15 ++++ arch/mips/kernel/pm.c | 4 + 3 files changed, 186 insertions(+) create mode 100644 arch/mips/include/asm/pm.h (limited to 'arch/mips') diff --git a/arch/mips/include/asm/pm.h b/arch/mips/include/asm/pm.h new file mode 100644 index 0000000..268546f --- /dev/null +++ b/arch/mips/include/asm/pm.h @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2014 Imagination Technologies Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * PM helper macros for CPU power off (e.g. Suspend-to-RAM). + */ + +#ifndef __ASM_PM_H +#define __ASM_PM_H + +#ifdef __ASSEMBLY__ + +#include +#include +#include +#include + +/* Save CPU state to stack for suspend to RAM */ +.macro SUSPEND_SAVE_REGS + subu sp, PT_SIZE + /* Call preserved GPRs */ + LONG_S $16, PT_R16(sp) + LONG_S $17, PT_R17(sp) + LONG_S $18, PT_R18(sp) + LONG_S $19, PT_R19(sp) + LONG_S $20, PT_R20(sp) + LONG_S $21, PT_R21(sp) + LONG_S $22, PT_R22(sp) + LONG_S $23, PT_R23(sp) + LONG_S $28, PT_R28(sp) + LONG_S $30, PT_R30(sp) + LONG_S $31, PT_R31(sp) + /* A couple of CP0 registers with space in pt_regs */ + mfc0 k0, CP0_STATUS + LONG_S k0, PT_STATUS(sp) +#ifdef CONFIG_MIPS_MT_SMTC + mfc0 k0, CP0_TCSTATUS + LONG_S k0, PT_TCSTATUS(sp) +#endif +.endm + +/* Restore CPU state from stack after resume from RAM */ +.macro RESUME_RESTORE_REGS_RETURN + .set push + .set noreorder + /* A couple of CP0 registers with space in pt_regs */ + LONG_L k0, PT_STATUS(sp) + mtc0 k0, CP0_STATUS +#ifdef CONFIG_MIPS_MT_SMTC + LONG_L k0, PT_TCSTATUS(sp) + mtc0 k0, CP0_TCSTATUS +#endif + /* Call preserved GPRs */ + LONG_L $16, PT_R16(sp) + LONG_L $17, PT_R17(sp) + LONG_L $18, PT_R18(sp) + LONG_L $19, PT_R19(sp) + LONG_L $20, PT_R20(sp) + LONG_L $21, PT_R21(sp) + LONG_L $22, PT_R22(sp) + LONG_L $23, PT_R23(sp) + LONG_L $28, PT_R28(sp) + LONG_L $30, PT_R30(sp) + LONG_L $31, PT_R31(sp) + /* Pop and return */ + jr ra + addiu sp, PT_SIZE + .set pop +.endm + +/* Get address of static suspend state into t1 */ +.macro LA_STATIC_SUSPEND + la t1, mips_static_suspend_state +.endm + +/* Save important CPU state for early restoration to global data */ +.macro SUSPEND_SAVE_STATIC +#ifdef CONFIG_EVA + /* + * Segment configuration is saved in global data where it can be easily + * reloaded without depending on the segment configuration. + */ + mfc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */ + LONG_S k0, SSS_SEGCTL0(t1) + mfc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */ + LONG_S k0, SSS_SEGCTL1(t1) + mfc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */ + LONG_S k0, SSS_SEGCTL2(t1) +#endif + /* save stack pointer (pointing to GPRs) */ + LONG_S sp, SSS_SP(t1) +.endm + +/* Restore important CPU state early from global data */ +.macro RESUME_RESTORE_STATIC +#ifdef CONFIG_EVA + /* + * Segment configuration must be restored prior to any access to + * allocated memory, as it may reside outside of the legacy kernel + * segments. 
+ */ + LONG_L k0, SSS_SEGCTL0(t1) + mtc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */ + LONG_L k0, SSS_SEGCTL1(t1) + mtc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */ + LONG_L k0, SSS_SEGCTL2(t1) + mtc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */ + tlbw_use_hazard +#endif + /* restore stack pointer (pointing to GPRs) */ + LONG_L sp, SSS_SP(t1) +.endm + +/* flush caches to make sure context has reached memory */ +.macro SUSPEND_CACHE_FLUSH + .extern __wback_cache_all + .set push + .set noreorder + la t1, __wback_cache_all + LONG_L t0, 0(t1) + jalr t0 + nop + .set pop + .endm + +/* Save suspend state and flush data caches to RAM */ +.macro SUSPEND_SAVE + SUSPEND_SAVE_REGS + LA_STATIC_SUSPEND + SUSPEND_SAVE_STATIC + SUSPEND_CACHE_FLUSH +.endm + +/* Restore saved state after resume from RAM and return */ +.macro RESUME_RESTORE_RETURN + LA_STATIC_SUSPEND + RESUME_RESTORE_STATIC + RESUME_RESTORE_REGS_RETURN +.endm + +#else /* __ASSEMBLY__ */ + +/** + * struct mips_static_suspend_state - Core saved CPU state across S2R. + * @segctl: CP0 Segment control registers. + * @sp: Stack frame where GP register context is saved. + * + * This structure contains minimal CPU state that must be saved in static kernel + * data in order to be able to restore the rest of the state. This includes + * segmentation configuration in the case of EVA being enabled, as they must be + * restored prior to any kmalloc'd memory being referenced (even the stack + * pointer). + */ +struct mips_static_suspend_state { +#ifdef CONFIG_EVA + unsigned long segctl[3]; +#endif + unsigned long sp; +}; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_PM_HELPERS_H */ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 0ea75c2..e085cde 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -404,6 +405,20 @@ void output_pbe_defines(void) } #endif +#ifdef CONFIG_CPU_PM +void output_pm_defines(void) +{ + COMMENT(" PM offsets. "); +#ifdef CONFIG_EVA + OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]); + OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]); + OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]); +#endif + OFFSET(SSS_SP, mips_static_suspend_state, sp); + BLANK(); +} +#endif + void output_kvm_defines(void) { COMMENT(" KVM/MIPS Specfic offsets. "); diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c index 112903f..fefdf39 100644 --- a/arch/mips/kernel/pm.c +++ b/arch/mips/kernel/pm.c @@ -15,8 +15,12 @@ #include #include #include +#include #include +/* Used by PM helper macros in asm/pm.h */ +struct mips_static_suspend_state mips_static_suspend_state; + /** * mips_cpu_save() - Save general CPU state. * Ensures that general CPU context is saved, notably FPU and DSP. -- cgit v1.1 From 0467811e9bdb22c7a1595db4c230efd99265e3c7 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 14 Feb 2014 09:21:30 +0000 Subject: MIPS: mark GIC clockevent device with CLOCK_EVT_FEAT_C3STOP Although the GIC counter will continue when a core is in a low power state and it will still trigger interrupts, the core will be incapable of servicing those interrupts rendering them useless. 
Signed-off-by: Paul Burton --- arch/mips/kernel/cevt-gic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c index 594cbbf..925bae5 100644 --- a/arch/mips/kernel/cevt-gic.c +++ b/arch/mips/kernel/cevt-gic.c @@ -73,7 +73,8 @@ int gic_clockevent_init(void) cd = &per_cpu(gic_clockevent_device, cpu); cd->name = "MIPS GIC"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; + cd->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP; clockevent_set_clock(cd, gic_frequency); -- cgit v1.1 From 414408d0eedd782a397ba89fd8f732ffbd3acde8 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 5 Mar 2014 11:35:53 +0000 Subject: MIPS: allow GIC clockevent device config from other CPUs This patch allows the GIC clockevent device for a CPU to be configured by another CPU. This makes GIC clockevent devices suitable for use as the tick broadcast device, where formerly the GIC timer local to the configuring CPU would have been configured incorrectly. Signed-off-by: Paul Burton --- arch/mips/include/asm/gic.h | 1 + arch/mips/kernel/cevt-gic.c | 2 +- arch/mips/kernel/irq-gic.c | 15 +++++++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 0827166..10f6a99 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h @@ -380,6 +380,7 @@ extern unsigned int gic_compare_int (void); extern cycle_t gic_read_count(void); extern cycle_t gic_read_compare(void); extern void gic_write_compare(cycle_t cnt); +extern void gic_write_cpu_compare(cycle_t cnt, int cpu); extern void gic_send_ipi(unsigned int intr); extern unsigned int plat_ipi_call_int_xlate(unsigned int); extern unsigned int plat_ipi_resched_int_xlate(unsigned int); diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c index 925bae5..6093716 100644 --- a/arch/mips/kernel/cevt-gic.c +++ b/arch/mips/kernel/cevt-gic.c @@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt) cnt = gic_read_count(); cnt += (u64)delta; - gic_write_compare(cnt); + gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; return res; } diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 8520dad..88e4c32 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -54,6 +54,21 @@ void gic_write_compare(cycle_t cnt) (int)(cnt & 0xffffffff)); } +void gic_write_cpu_compare(cycle_t cnt, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu); + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), + (int)(cnt >> 32)); + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), + (int)(cnt & 0xffffffff)); + + local_irq_restore(flags); +} + cycle_t gic_read_compare(void) { unsigned int hi, lo; -- cgit v1.1 From 5977d682d21cc65a9e0e402e94709e210af04459 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 14 Feb 2014 09:20:15 +0000 Subject: MIPS: mark R4K clockevent device with CLOCK_EVT_FEAT_C3STOP When a core enters a clock off or power down state its CP0 counter will be stopped along with it. 
Signed-off-by: Paul Burton --- arch/mips/kernel/cevt-r4k.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 50d3f5a..7820d5d 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -195,7 +195,8 @@ int r4k_clockevent_init(void) cd = &per_cpu(mips_clockevent_device, cpu); cd->name = "MIPS"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; + cd->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP; clockevent_set_clock(cd, mips_hpt_frequency); -- cgit v1.1 From d8107efd8a3c15ec5885dbe1d3168e26c6b3e2e4 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 15 Apr 2014 12:05:24 +0100 Subject: MIPS: mark R4K clockevent device with CLOCK_EVT_FEAT_PERCPU The CLOCK_EVT_FEAT_PERCPU flag indicates that a clockevent device is only configurable by the CPU for which it is registered, and thus cannot be used as the tick broadcast device. That property is true of the R4K timer, which is inaccessible from other cores. Signed-off-by: Paul Burton --- arch/mips/kernel/cevt-r4k.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 7820d5d..f3c549c 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -196,7 +196,8 @@ int r4k_clockevent_init(void) cd->name = "MIPS"; cd->features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_C3STOP; + CLOCK_EVT_FEAT_C3STOP | + CLOCK_EVT_FEAT_PERCPU; clockevent_set_clock(cd, mips_hpt_frequency); -- cgit v1.1 From 60bdb9c7bd9ab5b63ad68e9391056f8757b810f1 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 5 Mar 2014 11:41:47 +0000 Subject: MIPS: allow R4K clockevent device to function regardless of GIC Having the GIC clockevent driver compiled should not prevent the R4K timer clockevent driver from functioning. One will be selected as the CPU local timer based upon their priorities and the other may simply be unused or in the case of the GIC timer may be used as the tick broadcast device. Signed-off-by: Paul Burton --- arch/mips/kernel/cevt-r4k.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index f3c549c..4dcd1fb 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -72,9 +72,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) /* Clear Count/Compare Interrupt */ write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); -#ifdef CONFIG_CEVT_GIC - if (!gic_present) -#endif cd->event_handler(cd); } @@ -212,9 +209,6 @@ int r4k_clockevent_init(void) cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; -#ifdef CONFIG_CEVT_GIC - if (!gic_present) -#endif clockevents_register_device(cd); if (cp0_timer_irq_installed) -- cgit v1.1 From cc7964af8f997a20240d3ec5bf90c4fd20d3c48a Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 14 Feb 2014 09:24:58 +0000 Subject: MIPS: support for generic clockevents broadcast This patch adds support for generic clockevents broadcast using the a dummy clockevent device and the tick_broadcast function introduced by commit 12ad10004645 "clockevents: Add generic timer broadcast function". 
Signed-off-by: Paul Burton --- arch/mips/Kconfig | 1 + arch/mips/kernel/smp.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 322bbe1..5cdc53b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -51,6 +51,7 @@ config MIPS select HAVE_DEBUG_STACKOVERFLOW select HAVE_CC_STACKPROTECTOR select CPU_PM if CPU_IDLE + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST menu "Machine selection" diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 0a022ee..9a52264 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -404,3 +404,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *)) } EXPORT_SYMBOL(dump_send_ipi); #endif + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + +static DEFINE_PER_CPU(atomic_t, tick_broadcast_count); +static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd); + +void tick_broadcast(const struct cpumask *mask) +{ + atomic_t *count; + struct call_single_data *csd; + int cpu; + + for_each_cpu(cpu, mask) { + count = &per_cpu(tick_broadcast_count, cpu); + csd = &per_cpu(tick_broadcast_csd, cpu); + + if (atomic_inc_return(count) == 1) + smp_call_function_single_async(cpu, csd); + } +} + +static void tick_broadcast_callee(void *info) +{ + int cpu = smp_processor_id(); + tick_receive_broadcast(); + atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); +} + +static int __init tick_broadcast_init(void) +{ + struct call_single_data *csd; + int cpu; + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + csd = &per_cpu(tick_broadcast_csd, cpu); + csd->func = tick_broadcast_callee; + } + + return 0; +} +early_initcall(tick_broadcast_init); + +#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */ -- cgit v1.1 From 76306f4272e036e600254a98bc291df50cedd949 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 14 Feb 2014 16:30:52 +0000 Subject: MIPS: introduce cpu_coherent_mask Add a mask of CPUs which are currently known to be operating coherently. This is setup initially to be all present CPUs, but in a subsequent patch CPUs in a MIPS Coherent Processing System will be cleared in this mask as they enter non-coherent idle states. This will be used in order to determine when a CPU within a CPS system may need to be powered back up, but may also be used in future to optimise away wakeups for cache operations or TLB invalidations. 
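As a sketch of the intended usage (the functions below are hypothetical; the real users arrive with the CPS power management patches), a CPU clears its bit around a non-coherent low power state, and code wanting to interrupt another CPU checks the mask first:

  #include <linux/cpumask.h>
  #include <asm/smp.h>

  static void example_enter_noncoherent_idle(unsigned int cpu)
  {
          cpumask_clear_cpu(cpu, &cpu_coherent_mask);
          /* ... disable coherence, wait/power down, then re-enable ... */
          cpumask_set_cpu(cpu, &cpu_coherent_mask);
  }

  static bool example_cpu_needs_powerup(unsigned int cpu)
  {
          /* a non-coherent CPU cannot observe IPIs or cache ops directly */
          return !cpumask_test_cpu(cpu, &cpu_coherent_mask);
  }
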
Signed-off-by: Paul Burton --- arch/mips/include/asm/smp.h | 3 +++ arch/mips/kernel/smp.c | 4 ++++ 2 files changed, 7 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index efa02ac..b037334 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -46,6 +46,9 @@ extern int __cpu_logical_map[NR_CPUS]; extern volatile cpumask_t cpu_callin_map; +/* Mask of CPUs which are currently definitely operating coherently */ +extern cpumask_t cpu_coherent_mask; + extern void asmlinkage smp_bootstrap(void); /* diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 9a52264..991ae96 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -66,6 +66,8 @@ EXPORT_SYMBOL(cpu_sibling_map); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; +cpumask_t cpu_coherent_mask; + static inline void set_cpu_sibling_map(int cpu) { int i; @@ -124,6 +126,7 @@ asmlinkage void start_secondary(void) cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; + cpu_set(cpu, cpu_coherent_mask); notify_cpu_starting(cpu); set_cpu_online(cpu, true); @@ -186,6 +189,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif + cpumask_copy(&cpu_coherent_mask, cpu_possible_mask); } /* preload SMP state for boot cpu */ -- cgit v1.1 From 2ba60250b01bfbab6b2293f8c1492312eb6a4131 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 14 Feb 2014 09:27:15 +0000 Subject: MIPS: CPC: provide functions to retrieve register addresses This patch introduces addr_ functions in addition to the existing read_ & write_ functions. The new functions simply return the address of the appropriate CPC register rather than performing a memory access. This will be used in a subsequent patch. Signed-off-by: Paul Burton --- arch/mips/include/asm/mips-cpc.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h index 988507e..c5bb609 100644 --- a/arch/mips/include/asm/mips-cpc.h +++ b/arch/mips/include/asm/mips-cpc.h @@ -72,7 +72,12 @@ static inline bool mips_cpc_present(void) #define MIPS_CPC_COCB_OFS 0x4000 /* Macros to ease the creation of register access functions */ -#define BUILD_CPC_R_(name, off) \ +#define BUILD_CPC_R_(name, off) \ +static inline u32 *addr_cpc_##name(void) \ +{ \ + return (u32 *)(mips_cpc_base + (off)); \ +} \ + \ static inline u32 read_cpc_##name(void) \ { \ return __raw_readl(mips_cpc_base + (off)); \ -- cgit v1.1 From 76ae658465c2319a63f3814b1e1e6e0664a1f542 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 14 Feb 2014 09:28:06 +0000 Subject: MIPS: CPC: provide locking functions This patch provides functions to lock & unlock access to the "core-other" register region of the CPC. Without performing appropriate locking it is possible for code using this region to be preempted or to race with code on another VPE within the same core, with one changing the core which the "core-other" region is acting upon at an inopportune time for the other. 
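Typical usage (a hypothetical example; the real caller is the CPS SMP code added later in this series) brackets a single access to the core-other region, assuming the usual CPC command accessor and CPC_Cx_CMD_* values from asm/mips-cpc.h:

  #include <asm/mips-cpc.h>

  static void example_power_up_core(unsigned int core)
  {
          mips_cpc_lock_other(core);
          write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
          mips_cpc_unlock_other();
  }
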
Signed-off-by: Paul Burton --- arch/mips/include/asm/mips-cpc.h | 27 +++++++++++++++++++++++++++ arch/mips/kernel/mips-cpc.c | 26 ++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h index c5bb609..e139a53 100644 --- a/arch/mips/include/asm/mips-cpc.h +++ b/arch/mips/include/asm/mips-cpc.h @@ -152,4 +152,31 @@ BUILD_CPC_Cx_RW(other, 0x10) #define CPC_Cx_OTHER_CORENUM_SHF 16 #define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16) +#ifdef CONFIG_MIPS_CPC + +/** + * mips_cpc_lock_other - lock access to another core + * core: the other core to be accessed + * + * Call before operating upon a core via the 'other' register region in + * order to prevent the region being moved during access. Must be followed + * by a call to mips_cpc_unlock_other. + */ +extern void mips_cpc_lock_other(unsigned int core); + +/** + * mips_cpc_unlock_other - unlock access to another core + * + * Call after operating upon another core via the 'other' register region. + * Must be called after mips_cpc_lock_other. + */ +extern void mips_cpc_unlock_other(void); + +#else /* !CONFIG_MIPS_CPC */ + +static inline void mips_cpc_lock_other(unsigned int core) { } +static inline void mips_cpc_unlock_other(void) { } + +#endif /* !CONFIG_MIPS_CPC */ + #endif /* __MIPS_ASM_MIPS_CPC_H__ */ diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index c9dc674..2368fc5 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -15,6 +15,10 @@ void __iomem *mips_cpc_base; +static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock); + +static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); + phys_t __weak mips_cpc_phys_base(void) { u32 cpc_base; @@ -39,6 +43,10 @@ phys_t __weak mips_cpc_phys_base(void) int mips_cpc_probe(void) { phys_t addr; + unsigned cpu; + + for_each_possible_cpu(cpu) + spin_lock_init(&per_cpu(cpc_core_lock, cpu)); addr = mips_cpc_phys_base(); if (!addr) @@ -50,3 +58,21 @@ int mips_cpc_probe(void) return 0; } + +void mips_cpc_lock_other(unsigned int core) +{ + unsigned curr_core; + preempt_disable(); + curr_core = current_cpu_data.core; + spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), + per_cpu(cpc_core_lock_flags, curr_core)); + write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF); +} + +void mips_cpc_unlock_other(void) +{ + unsigned curr_core = current_cpu_data.core; + spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), + per_cpu(cpc_core_lock_flags, curr_core)); + preempt_enable(); +} -- cgit v1.1 From e2a9e5ad719fb424ab3c30520733aa0e8fbcf1ce Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 3 Mar 2014 12:08:40 +0000 Subject: MIPS: add kmap_noncoherent to wire a cached non-coherent TLB entry This is identical to kmap_coherent apart from the cache coherency attribute used for the TLB entry, so kmap_coherent is abstracted to kmap_prot which is then called for both kmap_coherent & kmap_noncoherent. This will be used by a subsequent patch. 
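Usage mirrors kmap_coherent(): the caller gets a temporary, correctly coloured kernel mapping of the page, but one marked cacheable non-coherent, and drops it when done. A minimal sketch (hypothetical caller):

  #include <linux/mm.h>
  #include <linux/string.h>
  #include <asm/cacheflush.h>

  static void example_touch_page(struct page *page, unsigned long addr)
  {
          void *vaddr = kmap_noncoherent(page, addr);

          /* access the page data; hardware coherence is not maintained */
          memset(vaddr, 0, PAGE_SIZE);

          kunmap_noncoherent();
  }
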
Suggested-by: Leonid Yegoshin Signed-off-by: Paul Burton --- arch/mips/include/asm/cacheflush.h | 6 ++++++ arch/mips/include/asm/pgtable.h | 2 ++ arch/mips/mm/init.c | 14 ++++++++++++-- 3 files changed, 20 insertions(+), 2 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h index 69468de..e08381a 100644 --- a/arch/mips/include/asm/cacheflush.h +++ b/arch/mips/include/asm/cacheflush.h @@ -113,6 +113,12 @@ unsigned long run_uncached(void *func); extern void *kmap_coherent(struct page *page, unsigned long addr); extern void kunmap_coherent(void); +extern void *kmap_noncoherent(struct page *page, unsigned long addr); + +static inline void kunmap_noncoherent(void) +{ + kunmap_coherent(); +} #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE static inline void flush_kernel_dcache_page(struct page *page) diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 008324d..539ddd1 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -32,6 +32,8 @@ struct vm_area_struct; _page_cachable_default) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _page_cachable_default) +#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT) #define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \ _page_cachable_default) #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 4fc74c7..80ff52e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -114,7 +114,7 @@ static void __init kmap_coherent_init(void) static inline void kmap_coherent_init(void) {} #endif -void *kmap_coherent(struct page *page, unsigned long addr) +static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) { enum fixed_addresses idx; unsigned long vaddr, flags, entrylo; @@ -133,7 +133,7 @@ void *kmap_coherent(struct page *page, unsigned long addr) idx += in_interrupt() ? FIX_N_COLOURS : 0; #endif vaddr = __fix_to_virt(FIX_CMAP_END - idx); - pte = mk_pte(page, PAGE_KERNEL); + pte = mk_pte(page, prot); #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) entrylo = pte.pte_high; #else @@ -171,6 +171,16 @@ void *kmap_coherent(struct page *page, unsigned long addr) return (void*) vaddr; } +void *kmap_coherent(struct page *page, unsigned long addr) +{ + return __kmap_pgprot(page, addr, PAGE_KERNEL); +} + +void *kmap_noncoherent(struct page *page, unsigned long addr) +{ + return __kmap_pgprot(page, addr, PAGE_KERNEL_NC); +} + void kunmap_coherent(void) { #ifndef CONFIG_MIPS_MT_SMTC -- cgit v1.1 From 27476f3bf45cf61976bc85b66a0bcd0019feae41 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Sun, 16 Mar 2014 16:21:34 +0000 Subject: MIPS: MT: define write_c0_tchalt macro Define a macro to write to the current TCs TCHalt register. This will be used by a subsequent patch. 
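Unlike the existing write_tc_c0_tchalt(), which uses mttc0 and operates on whichever TC is selected via VPEControl.TargTC, this macro writes the executing TC's own TCHalt register. A minimal sketch of the kind of use that follows (hypothetical):

  #include <asm/mipsmtregs.h>

  static void example_halt_self(void)
  {
          /* take the current TC out of execution, e.g. an offlined VPE */
          write_c0_tchalt(TCHALT_H);
  }
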
Signed-off-by: Paul Burton --- arch/mips/include/asm/mipsmtregs.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h index 6efa79a..5f8052c 100644 --- a/arch/mips/include/asm/mipsmtregs.h +++ b/arch/mips/include/asm/mipsmtregs.h @@ -36,6 +36,8 @@ #define read_c0_tcbind() __read_32bit_c0_register($2, 2) +#define write_c0_tchalt(val) __write_32bit_c0_register($2, 4, val) + #define read_c0_tccontext() __read_32bit_c0_register($2, 5) #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val) -- cgit v1.1 From b0a3eae2b943ef62cb8265aa604c78bb6565a2cd Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 24 Dec 2013 03:44:28 +0000 Subject: MIPS: inst.h: define COP0 wait op The func field for the wait instruction was missing from inst.h - this patch adds it. Signed-off-by: Paul Burton --- arch/mips/include/uapi/asm/inst.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index df6e775..044123b 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -125,7 +125,8 @@ enum bcop_op { enum cop0_coi_func { tlbr_op = 0x01, tlbwi_op = 0x02, tlbwr_op = 0x06, tlbp_op = 0x08, - rfe_op = 0x10, eret_op = 0x18 + rfe_op = 0x10, eret_op = 0x18, + wait_op = 0x20, }; /* -- cgit v1.1 From 6f5bb42498b0c7901d32a81d163962fd8e37f827 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 4 Mar 2014 15:11:12 +0000 Subject: MIPS: inst.h: define MT yield op The opcode for the MT ASE yield instruction within the spec3 group was missing. This patch adds it for use by a subsequent patch. Signed-off-by: Paul Burton --- arch/mips/include/uapi/asm/inst.h | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 044123b..b7492c6 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -74,16 +74,17 @@ enum spec2_op { enum spec3_op { ext_op, dextm_op, dextu_op, dext_op, ins_op, dinsm_op, dinsu_op, dins_op, - lx_op = 0x0a, lwle_op = 0x19, - lwre_op = 0x1a, cachee_op = 0x1b, - sbe_op = 0x1c, she_op = 0x1d, - sce_op = 0x1e, swe_op = 0x1f, - bshfl_op = 0x20, swle_op = 0x21, - swre_op = 0x22, prefe_op = 0x23, - dbshfl_op = 0x24, lbue_op = 0x28, - lhue_op = 0x29, lbe_op = 0x2c, - lhe_op = 0x2d, lle_op = 0x2e, - lwe_op = 0x2f, rdhwr_op = 0x3b + yield_op = 0x09, lx_op = 0x0a, + lwle_op = 0x19, lwre_op = 0x1a, + cachee_op = 0x1b, sbe_op = 0x1c, + she_op = 0x1d, sce_op = 0x1e, + swe_op = 0x1f, bshfl_op = 0x20, + swle_op = 0x21, swre_op = 0x22, + prefe_op = 0x23, dbshfl_op = 0x24, + lbue_op = 0x28, lhue_op = 0x29, + lbe_op = 0x2c, lhe_op = 0x2d, + lle_op = 0x2e, lwe_op = 0x2f, + rdhwr_op = 0x3b }; /* -- cgit v1.1 From 7ed82ad12c1553a40a5dd7e9c0b34a80c2c02e47 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 9 Jan 2014 15:27:32 +0000 Subject: MIPS: inst.h: define microMIPS sync op The opcode for the sync instruction within POOL32AXf was missing. This patch adds it for use by a subsequent patch. 
Signed-off-by: Paul Burton --- arch/mips/include/uapi/asm/inst.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/mips') diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index b7492c6..89e9155 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -303,6 +303,7 @@ enum mm_32axf_minor_op { mm_tlbwr_op = 0x0cd, mm_jalrs_op = 0x13c, mm_jalrshb_op = 0x17c, + mm_sync_op = 0x1ad, mm_syscall_op = 0x22d, mm_eret_op = 0x3cd, }; -- cgit v1.1 From f263839ab59a7f4beb74fc9a7ce0234b62ab2b52 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 9 Jan 2014 15:30:37 +0000 Subject: MIPS: inst.h: define microMIPS wait op The opcode for the wait instruction within POOL32AXf was missing. This patch adds it for use by a subsequent patch. Signed-off-by: Paul Burton --- arch/mips/include/uapi/asm/inst.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/mips') diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 89e9155..e6e7fe3 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -305,6 +305,7 @@ enum mm_32axf_minor_op { mm_jalrshb_op = 0x17c, mm_sync_op = 0x1ad, mm_syscall_op = 0x22d, + mm_wait_op = 0x24d, mm_eret_op = 0x3cd, }; -- cgit v1.1 From 8dee5901b20c5c95278aa3a462eb82826ff98b37 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 24 Dec 2013 03:51:39 +0000 Subject: MIPS: uasm: add a label variant of beq This patch allows for use of the beq instruction with labels from uasm, much as bne & others already do. It will be used by a subsequent patch. Signed-off-by: Paul Burton --- arch/mips/include/asm/uasm.h | 2 ++ arch/mips/mm/uasm.c | 8 ++++++++ 2 files changed, 10 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index c33a956..d1275a2 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -264,6 +264,8 @@ void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid); void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid); +void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1, + unsigned int r2, int lid); void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index b9d14b6..ae18b82 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -469,6 +469,14 @@ void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid) } UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); +void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1, + unsigned int r2, int lid) +{ + uasm_r_mips_pc16(r, *p, lid); + ISAFUNC(uasm_i_beq)(p, r1, r2, 0); +} +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq)); + void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { -- cgit v1.1 From 49e9529b9d43773307b8c73bd251b71784830c3d Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Sun, 16 Mar 2014 12:58:05 +0000 Subject: MIPS: uasm: add jalr instruction This patch allows use of the jalr instruction from uasm. It will be used by a subsequent patch. 
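With this helper the first register argument is rd and the second rs, so a conventional subroutine call through a register can be emitted as below (a hypothetical fragment; register numbers are raw GPR indices):

  #include <asm/uasm.h>

  static void example_emit_call(u32 **p, unsigned long target)
  {
          UASM_i_LA(p, 25, target);       /* t9 = target address */
          uasm_i_jalr(p, 31, 25);         /* jalr ra, t9 */
          uasm_i_nop(p);                  /* branch delay slot */
  }
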
Signed-off-by: Paul Burton --- arch/mips/include/asm/uasm.h | 4 ++++ arch/mips/mm/uasm-mips.c | 1 + arch/mips/mm/uasm.c | 9 +++++---- 3 files changed, 10 insertions(+), 4 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index d1275a2..a3d88ae 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -74,6 +74,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ #define Ip_u1u2(op) \ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) +#define Ip_u2u1(op) \ +void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) + #define Ip_u1s2(op) \ void ISAOPC(op)(u32 **buf, unsigned int a, signed int b) @@ -114,6 +117,7 @@ Ip_u2u1msbu3(_ext); Ip_u2u1msbu3(_ins); Ip_u1(_j); Ip_u1(_jal); +Ip_u2u1(_jalr); Ip_u1(_jr); Ip_u2s3u1(_ld); Ip_u3u1u2(_ldx); diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 3abd609..45e3dc5 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -82,6 +82,7 @@ static struct insn insn_table[] = { { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index ae18b82..a77a4b8 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -49,10 +49,10 @@ enum opcode { insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, - insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, - insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, - insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, - insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, + insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_ld, + insn_ldx, insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, + insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, + insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, }; @@ -250,6 +250,7 @@ I_u2u1msbdu3(_ext) I_u2u1msbu3(_ins) I_u1(_j) I_u1(_jal) +I_u2u1(_jalr) I_u1(_jr) I_u2s3u1(_ld) I_u2s3u1(_ll) -- cgit v1.1 From 729ff56169395cb3e467e4e3c1e2637f4d3436ce Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 24 Dec 2013 03:49:45 +0000 Subject: MIPS: uasm: add sync instruction This patch allows use of the sync instruction from uasm. It will be used by a subsequent patch. 
Signed-off-by: Paul Burton --- arch/mips/include/asm/uasm.h | 1 + arch/mips/mm/uasm-micromips.c | 1 + arch/mips/mm/uasm-mips.c | 1 + arch/mips/mm/uasm.c | 5 +++-- 4 files changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index a3d88ae..306892c 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -141,6 +141,7 @@ Ip_u2u1u3(_sra); Ip_u2u1u3(_srl); Ip_u3u1u2(_subu); Ip_u2s3u1(_sw); +Ip_u1(_sync); Ip_u1(_syscall); Ip_0(_tlbp); Ip_0(_tlbr); diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index b8d580c..9500f2a 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -99,6 +99,7 @@ static struct insn insn_table_MM[] = { { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD }, { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD }, { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, + { insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS }, { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 }, { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 45e3dc5..51063fd 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -107,6 +107,7 @@ static struct insn insn_table[] = { { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE }, { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index a77a4b8..7c13801 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -53,8 +53,8 @@ enum opcode { insn_ldx, insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, - insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, - insn_xori, + insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, + insn_xor, insn_xori, }; struct insn { @@ -271,6 +271,7 @@ I_u2u1u3(_srl) I_u2u1u3(_rotr) I_u3u1u2(_subu) I_u2s3u1(_sw) +I_u1(_sync) I_0(_tlbp) I_0(_tlbr) I_0(_tlbwi) -- cgit v1.1 From 53ed138986e1022c2b6fef7f8e9731f5bf3e6af1 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 24 Dec 2013 03:50:35 +0000 Subject: MIPS: uasm: add wait instruction This patch allows use of the wait instruction from uasm. It will be used by a subsequent patch. 
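Together with the sync and jalr additions above, this allows the forthcoming CPS power management code to assemble small low-power entry sequences at run time. A simplified sketch of the kind of sequence involved (illustrative only; a real user must also write the generated code back to memory and flush the icache before executing it):

  #include <asm/uasm.h>

  static u32 example_buf[8];

  static void example_emit_idle_entry(void)
  {
          u32 *p = example_buf;

          uasm_i_sync(&p, 0);     /* complete outstanding memory accesses */
          uasm_i_wait(&p, 0);     /* enter the low power wait state */
          uasm_i_jr(&p, 31);      /* return to caller via ra */
          uasm_i_nop(&p);         /* branch delay slot */
  }
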
Signed-off-by: Paul Burton --- arch/mips/include/asm/uasm.h | 1 + arch/mips/mm/uasm-micromips.c | 1 + arch/mips/mm/uasm-mips.c | 1 + arch/mips/mm/uasm.c | 3 ++- 4 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index 306892c..8810801 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -147,6 +147,7 @@ Ip_0(_tlbp); Ip_0(_tlbr); Ip_0(_tlbwi); Ip_0(_tlbwr); +Ip_u1(_wait); Ip_u3u1u2(_xor); Ip_u2u1u3(_xori); diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 9500f2a..bcbcf4a 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -104,6 +104,7 @@ static struct insn insn_table_MM[] = { { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, + { insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM }, { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD }, { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, { insn_dins, 0, 0 }, diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 51063fd..c69f785 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -113,6 +113,7 @@ static struct insn insn_table[] = { { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, + { insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM }, { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, { insn_invalid, 0, 0 } diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 7c13801..46d2173 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -54,7 +54,7 @@ enum opcode { insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, - insn_xor, insn_xori, + insn_wait, insn_xor, insn_xori, }; struct insn { @@ -276,6 +276,7 @@ I_0(_tlbp) I_0(_tlbr) I_0(_tlbwi) I_0(_tlbwr) +I_u1(_wait); I_u3u1u2(_xor) I_u2u1u3(_xori) I_u2u1msbu3(_dins); -- cgit v1.1 From d674dd14e85c49ca0e422de53a4c2b5bf44a339a Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 4 Mar 2014 15:12:36 +0000 Subject: MIPS: uasm: add MT ASE yield instruction This patch allows use of the MT ASE yield instruction from uasm. It will be used by a subsequent patch. Signed-off-by: Paul Burton --- arch/mips/include/asm/uasm.h | 1 + arch/mips/mm/uasm-mips.c | 1 + arch/mips/mm/uasm.c | 10 +++++++++- 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index 8810801..3d80387 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -150,6 +150,7 @@ Ip_0(_tlbwr); Ip_u1(_wait); Ip_u3u1u2(_xor); Ip_u2u1u3(_xori); +Ip_u2u1(_yield); /* Handle labels. 
*/ diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index c69f785..4a2fc82 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -116,6 +116,7 @@ static struct insn insn_table[] = { { insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM }, { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, + { insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD }, { insn_invalid, 0, 0 } }; diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 46d2173..55a1fdf 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -54,7 +54,7 @@ enum opcode { insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, - insn_wait, insn_xor, insn_xori, + insn_wait, insn_xor, insn_xori, insn_yield, }; struct insn { @@ -200,6 +200,13 @@ Ip_u1u2(op) \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); +#define I_u2u1(op) \ +Ip_u1u2(op) \ +{ \ + build_insn(buf, insn##op, b, a); \ +} \ +UASM_EXPORT_SYMBOL(uasm_i##op); + #define I_u1s2(op) \ Ip_u1s2(op) \ { \ @@ -279,6 +286,7 @@ I_0(_tlbwr) I_u1(_wait); I_u3u1u2(_xor) I_u2u1u3(_xori) +I_u2u1(_yield) I_u2u1msbu3(_dins); I_u2u1msb32u3(_dinsm); I_u1(_syscall); -- cgit v1.1 From 245a7868d2f2e54a9a9b084de00d003a9badb2a5 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 14 Apr 2014 12:04:27 +0100 Subject: MIPS: smp-cps: rework core/VPE initialisation When hotplug and/or a powered down idle state are supported cases will arise where a non-zero VPE must be brought online without VPE 0, and it where multiple VPEs must be onlined simultaneously. This patch prepares for that by: - Splitting struct boot_config into core & VPE boot config structures, allocated one per core or VPE respectively. This allows for multiple VPEs to be onlined simultaneously without clobbering each others configuration. - Indicating which VPEs should be online within a core at any given time using a bitmap. This allows multiple VPEs to be brought online simultaneously and also indicates to VPE 0 whether it should halt after starting any non-zero VPEs that should be online within the core. For example if all VPEs within a core are offlined via hotplug and the user onlines the second VPE within that core: 1) The core will be powered up. 2) VPE 0 will run from the BEV (ie. mips_cps_core_entry) to initialise the core. 3) VPE 0 will start VPE 1 because its bit is set in the cores bitmap. 4) VPE 0 will halt itself because its bit is clear in the cores bitmap. - Moving the core & VPE initialisation to assembly code which does not make any use of the stack. This is because if a non-zero VPE is to be brought online in a powered down core then when VPE 0 of that core runs it may not have a valid stack, and even if it did then it's messy to run through parts of generic kernel code on VPE 0 before starting the correct VPE. 
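To illustrate how the new structures are meant to fit together, here is a simplified, hypothetical sketch; the real logic lives in arch/mips/kernel/smp-cps.c and mips_cps_boot_vpes(), and must update vpe_mask atomically with respect to other callers:

  #include <linux/atomic.h>
  #include <asm/smp-cps.h>

  static void example_describe_vpe(unsigned int core, unsigned int vpe,
                                   unsigned long pc, unsigned long sp,
                                   unsigned long gp)
  {
          struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
          struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe];

          /* where the VPE should start executing once brought online */
          vpe_cfg->pc = pc;
          vpe_cfg->sp = sp;
          vpe_cfg->gp = gp;

          /* mark the VPE as one that should be online within its core */
          atomic_set(&core_cfg->vpe_mask,
                     atomic_read(&core_cfg->vpe_mask) | (1 << vpe));
  }
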
Signed-off-by: Paul Burton --- arch/mips/include/asm/smp-cps.h | 14 +- arch/mips/kernel/asm-offsets.c | 14 +- arch/mips/kernel/cps-vec.S | 282 ++++++++++++++++++++++++++++++++++++++-- arch/mips/kernel/mips-cpc.c | 2 + arch/mips/kernel/smp-cps.c | 223 ++++++++++++------------------- 5 files changed, 374 insertions(+), 161 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h index d60d1a2..d49279e 100644 --- a/arch/mips/include/asm/smp-cps.h +++ b/arch/mips/include/asm/smp-cps.h @@ -13,17 +13,23 @@ #ifndef __ASSEMBLY__ -struct boot_config { - unsigned int core; - unsigned int vpe; +struct vpe_boot_config { unsigned long pc; unsigned long sp; unsigned long gp; }; -extern struct boot_config mips_cps_bootcfg; +struct core_boot_config { + atomic_t vpe_mask; + struct vpe_boot_config *vpe_config; +}; + +extern struct core_boot_config *mips_cps_core_bootcfg; extern void mips_cps_core_entry(void); +extern void mips_cps_core_init(void); + +extern struct vpe_boot_config *mips_cps_boot_vpes(void); #else /* __ASSEMBLY__ */ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index e085cde..d63490d 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -487,10 +487,14 @@ void output_kvm_defines(void) void output_cps_defines(void) { COMMENT(" MIPS CPS offsets. "); - OFFSET(BOOTCFG_CORE, boot_config, core); - OFFSET(BOOTCFG_VPE, boot_config, vpe); - OFFSET(BOOTCFG_PC, boot_config, pc); - OFFSET(BOOTCFG_SP, boot_config, sp); - OFFSET(BOOTCFG_GP, boot_config, gp); + + OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask); + OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config); + DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config)); + + OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc); + OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp); + OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp); + DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config)); } #endif diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index f7a46db..57ec18c 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -14,12 +14,33 @@ #include #include #include +#include -#define GCR_CL_COHERENCE_OFS 0x2008 +#define GCR_CL_COHERENCE_OFS 0x2008 +#define GCR_CL_ID_OFS 0x2028 + +.extern mips_cm_base + +.set noreorder + + /* + * Set dest to non-zero if the core supports the MT ASE, else zero. If + * MT is not supported then branch to nomt. + */ + .macro has_mt dest, nomt + mfc0 \dest, CP0_CONFIG + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 1 + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 2 + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 3 + andi \dest, \dest, MIPS_CONF3_MT + beqz \dest, \nomt + .endm .section .text.cps-vec .balign 0x1000 -.set noreorder LEAF(mips_cps_core_entry) /* @@ -134,21 +155,24 @@ dcache_done: jr t0 nop -1: /* We're up, cached & coherent */ + /* + * We're up, cached & coherent. Perform any further required core-level + * initialisation. + */ +1: jal mips_cps_core_init + nop /* - * TODO: We should check the VPE number we intended to boot here, and - * if non-zero we should start that VPE and stop this one. For - * the moment this doesn't matter since CPUs are brought up - * sequentially and in order, but once hotplug is implemented - * this will need revisiting. + * Boot any other VPEs within this core that should be online, and + * deactivate this VPE if it should be offline. */ + jal mips_cps_boot_vpes + nop /* Off we go! 
*/ - la t0, mips_cps_bootcfg - lw t1, BOOTCFG_PC(t0) - lw gp, BOOTCFG_GP(t0) - lw sp, BOOTCFG_SP(t0) + lw t1, VPEBOOTCFG_PC(v0) + lw gp, VPEBOOTCFG_GP(v0) + lw sp, VPEBOOTCFG_SP(v0) jr t1 nop END(mips_cps_core_entry) @@ -189,3 +213,237 @@ LEAF(excep_ejtag) jr k0 nop END(excep_ejtag) + +LEAF(mips_cps_core_init) +#ifdef CONFIG_MIPS_MT + /* Check that the core implements the MT ASE */ + has_mt t0, 3f + nop + + .set push + .set mt + + /* Only allow 1 TC per VPE to execute... */ + dmt + + /* ...and for the moment only 1 VPE */ + dvpe + la t1, 1f + jr.hb t1 + nop + + /* Enter VPE configuration state */ +1: mfc0 t0, CP0_MVPCONTROL + ori t0, t0, MVPCONTROL_VPC + mtc0 t0, CP0_MVPCONTROL + + /* Retrieve the number of VPEs within the core */ + mfc0 t0, CP0_MVPCONF0 + srl t0, t0, MVPCONF0_PVPE_SHIFT + andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) + addi t7, t0, 1 + + /* If there's only 1, we're done */ + beqz t0, 2f + nop + + /* Loop through each VPE within this core */ + li t5, 1 + +1: /* Operate on the appropriate TC */ + mtc0 t5, CP0_VPECONTROL + ehb + + /* Bind TC to VPE (1:1 TC:VPE mapping) */ + mttc0 t5, CP0_TCBIND + + /* Set exclusive TC, non-active, master */ + li t0, VPECONF0_MVP + sll t1, t5, VPECONF0_XTC_SHIFT + or t0, t0, t1 + mttc0 t0, CP0_VPECONF0 + + /* Set TC non-active, non-allocatable */ + mttc0 zero, CP0_TCSTATUS + + /* Set TC halted */ + li t0, TCHALT_H + mttc0 t0, CP0_TCHALT + + /* Next VPE */ + addi t5, t5, 1 + slt t0, t5, t7 + bnez t0, 1b + nop + + /* Leave VPE configuration state */ +2: mfc0 t0, CP0_MVPCONTROL + xori t0, t0, MVPCONTROL_VPC + mtc0 t0, CP0_MVPCONTROL + +3: .set pop +#endif + jr ra + nop + END(mips_cps_core_init) + +LEAF(mips_cps_boot_vpes) + /* Retrieve CM base address */ + la t0, mips_cm_base + lw t0, 0(t0) + + /* Calculate a pointer to this cores struct core_boot_config */ + lw t0, GCR_CL_ID_OFS(t0) + li t1, COREBOOTCFG_SIZE + mul t0, t0, t1 + la t1, mips_cps_core_bootcfg + lw t1, 0(t1) + addu t0, t0, t1 + + /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ + has_mt t6, 1f + li t9, 0 + + /* Find the number of VPEs present in the core */ + mfc0 t1, CP0_MVPCONF0 + srl t1, t1, MVPCONF0_PVPE_SHIFT + andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT + addi t1, t1, 1 + + /* Calculate a mask for the VPE ID from EBase.CPUNum */ + clz t1, t1 + li t2, 31 + subu t1, t2, t1 + li t2, 1 + sll t1, t2, t1 + addiu t1, t1, -1 + + /* Retrieve the VPE ID from EBase.CPUNum */ + mfc0 t9, $15, 1 + and t9, t9, t1 + +1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ + li t1, VPEBOOTCFG_SIZE + mul v0, t9, t1 + lw t7, COREBOOTCFG_VPECONFIG(t0) + addu v0, v0, t7 + +#ifdef CONFIG_MIPS_MT + + /* If the core doesn't support MT then return */ + bnez t6, 1f + nop + jr ra + nop + + .set push + .set mt + +1: /* Enter VPE configuration state */ + dvpe + la t1, 1f + jr.hb t1 + nop +1: mfc0 t1, CP0_MVPCONTROL + ori t1, t1, MVPCONTROL_VPC + mtc0 t1, CP0_MVPCONTROL + ehb + + /* Loop through each VPE */ + lw t6, COREBOOTCFG_VPEMASK(t0) + move t8, t6 + li t5, 0 + + /* Check whether the VPE should be running. 
If not, skip it */ +1: andi t0, t6, 1 + beqz t0, 2f + nop + + /* Operate on the appropriate TC */ + mfc0 t0, CP0_VPECONTROL + ori t0, t0, VPECONTROL_TARGTC + xori t0, t0, VPECONTROL_TARGTC + or t0, t0, t5 + mtc0 t0, CP0_VPECONTROL + ehb + + /* Skip the VPE if its TC is not halted */ + mftc0 t0, CP0_TCHALT + beqz t0, 2f + nop + + /* Calculate a pointer to the VPEs struct vpe_boot_config */ + li t0, VPEBOOTCFG_SIZE + mul t0, t0, t5 + addu t0, t0, t7 + + /* Set the TC restart PC */ + lw t1, VPEBOOTCFG_PC(t0) + mttc0 t1, CP0_TCRESTART + + /* Set the TC stack pointer */ + lw t1, VPEBOOTCFG_SP(t0) + mttgpr t1, sp + + /* Set the TC global pointer */ + lw t1, VPEBOOTCFG_GP(t0) + mttgpr t1, gp + + /* Copy config from this VPE */ + mfc0 t0, CP0_CONFIG + mttc0 t0, CP0_CONFIG + + /* Ensure no software interrupts are pending */ + mttc0 zero, CP0_CAUSE + mttc0 zero, CP0_STATUS + + /* Set TC active, not interrupt exempt */ + mftc0 t0, CP0_TCSTATUS + li t1, ~TCSTATUS_IXMT + and t0, t0, t1 + ori t0, t0, TCSTATUS_A + mttc0 t0, CP0_TCSTATUS + + /* Clear the TC halt bit */ + mttc0 zero, CP0_TCHALT + + /* Set VPE active */ + mftc0 t0, CP0_VPECONF0 + ori t0, t0, VPECONF0_VPA + mttc0 t0, CP0_VPECONF0 + + /* Next VPE */ +2: srl t6, t6, 1 + addi t5, t5, 1 + bnez t6, 1b + nop + + /* Leave VPE configuration state */ + mfc0 t1, CP0_MVPCONTROL + xori t1, t1, MVPCONTROL_VPC + mtc0 t1, CP0_MVPCONTROL + ehb + evpe + + /* Check whether this VPE is meant to be running */ + li t0, 1 + sll t0, t0, t9 + and t0, t0, t8 + bnez t0, 2f + nop + + /* This VPE should be offline, halt the TC */ + li t0, TCHALT_H + mtc0 t0, CP0_TCHALT + la t0, 1f +1: jr.hb t0 + nop + +2: .set pop + +#endif /* CONFIG_MIPS_MT */ + + /* Return */ + jr ra + nop + END(mips_cps_boot_vpes) diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index 2368fc5..ba47360 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -9,6 +9,8 @@ */ #include +#include +#include #include #include diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 536eec0..af90e82 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -26,98 +26,37 @@ static DECLARE_BITMAP(core_power, NR_CPUS); -struct boot_config mips_cps_bootcfg; +struct core_boot_config *mips_cps_core_bootcfg; -static void init_core(void) +static unsigned core_vpe_count(unsigned core) { - unsigned int nvpes, t; - u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status; + unsigned cfg; - if (!cpu_has_mipsmt) - return; - - /* Enter VPE configuration state */ - dvpe(); - set_c0_mvpcontrol(MVPCONTROL_VPC); - - /* Retrieve the count of VPEs in this core */ - mvpconf0 = read_c0_mvpconf0(); - nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - smp_num_siblings = nvpes; - - for (t = 1; t < nvpes; t++) { - /* Use a 1:1 mapping of TC index to VPE index */ - settc(t); - - /* Bind 1 TC to this VPE */ - tcbind = read_tc_c0_tcbind(); - tcbind &= ~TCBIND_CURVPE; - tcbind |= t << TCBIND_CURVPE_SHIFT; - write_tc_c0_tcbind(tcbind); - - /* Set exclusive TC, non-active, master */ - vpeconf0 = read_vpe_c0_vpeconf0(); - vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA); - vpeconf0 |= t << VPECONF0_XTC_SHIFT; - vpeconf0 |= VPECONF0_MVP; - write_vpe_c0_vpeconf0(vpeconf0); - - /* Declare TC non-active, non-allocatable & interrupt exempt */ - tcstatus = read_tc_c0_tcstatus(); - tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA); - tcstatus |= TCSTATUS_IXMT; - write_tc_c0_tcstatus(tcstatus); - - /* Halt the TC */ - write_tc_c0_tchalt(TCHALT_H); - - /* Allow only 
1 TC to execute */ - vpecontrol = read_vpe_c0_vpecontrol(); - vpecontrol &= ~VPECONTROL_TE; - write_vpe_c0_vpecontrol(vpecontrol); - - /* Copy (most of) Status from VPE 0 */ - status = read_c0_status(); - status &= ~(ST0_IM | ST0_IE | ST0_KSU); - status |= ST0_CU0; - write_vpe_c0_status(status); - - /* Copy Config from VPE 0 */ - write_vpe_c0_config(read_c0_config()); - write_vpe_c0_config7(read_c0_config7()); - - /* Ensure no software interrupts are pending */ - write_vpe_c0_cause(0); - - /* Sync Count */ - write_vpe_c0_count(read_c0_count()); - } + if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) + return 1; - /* Leave VPE configuration state */ - clear_c0_mvpcontrol(MVPCONTROL_VPC); + write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); + cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; + return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; } static void __init cps_smp_setup(void) { unsigned int ncores, nvpes, core_vpes; int c, v; - u32 core_cfg, *entry_code; + u32 *entry_code; /* Detect & record VPE topology */ ncores = mips_cm_numcores(); pr_info("VPE topology "); for (c = nvpes = 0; c < ncores; c++) { - if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) { - write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF); - core_cfg = read_gcr_co_config(); - core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >> - CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; - } else { - core_vpes = 1; - } - + core_vpes = core_vpe_count(c); pr_cont("%c%u", c ? ',' : '{', core_vpes); + /* Use the number of VPEs in core 0 for smp_num_siblings */ + if (!c) + smp_num_siblings = core_vpes; + for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { cpu_data[nvpes + v].core = c; #ifdef CONFIG_MIPS_MT_SMP @@ -140,12 +79,8 @@ static void __init cps_smp_setup(void) /* Core 0 is powered up (we're running on it) */ bitmap_set(core_power, 0, 1); - /* Disable MT - we only want to run 1 TC per VPE */ - if (cpu_has_mipsmt) - dmt(); - /* Initialise core 0 */ - init_core(); + mips_cps_core_init(); /* Patch the start of mips_cps_core_entry to provide the CM base */ entry_code = (u32 *)&mips_cps_core_entry; @@ -157,15 +92,60 @@ static void __init cps_smp_setup(void) static void __init cps_prepare_cpus(unsigned int max_cpus) { + unsigned ncores, core_vpes, c; + mips_mt_set_cpuoptions(); + + /* Allocate core boot configuration structs */ + ncores = mips_cm_numcores(); + mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), + GFP_KERNEL); + if (!mips_cps_core_bootcfg) { + pr_err("Failed to allocate boot config for %u cores\n", ncores); + goto err_out; + } + + /* Allocate VPE boot configuration structs */ + for (c = 0; c < ncores; c++) { + core_vpes = core_vpe_count(c); + mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, + sizeof(*mips_cps_core_bootcfg[c].vpe_config), + GFP_KERNEL); + if (!mips_cps_core_bootcfg[c].vpe_config) { + pr_err("Failed to allocate %u VPE boot configs\n", + core_vpes); + goto err_out; + } + } + + /* Mark this CPU as booted */ + atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask, + 1 << cpu_vpe_id(¤t_cpu_data)); + + return; +err_out: + /* Clean up allocations */ + if (mips_cps_core_bootcfg) { + for (c = 0; c < ncores; c++) + kfree(mips_cps_core_bootcfg[c].vpe_config); + kfree(mips_cps_core_bootcfg); + mips_cps_core_bootcfg = NULL; + } + + /* Effectively disable SMP by declaring CPUs not present */ + for_each_possible_cpu(c) { + if (c == 0) + continue; + set_cpu_present(c, false); + } } -static void boot_core(struct boot_config *cfg) +static void 
boot_core(unsigned core) { u32 access; /* Select the appropriate core */ - write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF); + write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); /* Set its reset vector */ write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); @@ -175,15 +155,12 @@ static void boot_core(struct boot_config *cfg) /* Ensure the core can access the GCRs */ access = read_gcr_access(); - access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core); + access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core); write_gcr_access(access); - /* Copy cfg */ - mips_cps_bootcfg = *cfg; - if (mips_cpc_present()) { /* Select the appropriate core */ - write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF); + write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF); /* Reset the core */ write_cpc_co_cmd(CPC_Cx_CMD_RESET); @@ -193,77 +170,47 @@ static void boot_core(struct boot_config *cfg) } /* The core is now powered up */ - bitmap_set(core_power, cfg->core, 1); + bitmap_set(core_power, core, 1); } -static void boot_vpe(void *info) +static void remote_vpe_boot(void *dummy) { - struct boot_config *cfg = info; - u32 tcstatus, vpeconf0; - - /* Enter VPE configuration state */ - dvpe(); - set_c0_mvpcontrol(MVPCONTROL_VPC); - - settc(cfg->vpe); - - /* Set the TC restart PC */ - write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); - - /* Activate the TC, allow interrupts */ - tcstatus = read_tc_c0_tcstatus(); - tcstatus &= ~TCSTATUS_IXMT; - tcstatus |= TCSTATUS_A; - write_tc_c0_tcstatus(tcstatus); - - /* Clear the TC halt bit */ - write_tc_c0_tchalt(0); - - /* Activate the VPE */ - vpeconf0 = read_vpe_c0_vpeconf0(); - vpeconf0 |= VPECONF0_VPA; - write_vpe_c0_vpeconf0(vpeconf0); - - /* Set the stack & global pointer registers */ - write_tc_gpr_sp(cfg->sp); - write_tc_gpr_gp(cfg->gp); - - /* Leave VPE configuration state */ - clear_c0_mvpcontrol(MVPCONTROL_VPC); - - /* Enable other VPEs to execute */ - evpe(EVPE_ENABLE); + mips_cps_boot_vpes(); } static void cps_boot_secondary(int cpu, struct task_struct *idle) { - struct boot_config cfg; + unsigned core = cpu_data[cpu].core; + unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); + struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; + struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; unsigned int remote; int err; - cfg.core = cpu_data[cpu].core; - cfg.vpe = cpu_vpe_id(&cpu_data[cpu]); - cfg.pc = (unsigned long)&smp_bootstrap; - cfg.sp = __KSTK_TOS(idle); - cfg.gp = (unsigned long)task_thread_info(idle); + vpe_cfg->pc = (unsigned long)&smp_bootstrap; + vpe_cfg->sp = __KSTK_TOS(idle); + vpe_cfg->gp = (unsigned long)task_thread_info(idle); - if (!test_bit(cfg.core, core_power)) { + atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); + + if (!test_bit(core, core_power)) { /* Boot a VPE on a powered down core */ - boot_core(&cfg); + boot_core(core); return; } - if (cfg.core != current_cpu_data.core) { + if (core != current_cpu_data.core) { /* Boot a VPE on another powered up core */ for (remote = 0; remote < NR_CPUS; remote++) { - if (cpu_data[remote].core != cfg.core) + if (cpu_data[remote].core != core) continue; if (cpu_online(remote)) break; } BUG_ON(remote >= NR_CPUS); - err = smp_call_function_single(remote, boot_vpe, &cfg, 1); + err = smp_call_function_single(remote, remote_vpe_boot, + NULL, 1); if (err) panic("Failed to call remote CPU\n"); return; @@ -272,7 +219,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) BUG_ON(!cpu_has_mipsmt); /* Boot a VPE on this core */ - 
boot_vpe(&cfg); + mips_cps_boot_vpes(); } static void cps_init_secondary(void) @@ -281,10 +228,6 @@ static void cps_init_secondary(void) if (cpu_has_mipsmt) dmt(); - /* TODO: revisit this assumption once hotplug is implemented */ - if (cpu_vpe_id(¤t_cpu_data) == 0) - init_core(); - change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | STATUSF_IP7); } -- cgit v1.1 From 68c1232f51350b007cb1f05260e9e784770ec513 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 14 Mar 2014 16:06:16 +0000 Subject: MIPS: smp-cps: function to determine whether CPS SMP is in use The core power down state for cpuidle will require that the CPS SMP implementation is in use. This patch provides a mips_cps_smp_in_use function which determines whether or not the CPS SMP implementation is currently in use. Signed-off-by: Paul Burton --- arch/mips/include/asm/smp-cps.h | 2 ++ arch/mips/kernel/smp-cps.c | 6 ++++++ 2 files changed, 8 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h index d49279e..324df2c 100644 --- a/arch/mips/include/asm/smp-cps.h +++ b/arch/mips/include/asm/smp-cps.h @@ -31,6 +31,8 @@ extern void mips_cps_core_init(void); extern struct vpe_boot_config *mips_cps_boot_vpes(void); +extern bool mips_cps_smp_in_use(void); + #else /* __ASSEMBLY__ */ .extern mips_cps_bootcfg; diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index af90e82..c7879fb 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -260,6 +260,12 @@ static struct plat_smp_ops cps_smp_ops = { .cpus_done = cps_cpus_done, }; +bool mips_cps_smp_in_use(void) +{ + extern struct plat_smp_ops *mp_ops; + return mp_ops == &cps_smp_ops; +} + int register_cps_smp_ops(void) { if (!mips_cm_present()) { -- cgit v1.1 From 0f4d3d1155d9a5e71e74658ac50b61141e370cf3 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 14 Apr 2014 12:21:49 +0100 Subject: MIPS: smp-cps: flush cache after patching mips_cps_core_entry The start of mips_cps_core_entry is patched in order to provide the code with the address of the CM register region at a point where it will be running non-coherent with the rest of the system. However the cache wasn't being flushed after that patching which could in principle lead to secondary cores using an invalid CM base address. The patching is moved to cps_prepare_cpus since local_flush_icache_range has not been initialised at the point cps_smp_setup is called. 
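The pattern, in outline (a sketch of what the hunk below does, not the literal diff; register 3 is $v1, which mips_cps_core_entry expects to hold the CM base):

u32 *p = (u32 *)&mips_cps_core_entry;

/* Emit the patched instruction(s) into the secondary entry stub... */
UASM_i_LA(&p, 3, (long)mips_cm_base);

/* ...then write the modified lines back to memory and invalidate them, so a
 * secondary core that begins fetching while still non-coherent sees the
 * patched code rather than stale data left in this CPU's caches. */
dma_cache_wback_inv((unsigned long)&mips_cps_core_entry,
		    (void *)p - (void *)&mips_cps_core_entry);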
Signed-off-by: Paul Burton --- arch/mips/kernel/smp-cps.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index c7879fb..c3661ca 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -44,7 +44,6 @@ static void __init cps_smp_setup(void) { unsigned int ncores, nvpes, core_vpes; int c, v; - u32 *entry_code; /* Detect & record VPE topology */ ncores = mips_cm_numcores(); @@ -82,10 +81,6 @@ static void __init cps_smp_setup(void) /* Initialise core 0 */ mips_cps_core_init(); - /* Patch the start of mips_cps_core_entry to provide the CM base */ - entry_code = (u32 *)&mips_cps_core_entry; - UASM_i_LA(&entry_code, 3, (long)mips_cm_base); - /* Make core 0 coherent with everything */ write_gcr_cl_coherence(0xff); } @@ -93,9 +88,16 @@ static void __init cps_smp_setup(void) static void __init cps_prepare_cpus(unsigned int max_cpus) { unsigned ncores, core_vpes, c; + u32 *entry_code; mips_mt_set_cpuoptions(); + /* Patch the start of mips_cps_core_entry to provide the CM base */ + entry_code = (u32 *)&mips_cps_core_entry; + UASM_i_LA(&entry_code, 3, (long)mips_cm_base); + dma_cache_wback_inv((unsigned long)&mips_cps_core_entry, + (void *)entry_code - (void *)&mips_cps_core_entry); + /* Allocate core boot configuration structs */ ncores = mips_cm_numcores(); mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), -- cgit v1.1 From dd9233d0470bb8b02b30982c1de0a2dbffb407d5 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 7 Mar 2014 10:42:52 +0000 Subject: MIPS: smp-cps: use CPC core-other locking The core which the CPC core-other region relates to is based upon the core-local core-other addressing register. As its name suggests this register is shared between all VPEs within a core, and if there is a possibility that multiple VPEs within a core will attempt to access another core simultaneously then locking is required. This wasn't previously a problem with the only user being cpu0 during boot, but will be an issue once hotplug is implemented & may race with other users such as cpuidle. Signed-off-by: Paul Burton --- arch/mips/kernel/smp-cps.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index c3661ca..b519c85 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -161,11 +161,10 @@ static void boot_core(unsigned core) write_gcr_access(access); if (mips_cpc_present()) { - /* Select the appropriate core */ - write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF); - /* Reset the core */ + mips_cpc_lock_other(core); write_cpc_co_cmd(CPC_Cx_CMD_RESET); + mips_cpc_unlock_other(); } else { /* Take the core out of reset */ write_gcr_co_reset_release(0); -- cgit v1.1 From 3179d37ee1ed602770a8b8ed975bd30faa85b4a3 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 14 Apr 2014 11:00:56 +0100 Subject: MIPS: pm-cps: add PM state entry code for CPS systems This patch adds code to generate entry & exit code for various low power states available on systems based around the MIPS Coherent Processing System architecture (ie. those with a Coherence Manager, Global Interrupt Controller & for >=CM2 a Cluster Power Controller). States supported are: - Non-coherent wait. This state first leaves the coherent domain and then executes a regular MIPS wait instruction. 
Power savings are found from the elimination of coherency interventions between the core and any other coherent requestors in the system. - Clock gated. This state leaves the coherent domain and then gates the clock input to the core. This removes all dynamic power from the core but leaves the core at the mercy of another to restart its clock. Register state is preserved, but the core can not service interrupts whilst its clock is gated. - Power gated. This deepest state removes all power input to the core. All register state is lost and the core will restart execution from its BEV when another core powers it back up. Because register state is lost this state requires cooperation with the CONFIG_MIPS_CPS SMP implementation in order for the core to exit the state successfully. The code will detect which states are available on the current system during boot & generate the entry/exit code for those states. This will be used by cpuidle & hotplug implementations. Signed-off-by: Paul Burton --- arch/mips/Kconfig | 3 + arch/mips/include/asm/pm-cps.h | 51 +++ arch/mips/include/asm/smp-cps.h | 3 + arch/mips/kernel/Makefile | 1 + arch/mips/kernel/cps-vec.S | 35 ++ arch/mips/kernel/pm-cps.c | 716 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 809 insertions(+) create mode 100644 arch/mips/include/asm/pm-cps.h create mode 100644 arch/mips/kernel/pm-cps.c (limited to 'arch/mips') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5cdc53b..c79e6a4 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2071,6 +2071,9 @@ config MIPS_CPS no external assistance. It is safe to enable this when hardware support is unavailable. +config MIPS_CPS_PM + bool + config MIPS_GIC_IPI bool diff --git a/arch/mips/include/asm/pm-cps.h b/arch/mips/include/asm/pm-cps.h new file mode 100644 index 0000000..625eda5 --- /dev/null +++ b/arch/mips/include/asm/pm-cps.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_PM_CPS_H__ +#define __MIPS_ASM_PM_CPS_H__ + +/* + * The CM & CPC can only handle coherence & power control on a per-core basis, + * thus in an MT system the VPEs within each core are coupled and can only + * enter or exit states requiring CM or CPC assistance in unison. + */ +#ifdef CONFIG_MIPS_MT +# define coupled_coherence cpu_has_mipsmt +#else +# define coupled_coherence 0 +#endif + +/* Enumeration of possible PM states */ +enum cps_pm_state { + CPS_PM_NC_WAIT, /* MIPS wait instruction, non-coherent */ + CPS_PM_CLOCK_GATED, /* Core clock gated */ + CPS_PM_POWER_GATED, /* Core power gated */ + CPS_PM_STATE_COUNT, +}; + +/** + * cps_pm_support_state - determine whether the system supports a PM state + * @state: the state to test for support + * + * Returns true if the system supports the given state, otherwise false. + */ +extern bool cps_pm_support_state(enum cps_pm_state state); + +/** + * cps_pm_enter_state - enter a PM state + * @state: the state to enter + * + * Enter the given PM state. If coupled_coherence is non-zero then it is + * expected that this function be called at approximately the same time on + * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno. 
+ */ +extern int cps_pm_enter_state(enum cps_pm_state state); + +#endif /* __MIPS_ASM_PM_CPS_H__ */ diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h index 324df2c..a06a08a 100644 --- a/arch/mips/include/asm/smp-cps.h +++ b/arch/mips/include/asm/smp-cps.h @@ -33,6 +33,9 @@ extern struct vpe_boot_config *mips_cps_boot_vpes(void); extern bool mips_cps_smp_in_use(void); +extern void mips_cps_pm_save(void); +extern void mips_cps_pm_restore(void); + #else /* __ASSEMBLY__ */ .extern mips_cps_bootcfg; diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 97540a8..6133e8b 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -108,6 +108,7 @@ obj-$(CONFIG_MIPS_CM) += mips-cm.o obj-$(CONFIG_MIPS_CPC) += mips-cpc.o obj-$(CONFIG_CPU_PM) += pm.o +obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o # # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 57ec18c..1c865ae 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -15,6 +15,7 @@ #include #include #include +#include #define GCR_CL_COHERENCE_OFS 0x2008 #define GCR_CL_ID_OFS 0x2028 @@ -447,3 +448,37 @@ LEAF(mips_cps_boot_vpes) jr ra nop END(mips_cps_boot_vpes) + +#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) + + /* Calculate a pointer to this CPUs struct mips_static_suspend_state */ + .macro psstate dest + .set push + .set noat + lw $1, TI_CPU(gp) + sll $1, $1, LONGLOG + la \dest, __per_cpu_offset + addu $1, $1, \dest + lw $1, 0($1) + la \dest, cps_cpu_state + addu \dest, \dest, $1 + .set pop + .endm + +LEAF(mips_cps_pm_save) + /* Save CPU state */ + SUSPEND_SAVE_REGS + psstate t1 + SUSPEND_SAVE_STATIC + jr v0 + nop + END(mips_cps_pm_save) + +LEAF(mips_cps_pm_restore) + /* Restore CPU state */ + psstate t1 + RESUME_RESTORE_STATIC + RESUME_RESTORE_REGS_RETURN + END(mips_cps_pm_restore) + +#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */ diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c new file mode 100644 index 0000000..5aa4c6f --- /dev/null +++ b/arch/mips/kernel/pm-cps.c @@ -0,0 +1,716 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * cps_nc_entry_fn - type of a generated non-coherent state entry function + * @online: the count of online coupled VPEs + * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count + * + * The code entering & exiting non-coherent states is generated at runtime + * using uasm, in order to ensure that the compiler cannot insert a stray + * memory access at an unfortunate time and to allow the generation of optimal + * core-specific code particularly for cache routines. If coupled_coherence + * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state, + * returns the number of VPEs that were in the wait state at the point this + * VPE left it. Returns garbage if coupled_coherence is zero or this is not + * the entry function for CPS_PM_NC_WAIT. 
+ */ +typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count); + +/* + * The entry point of the generated non-coherent idle state entry/exit + * functions. Actually per-core rather than per-CPU. + */ +static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], + nc_asm_enter); + +/* Bitmap indicating which states are supported by the system */ +DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); + +/* + * Indicates the number of coupled VPEs ready to operate in a non-coherent + * state. Actually per-core rather than per-CPU. + */ +static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); +static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); + +/* Indicates online CPUs coupled with the current CPU */ +static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); + +/* + * Used to synchronize entry to deep idle states. Actually per-core rather + * than per-CPU. + */ +static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); + +/* Saved CPU state across the CPS_PM_POWER_GATED state */ +DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); + +/* A somewhat arbitrary number of labels & relocs for uasm */ +static struct uasm_label labels[32] __initdata; +static struct uasm_reloc relocs[32] __initdata; + +/* CPU dependant sync types */ +static unsigned stype_intervention; +static unsigned stype_memory; +static unsigned stype_ordering; + +enum mips_reg { + zero, at, v0, v1, a0, a1, a2, a3, + t0, t1, t2, t3, t4, t5, t6, t7, + s0, s1, s2, s3, s4, s5, s6, s7, + t8, t9, k0, k1, gp, sp, fp, ra, +}; + +bool cps_pm_support_state(enum cps_pm_state state) +{ + return test_bit(state, state_support); +} + +static void coupled_barrier(atomic_t *a, unsigned online) +{ + /* + * This function is effectively the same as + * cpuidle_coupled_parallel_barrier, which can't be used here since + * there's no cpuidle device. 
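+	 *
+	 * In outline: each coupled VPE increments the counter and spins until
+	 * all "online" VPEs have arrived, then increments it a second time on
+	 * the way out; the last VPE through resets the counter to zero so the
+	 * barrier can be reused on the next idle entry.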
+ */ + + if (!coupled_coherence) + return; + + smp_mb__before_atomic_inc(); + atomic_inc(a); + + while (atomic_read(a) < online) + cpu_relax(); + + if (atomic_inc_return(a) == online * 2) { + atomic_set(a, 0); + return; + } + + while (atomic_read(a) > online) + cpu_relax(); +} + +int cps_pm_enter_state(enum cps_pm_state state) +{ + unsigned cpu = smp_processor_id(); + unsigned core = current_cpu_data.core; + unsigned online, left; + cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); + u32 *core_ready_count, *nc_core_ready_count; + void *nc_addr; + cps_nc_entry_fn entry; + struct core_boot_config *core_cfg; + struct vpe_boot_config *vpe_cfg; + + /* Check that there is an entry function for this state */ + entry = per_cpu(nc_asm_enter, core)[state]; + if (!entry) + return -EINVAL; + + /* Calculate which coupled CPUs (VPEs) are online */ +#ifdef CONFIG_MIPS_MT + if (cpu_online(cpu)) { + cpumask_and(coupled_mask, cpu_online_mask, + &cpu_sibling_map[cpu]); + online = cpumask_weight(coupled_mask); + cpumask_clear_cpu(cpu, coupled_mask); + } else +#endif + { + cpumask_clear(coupled_mask); + online = 1; + } + + /* Setup the VPE to run mips_cps_pm_restore when started again */ + if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { + core_cfg = &mips_cps_core_bootcfg[core]; + vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id]; + vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; + vpe_cfg->gp = (unsigned long)current_thread_info(); + vpe_cfg->sp = 0; + } + + /* Indicate that this CPU might not be coherent */ + cpumask_clear_cpu(cpu, &cpu_coherent_mask); + smp_mb__after_clear_bit(); + + /* Create a non-coherent mapping of the core ready_count */ + core_ready_count = per_cpu(ready_count, core); + nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), + (unsigned long)core_ready_count); + nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); + nc_core_ready_count = nc_addr; + + /* Ensure ready_count is zero-initialised before the assembly runs */ + ACCESS_ONCE(*nc_core_ready_count) = 0; + coupled_barrier(&per_cpu(pm_barrier, core), online); + + /* Run the generated entry code */ + left = entry(online, nc_core_ready_count); + + /* Remove the non-coherent mapping of ready_count */ + kunmap_noncoherent(); + + /* Indicate that this CPU is definitely coherent */ + cpumask_set_cpu(cpu, &cpu_coherent_mask); + + /* + * If this VPE is the first to leave the non-coherent wait state then + * it needs to wake up any coupled VPEs still running their wait + * instruction so that they return to cpuidle, which can then complete + * coordination between the coupled VPEs & provide the governor with + * a chance to reflect on the length of time the VPEs were in the + * idle state. 
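+	 *
+	 * A return value of left == online means every coupled VPE was still
+	 * accounted for in ready_count when this one left the wait state,
+	 * i.e. this VPE was the first out and must do the waking.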
+ */ + if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online)) + arch_send_call_function_ipi_mask(coupled_mask); + + return 0; +} + +static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + const struct cache_desc *cache, + unsigned op, int lbl) +{ + unsigned cache_size = cache->ways << cache->waybit; + unsigned i; + const unsigned unroll_lines = 32; + + /* If the cache isn't present this function has it easy */ + if (cache->flags & MIPS_CACHE_NOT_PRESENT) + return; + + /* Load base address */ + UASM_i_LA(pp, t0, (long)CKSEG0); + + /* Calculate end address */ + if (cache_size < 0x8000) + uasm_i_addiu(pp, t1, t0, cache_size); + else + UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); + + /* Start of cache op loop */ + uasm_build_label(pl, *pp, lbl); + + /* Generate the cache ops */ + for (i = 0; i < unroll_lines; i++) + uasm_i_cache(pp, op, i * cache->linesz, t0); + + /* Update the base address */ + uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); + + /* Loop if we haven't reached the end address yet */ + uasm_il_bne(pp, pr, t0, t1, lbl); + uasm_i_nop(pp); +} + +static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + const struct cpuinfo_mips *cpu_info, + int lbl) +{ + unsigned i, fsb_size = 8; + unsigned num_loads = (fsb_size * 3) / 2; + unsigned line_stride = 2; + unsigned line_size = cpu_info->dcache.linesz; + unsigned perf_counter, perf_event; + unsigned revision = cpu_info->processor_id & PRID_REV_MASK; + + /* + * Determine whether this CPU requires an FSB flush, and if so which + * performance counter/event reflect stalls due to a full FSB. + */ + switch (__get_cpu_type(cpu_info->cputype)) { + case CPU_INTERAPTIV: + perf_counter = 1; + perf_event = 51; + break; + + case CPU_PROAPTIV: + /* Newer proAptiv cores don't require this workaround */ + if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) + return 0; + + /* On older ones it's unavailable */ + return -1; + + /* CPUs which do not require the workaround */ + case CPU_P5600: + return 0; + + default: + WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n"); + return -1; + } + + /* + * Ensure that the fill/store buffer (FSB) is not holding the results + * of a prefetch, since if it is then the CPC sequencer may become + * stuck in the D3 (ClrBus) state whilst entering a low power state. + */ + + /* Preserve perf counter setup */ + uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + + /* Setup perf counter to count FSB full pipeline stalls */ + uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); + uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_ehb(pp); + uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_ehb(pp); + + /* Base address for loads */ + UASM_i_LA(pp, t0, (long)CKSEG0); + + /* Start of clear loop */ + uasm_build_label(pl, *pp, lbl); + + /* Perform some loads to fill the FSB */ + for (i = 0; i < num_loads; i++) + uasm_i_lw(pp, zero, i * line_size * line_stride, t0); + + /* + * Invalidate the new D-cache entries so that the cache will need + * refilling (via the FSB) if the loop is executed again. 
+ */ + for (i = 0; i < num_loads; i++) { + uasm_i_cache(pp, Hit_Invalidate_D, + i * line_size * line_stride, t0); + uasm_i_cache(pp, Hit_Writeback_Inv_SD, + i * line_size * line_stride, t0); + } + + /* Completion barrier */ + uasm_i_sync(pp, stype_memory); + uasm_i_ehb(pp); + + /* Check whether the pipeline stalled due to the FSB being full */ + uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ + + /* Loop if it didn't */ + uasm_il_beqz(pp, pr, t1, lbl); + uasm_i_nop(pp); + + /* Restore perf counter 1. The count may well now be wrong... */ + uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_ehb(pp); + uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_ehb(pp); + + return 0; +} + +static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + unsigned r_addr, int lbl) +{ + uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); + uasm_build_label(pl, *pp, lbl); + uasm_i_ll(pp, t1, 0, r_addr); + uasm_i_or(pp, t1, t1, t0); + uasm_i_sc(pp, t1, 0, r_addr); + uasm_il_beqz(pp, pr, t1, lbl); + uasm_i_nop(pp); +} + +static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) +{ + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + u32 *buf, *p; + const unsigned r_online = a0; + const unsigned r_nc_count = a1; + const unsigned r_pcohctl = t7; + const unsigned max_instrs = 256; + unsigned cpc_cmd; + int err; + enum { + lbl_incready = 1, + lbl_poll_cont, + lbl_secondary_hang, + lbl_disable_coherence, + lbl_flush_fsb, + lbl_invicache, + lbl_flushdcache, + lbl_hang, + lbl_set_cont, + lbl_secondary_cont, + lbl_decready, + }; + + /* Allocate a buffer to hold the generated code */ + p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL); + if (!buf) + return NULL; + + /* Clear labels & relocs ready for (re)use */ + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); + + if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { + /* + * Save CPU state. Note the non-standard calling convention + * with the return address placed in v0 to avoid clobbering + * the ra register before it is saved. + */ + UASM_i_LA(&p, t0, (long)mips_cps_pm_save); + uasm_i_jalr(&p, v0, t0); + uasm_i_nop(&p); + } + + /* + * Load addresses of required CM & CPC registers. This is done early + * because they're needed in both the enable & disable coherence steps + * but in the coupled case the enable step will only run on one VPE. + */ + UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence()); + + if (coupled_coherence) { + /* Increment ready_count */ + uasm_i_sync(&p, stype_ordering); + uasm_build_label(&l, p, lbl_incready); + uasm_i_ll(&p, t1, 0, r_nc_count); + uasm_i_addiu(&p, t2, t1, 1); + uasm_i_sc(&p, t2, 0, r_nc_count); + uasm_il_beqz(&p, &r, t2, lbl_incready); + uasm_i_addiu(&p, t1, t1, 1); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + + /* + * If this is the last VPE to become ready for non-coherence + * then it should branch below. + */ + uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); + uasm_i_nop(&p); + + if (state < CPS_PM_POWER_GATED) { + /* + * Otherwise this is not the last VPE to become ready + * for non-coherence. It needs to wait until coherence + * has been disabled before proceeding, which it will do + * by polling for the top bit of ready_count being set. 
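+			 *
+			 * The generated loop below reloads ready_count, branches
+			 * to the secondary continuation label once the value goes
+			 * negative (top bit set), and yields in between polls so
+			 * other thread contexts can make progress.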
+ */ + uasm_i_addiu(&p, t1, zero, -1); + uasm_build_label(&l, p, lbl_poll_cont); + uasm_i_lw(&p, t0, 0, r_nc_count); + uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); + uasm_i_ehb(&p); + uasm_i_yield(&p, zero, t1); + uasm_il_b(&p, &r, lbl_poll_cont); + uasm_i_nop(&p); + } else { + /* + * The core will lose power & this VPE will not continue + * so it can simply halt here. + */ + uasm_i_addiu(&p, t0, zero, TCHALT_H); + uasm_i_mtc0(&p, t0, 2, 4); + uasm_build_label(&l, p, lbl_secondary_hang); + uasm_il_b(&p, &r, lbl_secondary_hang); + uasm_i_nop(&p); + } + } + + /* + * This is the point of no return - this VPE will now proceed to + * disable coherence. At this point we *must* be sure that no other + * VPE within the core will interfere with the L1 dcache. + */ + uasm_build_label(&l, p, lbl_disable_coherence); + + /* Invalidate the L1 icache */ + cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, + Index_Invalidate_I, lbl_invicache); + + /* Writeback & invalidate the L1 dcache */ + cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, + Index_Writeback_Inv_D, lbl_flushdcache); + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + + /* + * Disable all but self interventions. The load from COHCTL is defined + * by the interAptiv & proAptiv SUMs as ensuring that the operation + * resulting from the preceeding store is complete. + */ + uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); + uasm_i_sw(&p, t0, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + /* Sync to ensure previous interventions are complete */ + uasm_i_sync(&p, stype_intervention); + uasm_i_ehb(&p); + + /* Disable coherence */ + uasm_i_sw(&p, zero, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + if (state >= CPS_PM_CLOCK_GATED) { + err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], + lbl_flush_fsb); + if (err) + goto out_err; + + /* Determine the CPC command to issue */ + switch (state) { + case CPS_PM_CLOCK_GATED: + cpc_cmd = CPC_Cx_CMD_CLOCKOFF; + break; + case CPS_PM_POWER_GATED: + cpc_cmd = CPC_Cx_CMD_PWRDOWN; + break; + default: + BUG(); + goto out_err; + } + + /* Issue the CPC command */ + UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); + uasm_i_addiu(&p, t1, zero, cpc_cmd); + uasm_i_sw(&p, t1, 0, t0); + + if (state == CPS_PM_POWER_GATED) { + /* If anything goes wrong just hang */ + uasm_build_label(&l, p, lbl_hang); + uasm_il_b(&p, &r, lbl_hang); + uasm_i_nop(&p); + + /* + * There's no point generating more code, the core is + * powered down & if powered back up will run from the + * reset vector not from here. + */ + goto gen_done; + } + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + } + + if (state == CPS_PM_NC_WAIT) { + /* + * At this point it is safe for all VPEs to proceed with + * execution. This VPE will set the top bit of ready_count + * to indicate to the other VPEs that they may continue. + */ + if (coupled_coherence) + cps_gen_set_top_bit(&p, &l, &r, r_nc_count, + lbl_set_cont); + + /* + * VPEs which did not disable coherence will continue + * executing, after coherence has been disabled, from this + * point. + */ + uasm_build_label(&l, p, lbl_secondary_cont); + + /* Now perform our wait */ + uasm_i_wait(&p, 0); + } + + /* + * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs + * will run this. The first will actually re-enable coherence & the + * rest will just be performing a rather unusual nop. 
+ */ + uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK); + uasm_i_sw(&p, t0, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + + if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { + /* Decrement ready_count */ + uasm_build_label(&l, p, lbl_decready); + uasm_i_sync(&p, stype_ordering); + uasm_i_ll(&p, t1, 0, r_nc_count); + uasm_i_addiu(&p, t2, t1, -1); + uasm_i_sc(&p, t2, 0, r_nc_count); + uasm_il_beqz(&p, &r, t2, lbl_decready); + uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + } + + if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { + /* + * At this point it is safe for all VPEs to proceed with + * execution. This VPE will set the top bit of ready_count + * to indicate to the other VPEs that they may continue. + */ + cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); + + /* + * This core will be reliant upon another core sending a + * power-up command to the CPC in order to resume operation. + * Thus an arbitrary VPE can't trigger the core leaving the + * idle state and the one that disables coherence might as well + * be the one to re-enable it. The rest will continue from here + * after that has been done. + */ + uasm_build_label(&l, p, lbl_secondary_cont); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + } + + /* The core is coherent, time to return to C code */ + uasm_i_jr(&p, ra); + uasm_i_nop(&p); + +gen_done: + /* Ensure the code didn't exceed the resources allocated for it */ + BUG_ON((p - buf) > max_instrs); + BUG_ON((l - labels) > ARRAY_SIZE(labels)); + BUG_ON((r - relocs) > ARRAY_SIZE(relocs)); + + /* Patch branch offsets */ + uasm_resolve_relocs(relocs, labels); + + /* Flush the icache */ + local_flush_icache_range((unsigned long)buf, (unsigned long)p); + + return buf; +out_err: + kfree(buf); + return NULL; +} + +static int __init cps_gen_core_entries(unsigned cpu) +{ + enum cps_pm_state state; + unsigned core = cpu_data[cpu].core; + unsigned dlinesz = cpu_data[cpu].dcache.linesz; + void *entry_fn, *core_rc; + + for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { + if (per_cpu(nc_asm_enter, core)[state]) + continue; + if (!test_bit(state, state_support)) + continue; + + entry_fn = cps_gen_entry_code(cpu, state); + if (!entry_fn) { + pr_err("Failed to generate core %u state %u entry\n", + core, state); + clear_bit(state, state_support); + } + + per_cpu(nc_asm_enter, core)[state] = entry_fn; + } + + if (!per_cpu(ready_count, core)) { + core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); + if (!core_rc) { + pr_err("Failed allocate core %u ready_count\n", core); + return -ENOMEM; + } + per_cpu(ready_count_alloc, core) = core_rc; + + /* Ensure ready_count is aligned to a cacheline boundary */ + core_rc += dlinesz - 1; + core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); + per_cpu(ready_count, core) = core_rc; + } + + return 0; +} + +static int __init cps_pm_init(void) +{ + unsigned cpu; + int err; + + /* Detect appropriate sync types for the system */ + switch (current_cpu_data.cputype) { + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + case CPU_M5150: + case CPU_P5600: + stype_intervention = 0x2; + stype_memory = 0x3; + stype_ordering = 0x10; + break; + + default: + pr_warn("Power management is using heavyweight sync 0\n"); + } + + /* A CM is required for all non-coherent states */ + if (!mips_cm_present()) { + pr_warn("pm-cps: no CM, non-coherent states 
unavailable\n"); + goto out; + } + + /* + * If interrupts were enabled whilst running a wait instruction on a + * non-coherent core then the VPE may end up processing interrupts + * whilst non-coherent. That would be bad. + */ + if (cpu_wait == r4k_wait_irqoff) + set_bit(CPS_PM_NC_WAIT, state_support); + else + pr_warn("pm-cps: non-coherent wait unavailable\n"); + + /* Detect whether a CPC is present */ + if (mips_cpc_present()) { + /* Detect whether clock gating is implemented */ + if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK) + set_bit(CPS_PM_CLOCK_GATED, state_support); + else + pr_warn("pm-cps: CPC does not support clock gating\n"); + + /* Power gating is available with CPS SMP & any CPC */ + if (mips_cps_smp_in_use()) + set_bit(CPS_PM_POWER_GATED, state_support); + else + pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n"); + } else { + pr_warn("pm-cps: no CPC, clock & power gating unavailable\n"); + } + + for_each_present_cpu(cpu) { + err = cps_gen_core_entries(cpu); + if (err) + return err; + } +out: + return 0; +} +arch_initcall(cps_pm_init); -- cgit v1.1 From 1d8f1f5a780abe51257f7d2e33142f33d983a9ed Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 14 Apr 2014 14:13:57 +0100 Subject: MIPS: smp-cps: hotplug support This patch adds support for offlining CPUs via hotplug when using the CONFIG_MIPS_CPS SMP implementation. When a CPU is offlined one of 2 things will happen: - If the CPU is part of a core which implements the MT ASE and there is at least one other VPE online within that core then the VPE will be halted by settings its TCHalt bit. - Otherwise if supported the core will be powered down via the CPC. - Otherwise the CPU will hang by executing an infinite loop. Bringing CPUs back online is then a process of either clearing the appropriate VPEs TCHalt bit or powering up the appropriate core via the CPC. Throughout the process the struct core_boot_config vpe_mask field must be maintained such that mips_cps_boot_vpes will start & stop the correct VPEs. 
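In outline, the vpe_mask bookkeeping looks like the following sketch (names taken from the diff below; error handling and locking omitted):

/* Offline path (cps_cpu_disable): refuse unless the core can really be
 * power gated, then drop this VPE from its core's "should run" mask. */
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
	return -EINVAL;
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);

/* Online path (cps_boot_secondary): set the bit again before restarting
 * anything, so mips_cps_boot_vpes() knows exactly which VPEs to start. */
atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

In practice this is driven through the standard sysfs hotplug interface, e.g. writing 0 and then 1 to /sys/devices/system/cpu/cpu1/online.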
Signed-off-by: Paul Burton --- arch/mips/Kconfig | 2 + arch/mips/kernel/smp-cps.c | 155 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 155 insertions(+), 2 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c79e6a4..860a1e9 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2059,9 +2059,11 @@ config MIPS_CPS depends on SYS_SUPPORTS_MIPS_CPS select MIPS_CM select MIPS_CPC + select MIPS_CPS_PM if HOTPLUG_CPU select MIPS_GIC_IPI select SMP select SYNC_R4K if (CEVT_R4K || CSRC_R4K) + select SYS_SUPPORTS_HOTPLUG_CPU select SYS_SUPPORTS_SMP select WEAK_ORDERING help diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index b519c85..3c30891 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -194,10 +195,12 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); + preempt_disable(); + if (!test_bit(core, core_power)) { /* Boot a VPE on a powered down core */ boot_core(core); - return; + goto out; } if (core != current_cpu_data.core) { @@ -214,13 +217,15 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) NULL, 1); if (err) panic("Failed to call remote CPU\n"); - return; + goto out; } BUG_ON(!cpu_has_mipsmt); /* Boot a VPE on this core */ mips_cps_boot_vpes(); +out: + preempt_enable(); } static void cps_init_secondary(void) @@ -250,6 +255,148 @@ static void cps_cpus_done(void) { } +#ifdef CONFIG_HOTPLUG_CPU + +static int cps_cpu_disable(void) +{ + unsigned cpu = smp_processor_id(); + struct core_boot_config *core_cfg; + + if (!cpu) + return -EBUSY; + + if (!cps_pm_support_state(CPS_PM_POWER_GATED)) + return -EINVAL; + + core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; + atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); + smp_mb__after_atomic_dec(); + set_cpu_online(cpu, false); + cpu_clear(cpu, cpu_callin_map); + + return 0; +} + +static DECLARE_COMPLETION(cpu_death_chosen); +static unsigned cpu_death_sibling; +static enum { + CPU_DEATH_HALT, + CPU_DEATH_POWER, +} cpu_death; + +void play_dead(void) +{ + unsigned cpu, core; + + local_irq_disable(); + idle_task_exit(); + cpu = smp_processor_id(); + cpu_death = CPU_DEATH_POWER; + + if (cpu_has_mipsmt) { + core = cpu_data[cpu].core; + + /* Look for another online VPE within the core */ + for_each_online_cpu(cpu_death_sibling) { + if (cpu_data[cpu_death_sibling].core != core) + continue; + + /* + * There is an online VPE within the core. Just halt + * this TC and leave the core alone. 
+ */ + cpu_death = CPU_DEATH_HALT; + break; + } + } + + /* This CPU has chosen its way out */ + complete(&cpu_death_chosen); + + if (cpu_death == CPU_DEATH_HALT) { + /* Halt this TC */ + write_c0_tchalt(TCHALT_H); + instruction_hazard(); + } else { + /* Power down the core */ + cps_pm_enter_state(CPS_PM_POWER_GATED); + } + + /* This should never be reached */ + panic("Failed to offline CPU %u", cpu); +} + +static void wait_for_sibling_halt(void *ptr_cpu) +{ + unsigned cpu = (unsigned)ptr_cpu; + unsigned vpe_id = cpu_data[cpu].vpe_id; + unsigned halted; + unsigned long flags; + + do { + local_irq_save(flags); + settc(vpe_id); + halted = read_tc_c0_tchalt(); + local_irq_restore(flags); + } while (!(halted & TCHALT_H)); +} + +static void cps_cpu_die(unsigned int cpu) +{ + unsigned core = cpu_data[cpu].core; + unsigned stat; + int err; + + /* Wait for the cpu to choose its way out */ + if (!wait_for_completion_timeout(&cpu_death_chosen, + msecs_to_jiffies(5000))) { + pr_err("CPU%u: didn't offline\n", cpu); + return; + } + + /* + * Now wait for the CPU to actually offline. Without doing this that + * offlining may race with one or more of: + * + * - Onlining the CPU again. + * - Powering down the core if another VPE within it is offlined. + * - A sibling VPE entering a non-coherent state. + * + * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing + * with which we could race, so do nothing. + */ + if (cpu_death == CPU_DEATH_POWER) { + /* + * Wait for the core to enter a powered down or clock gated + * state, the latter happening when a JTAG probe is connected + * in which case the CPC will refuse to power down the core. + */ + do { + mips_cpc_lock_other(core); + stat = read_cpc_co_stat_conf(); + stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; + mips_cpc_unlock_other(); + } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && + stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && + stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); + + /* Indicate the core is powered off */ + bitmap_clear(core_power, core, 1); + } else if (cpu_has_mipsmt) { + /* + * Have a CPU with access to the offlined CPUs registers wait + * for its TC to halt. + */ + err = smp_call_function_single(cpu_death_sibling, + wait_for_sibling_halt, + (void *)cpu, 1); + if (err) + panic("Failed to call remote sibling CPU\n"); + } +} + +#endif /* CONFIG_HOTPLUG_CPU */ + static struct plat_smp_ops cps_smp_ops = { .smp_setup = cps_smp_setup, .prepare_cpus = cps_prepare_cpus, @@ -259,6 +406,10 @@ static struct plat_smp_ops cps_smp_ops = { .send_ipi_single = gic_send_ipi_single, .send_ipi_mask = gic_send_ipi_mask, .cpus_done = cps_cpus_done, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = cps_cpu_disable, + .cpu_die = cps_cpu_die, +#endif }; bool mips_cps_smp_in_use(void) -- cgit v1.1 From 5c399f6eb2c3911d94509f8f39bfd9aed6568f86 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 14 Apr 2014 15:21:25 +0100 Subject: MIPS: smp-cps: prevent multi-core SMP with unsuitable CCA If the user or bootloader sets the CCA to a value which is not suited for multi-core SMP (ie. anything non-coherent) then limit the system to using only a single core and warn the user. 
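The check itself is small; roughly (a sketch of what the hunk below does):

/* Kseg0 cache coherency attribute from CP0 Config.K0: 0x4 (CWBE) and 0x5
 * (CWB) are the coherent write-back attributes, anything else means the
 * secondary cores could not be run cache-coherently with core 0. */
unsigned int cca = read_c0_config() & CONF_CM_CMASK;
bool cca_unsuitable = (cca != 0x4) && (cca != 0x5);

When the CCA is unsuitable and more than one core is present, every CPU on a non-zero core is marked not present so that only core 0 is used.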
Signed-off-by: Paul Burton --- arch/mips/kernel/smp-cps.c | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 3c30891..e2f78b3 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -88,11 +88,38 @@ static void __init cps_smp_setup(void) static void __init cps_prepare_cpus(unsigned int max_cpus) { - unsigned ncores, core_vpes, c; + unsigned ncores, core_vpes, c, cca; + bool cca_unsuitable; u32 *entry_code; mips_mt_set_cpuoptions(); + /* Detect whether the CCA is unsuited to multi-core SMP */ + cca = read_c0_config() & CONF_CM_CMASK; + switch (cca) { + case 0x4: /* CWBE */ + case 0x5: /* CWB */ + /* The CCA is coherent, multi-core is fine */ + cca_unsuitable = false; + break; + + default: + /* CCA is not coherent, multi-core is not usable */ + cca_unsuitable = true; + } + + /* Warn the user if the CCA prevents multi-core */ + ncores = mips_cm_numcores(); + if (cca_unsuitable && ncores > 1) { + pr_warn("Using only one core due to unsuitable CCA 0x%x\n", + cca); + + for_each_present_cpu(c) { + if (cpu_data[c].core) + set_cpu_present(c, false); + } + } + /* Patch the start of mips_cps_core_entry to provide the CM base */ entry_code = (u32 *)&mips_cps_core_entry; UASM_i_LA(&entry_code, 3, (long)mips_cm_base); @@ -100,7 +127,6 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) (void *)entry_code - (void *)&mips_cps_core_entry); /* Allocate core boot configuration structs */ - ncores = mips_cm_numcores(); mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), GFP_KERNEL); if (!mips_cps_core_bootcfg) { -- cgit v1.1 From 33b6866568acce808361fcf25c107c2c94f8eadb Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 14 Apr 2014 15:58:45 +0100 Subject: MIPS: smp-cps: set a coherent default CCA This patch sets a default CCA suited for use with multi-core SMP on all current MIPS CPS based systems. It may still be overriden by the cca= argument on the kernel command line. Signed-off-by: Paul Burton --- arch/mips/kernel/smp-cps.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index e2f78b3..6b96fed 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -76,6 +76,9 @@ static void __init cps_smp_setup(void) __cpu_logical_map[v] = v; } + /* Set a coherent default CCA (CWB) */ + change_c0_config(CONF_CM_CMASK, 0x5); + /* Core 0 is powered up (we're running on it) */ bitmap_set(core_power, 0, 1); -- cgit v1.1 From 0155a06529d4c8425573596720829b6c578c05f2 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 16 Apr 2014 11:10:57 +0100 Subject: MIPS: smp-cps: duplicate core0 CCA on secondary cores Rather than hardcoding CCA=0x5 for secondary cores, re-use the CCA from the boot CPU. This allows overrides of the CCA using the cca= kernel parameter to take effect on all CPUs for consistency. Signed-off-by: Paul Burton --- arch/mips/kernel/cps-vec.S | 11 +++++++---- arch/mips/kernel/smp-cps.c | 8 +++++++- 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 1c865ae..6f4f739 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -45,10 +45,12 @@ LEAF(mips_cps_core_entry) /* - * These first 8 bytes will be patched by cps_smp_setup to load the - * base address of the CM GCRs into register v1. 
+ * These first 12 bytes will be patched by cps_smp_setup to load the + * base address of the CM GCRs into register v1 and the CCA to use into + * register s0. */ .quad 0 + .word 0 /* Check whether we're here due to an NMI */ mfc0 k0, CP0_STATUS @@ -139,10 +141,11 @@ icache_done: add a0, a0, t0 dcache_done: - /* Set Kseg0 cacheable, coherent, write-back, write-allocate */ + /* Set Kseg0 CCA to that in s0 */ mfc0 t0, CP0_CONFIG ori t0, 0x7 - xori t0, 0x2 + xori t0, 0x7 + or t0, t0, s0 mtc0 t0, CP0_CONFIG ehb diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 6b96fed..9e21bdd 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -123,9 +123,15 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) } } - /* Patch the start of mips_cps_core_entry to provide the CM base */ + /* + * Patch the start of mips_cps_core_entry to provide: + * + * v0 = CM base address + * s0 = kseg0 CCA + */ entry_code = (u32 *)&mips_cps_core_entry; UASM_i_LA(&entry_code, 3, (long)mips_cm_base); + uasm_i_addiu(&entry_code, 16, 0, cca); dma_cache_wback_inv((unsigned long)&mips_cps_core_entry, (void *)entry_code - (void *)&mips_cps_core_entry); -- cgit v1.1 From da9f970fdd55a018ab97ec2d25653756407bdaaf Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 14 Apr 2014 16:16:41 +0100 Subject: MIPS: cpuidle wait instruction state Defines a macro intended to allow trivial use of the regular MIPS wait instruction from cpuidle drivers, which may simply invoke the macro within their array of states. Signed-off-by: Paul Burton --- arch/mips/include/asm/idle.h | 14 ++++++++++++++ arch/mips/kernel/idle.c | 11 +++++++++++ 2 files changed, 25 insertions(+) (limited to 'arch/mips') diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h index d192158..d9f932d 100644 --- a/arch/mips/include/asm/idle.h +++ b/arch/mips/include/asm/idle.h @@ -1,6 +1,7 @@ #ifndef __ASM_IDLE_H #define __ASM_IDLE_H +#include #include extern void (*cpu_wait)(void); @@ -20,4 +21,17 @@ static inline int address_is_in_r4k_wait_irqoff(unsigned long addr) addr < (unsigned long)__pastwait; } +extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index); + +#define MIPS_CPUIDLE_WAIT_STATE {\ + .enter = mips_cpuidle_wait_enter,\ + .exit_latency = 1,\ + .target_residency = 1,\ + .power_usage = UINT_MAX,\ + .flags = CPUIDLE_FLAG_TIME_VALID,\ + .name = "wait",\ + .desc = "MIPS wait",\ +} + #endif /* __ASM_IDLE_H */ diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 837ff27..2879e2e 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -250,3 +250,14 @@ void arch_cpu_idle(void) else local_irq_enable(); } + +#ifdef CONFIG_CPU_IDLE + +int mips_cpuidle_wait_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + arch_cpu_idle(); + return index; +} + +#endif -- cgit v1.1 From c095ebafeae302c8403d7ca5e78c7afe97a1afbb Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 14 Apr 2014 16:24:22 +0100 Subject: MIPS: include cpuidle Kconfig menu This patch simply includes the cpuidle Kconfig entries in preparation for cpuidle drivers used on MIPS systems. 
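For reference, a hypothetical cpuidle driver would use the MIPS_CPUIDLE_WAIT_STATE macro from the previous patch roughly as follows (the driver name and state count are illustrative, not taken from any in-tree driver):

#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/idle.h>

static struct cpuidle_driver example_cpuidle_driver = {
	.name		= "example-cpuidle",
	.owner		= THIS_MODULE,
	.states		= {
		[0] = MIPS_CPUIDLE_WAIT_STATE,
		/* deeper, platform specific states would follow here */
	},
	.state_count	= 1,
};

The driver is then registered in the usual way, e.g. with cpuidle_register(&example_cpuidle_driver, NULL).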
From c095ebafeae302c8403d7ca5e78c7afe97a1afbb Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Mon, 14 Apr 2014 16:24:22 +0100
Subject: MIPS: include cpuidle Kconfig menu

This patch simply includes the cpuidle Kconfig entries in preparation
for cpuidle drivers used on MIPS systems.

Signed-off-by: Paul Burton
---
 arch/mips/Kconfig | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch/mips')

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 860a1e9..a3f32e7 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2678,12 +2678,16 @@ endmenu
 config MIPS_EXTERNAL_TIMER
         bool
 
-if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 menu "CPU Power Management"
+
+if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 source "drivers/cpufreq/Kconfig"
-endmenu
 endif
 
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
-- cgit v1.1

From d050894435cdc78807e714a0148527542a583e87 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Mon, 14 Apr 2014 16:25:29 +0100
Subject: cpuidle: cpuidle-cps: add MIPS CPS cpuidle driver

This patch adds a cpuidle driver for systems based around the MIPS
Coherent Processing System (CPS) architecture. It supports four idle
states:

  - The standard MIPS wait instruction.

  - The non-coherent wait, clock gated & power gated states exposed by
    the recently added pm-cps layer.

The pm-cps layer is used to enter all the deep idle states. Since cores
in the clock or power gated states cannot service interrupts, the
gic_send_ipi_single function is modified to send a power up command for
the appropriate core to the CPC in cases where the target CPU has
marked itself potentially incoherent.

Signed-off-by: Paul Burton
---
 arch/mips/kernel/smp-gic.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'arch/mips')

diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
index 3bb1f92..3b21a96 100644
--- a/arch/mips/kernel/smp-gic.c
+++ b/arch/mips/kernel/smp-gic.c
@@ -15,12 +15,14 @@
 #include
 
 #include
+#include
 #include
 
 void gic_send_ipi_single(int cpu, unsigned int action)
 {
         unsigned long flags;
         unsigned int intr;
+        unsigned int core = cpu_data[cpu].core;
 
         pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
                  smp_processor_id(), __func__, cpu, action, read_c0_status());
@@ -41,6 +43,15 @@ void gic_send_ipi_single(int cpu, unsigned int action)
         }
 
         gic_send_ipi(intr);
+
+        if (mips_cpc_present() && (core != current_cpu_data.core)) {
+                while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+                        mips_cpc_lock_other(core);
+                        write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
+                        mips_cpc_unlock_other();
+                }
+        }
+
         local_irq_restore(flags);
 }
 
-- cgit v1.1
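The cpuidle-cps driver itself lives under drivers/cpuidle/ and so falls outside the arch/mips portion shown here. The fragment below is only a sketch of how one of its deeper states might hand off to the pm-cps layer named in the commit message; the cps_pm_enter_state() call and the CPS_PM_NC_WAIT state are assumptions taken from the pm-cps interface (asm/pm-cps.h) introduced earlier in this series, not code reproduced from the driver.

#include <linux/cpuidle.h>

#include <asm/pm-cps.h>

/* Sketch of an enter callback for the non-coherent wait state */
static int example_cps_nc_enter(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv, int index)
{
        int err;

        /* Let the pm-cps layer take this core non-coherent and wait */
        err = cps_pm_enter_state(CPS_PM_NC_WAIT);
        if (err)
                return -1;      /* state not entered; report failure to the core */

        return index;
}

The CPC power-up command added to gic_send_ipi_single() above is what brings a core back out of the clock or power gated states when an IPI is pending for it, since a core in those states cannot take the interrupt itself.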
From 322014531e1fac4674b8eef67e4f80aca1e9f003 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 15 Apr 2014 12:24:23 +0100
Subject: MIPS: Malta: CPS SMP by default

The CONFIG_MIPS_CPS SMP implementation should be able to handle all
cases the CONFIG_MIPS_CMP implementation does, but without requiring
bootloader assistance. It is also required in order to make use of
features such as hotplug & cpuidle core power gating.

Enable it by default for Malta configs that previously enabled the now
deprecated CONFIG_MIPS_CMP, and disable the latter.

The local version suffix "cmp" is removed rather than replaced with
"cps" since there are other ways to tell that the CPS SMP
implementation is in use (the "VPE topology" line in the boot log being
one).

Signed-off-by: Paul Burton
---
 arch/mips/configs/maltasmvp_defconfig     | 3 +--
 arch/mips/configs/maltasmvp_eva_defconfig | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

(limited to 'arch/mips')

diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 10ef3be..f8a3231 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -4,10 +4,9 @@ CONFIG_CPU_MIPS32_R2=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CMP=y
+CONFIG_MIPS_CPS=y
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
-CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 2d3002c..c83338a 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -5,10 +5,9 @@ CONFIG_CPU_MIPS32_3_5_FEATURES=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CMP=y
+CONFIG_MIPS_CPS=y
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
-CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
-- cgit v1.1