Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/Kconfig                         |   2
-rw-r--r--   arch/powerpc/include/asm/cpu_has_feature.h   |   2
-rw-r--r--   arch/powerpc/include/asm/mmu.h               |   2
-rw-r--r--   arch/powerpc/include/asm/module.h            |   4
-rw-r--r--   arch/powerpc/include/asm/reg.h               |   3
-rw-r--r--   arch/powerpc/include/asm/stackprotector.h    |  40
-rw-r--r--   arch/powerpc/include/asm/xics.h              |   1
-rw-r--r--   arch/powerpc/kernel/Makefile                 |   4
-rw-r--r--   arch/powerpc/kernel/asm-offsets.c            |   3
-rw-r--r--   arch/powerpc/kernel/eeh_driver.c             |   2
-rw-r--r--   arch/powerpc/kernel/entry_32.S               |   6
-rw-r--r--   arch/powerpc/kernel/module_64.c              |   8
-rw-r--r--   arch/powerpc/kernel/process.c                |   6
-rw-r--r--   arch/powerpc/kernel/prom_init.c              |   3
-rw-r--r--   arch/powerpc/mm/fault.c                      |  21
-rw-r--r--   arch/powerpc/mm/pgtable-radix.c              |   4
-rw-r--r--   arch/powerpc/mm/tlb-radix.c                  |   6
-rw-r--r--   arch/powerpc/platforms/powernv/smp.c         |  12
-rw-r--r--   arch/powerpc/sysdev/xics/icp-opal.c          |  35
19 files changed, 64 insertions, 100 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8ee573..281f4f1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -164,7 +164,6 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
-	select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
 	bool "Build a relocatable kernel"
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 	select NONSTATIC_KERNEL
+	select MODULE_REL_CRCS if MODVERSIONS
 	help
 	  This builds a kernel image that is capable of running at the
 	  location the kernel is loaded at. For ppc32, there is no any
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index b312b15..6e834ca 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index a34c764..233a7e8 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cc12c61..5388551 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 0d4531a..dff7979 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -649,9 +649,10 @@
 #define SRR1_ISI_N_OR_G		0x10000000 /* ISI: Access is no-exec or G */
 #define SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
 #define SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
-#define SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 */
+#define SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define SRR1_WAKESYSERR		0x00300000 /* System error */
 #define SRR1_WAKEEE		0x00200000 /* External interrupt */
+#define SRR1_WAKEHVI		0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define SRR1_WAKEMT		0x00280000 /* mtctrl */
 #define SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
 #define SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644
index 6720190..0000000
--- a/arch/powerpc/include/asm/stackprotector.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= mftb();
-	canary ^= LINUX_VERSION_CODE;
-
-	current->stack_canary = canary;
-	__stack_chk_guard = current->stack_canary;
-}
-
-#endif	/* _ASM_STACKPROTECTOR_H */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index f0b2385..e0b9e57 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
 
 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 23f8082..f4c2b52 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0601e6a..195a9fc 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,9 +91,6 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d88573b..b948871 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 	struct eeh_pe *pe = (struct eeh_pe *)data;
-	bool *clear_sw_state = flag;
+	bool clear_sw_state = *(bool *)flag;
 	int i, rc = 1;
 
 	for (i = 0; rc && i < 3; i++)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 5742dbd..3841d74 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	lwz	r0,TSK_STACK_CANARY(r2)
-	lis	r4,__stack_chk_guard@ha
-	stw	r0,__stack_chk_guard@l(r4)
-#endif
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index bb18071..0b0f896 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 	for (end = (void *)vers + size; vers < end; vers++)
 		if (vers->name[0] == '.') {
 			memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-			/* The TOC symbol has no CRC computed. To avoid CRC
-			 * check failing, we must force it to the expected
-			 * value (see CRC check in module.c).
-			 */
-			if (!strcmp(vers->name, "TOC."))
-				vers->crc = -(unsigned long)reloc_start;
-#endif
 		}
 }
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 04885ce..5dd056d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...)	printk(KERN_INFO x)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec47a93..ac83eb0 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+	if (!PHANDLE_VALID(cpu_pkg))
+		return;
+
 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 	prom.cpu = be32_to_cpu(rval);
 
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6fd30ac..62a50d6 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(debugger_fault_handler(regs)))
 		goto bail;
 
-	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (!user_mode(regs) && (address >= TASK_SIZE)) {
+	/*
+	 * The kernel should never take an execute fault nor should it
+	 * take a page fault to a kernel address.
+	 */
+	if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
 		rc = SIGSEGV;
 		goto bail;
 	}
@@ -391,20 +394,6 @@ good_area:
 
 	if (is_exec) {
 		/*
-		 * An execution fault + no execute ?
-		 *
-		 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
-		 * deliberately create NX mappings, and use the fault to do the
-		 * cache flush. This is usually handled in hash_page_do_lazy_icache()
-		 * but we could end up here if that races with a concurrent PTE
-		 * update. In that case we need to fall through here to the VMA
-		 * check below.
-		 */
-		if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-			(regs->msr & SRR1_ISI_N_OR_G))
-			goto bad_area;
-
-		/*
 		 * Allow execution from readable areas if the MMU does not
 		 * provide separate controls over reading and executing.
 		 *
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index cfa53cc..34f1a0d 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		if (!pmdp)
 			return -ENOMEM;
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		}
 		pmdp = pmd_offset(pudp, ea);
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		if (!pmd_present(*pmdp)) {
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 61b7911..952713d 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-	return;
+	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index c789258..eec0e8d 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
 		wmask = SRR1_WAKEMASK_P8;
 
 	idle_states = pnv_get_supported_cpuidle_states();
+
 	/* We don't want to take decrementer interrupts while we are offline,
-	 * so clear LPCR:PECE1. We keep PECE2 enabled.
+	 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+	 * enabled as to let IPIs in.
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
 		 * contains 0.
 		 */
 		if (((srr1 & wmask) == SRR1_WAKEEE) ||
+		    ((srr1 & wmask) == SRR1_WAKEHVI) ||
 		    (local_paca->irq_happened & PACA_IRQ_EE)) {
-			icp_native_flush_interrupt();
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				icp_opal_flush_interrupt();
+			else
+				icp_native_flush_interrupt();
 		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
 		if (srr1 && !generic_check_cpu_restart(cpu))
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 	}
+
+	/* Re-enable decrementer interrupts */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
 	DBG("CPU%d coming online...\n", cpu);
 }
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 60c5765..f9670ea 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
 	int hw_cpu = get_hard_smp_processor_id(cpu);
 
+	kvmppc_set_host_ipi(cpu, 1);
 	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-	int hw_cpu = hard_smp_processor_id();
+	int cpu = smp_processor_id();
 
-	opal_int_set_mfrr(hw_cpu, 0xff);
+	kvmppc_set_host_ipi(cpu, 0);
+	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
 	return smp_ipi_demux();
 }
 
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+	unsigned int xirr;
+	unsigned int vec;
+
+	do {
+		xirr = icp_opal_get_xirr();
+		vec = xirr & 0x00ffffff;
+		if (vec == XICS_IRQ_SPURIOUS)
+			break;
+		if (vec == XICS_IPI) {
+			/* Clear pending IPI */
+			int cpu = smp_processor_id();
+			kvmppc_set_host_ipi(cpu, 0);
+			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+		} else {
+			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+			       "disabling\n", vec);
+			xics_mask_unknown_vec(vec);
+		}
+
+		/* EOI the interrupt */
+	} while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 
 static const struct icp_ops icp_opal_ops = {