From 202fb4ef81e3ec765c23bd1e6746a5c25b797d0e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 31 Jan 2018 12:12:20 +0000 Subject: arm64: spinlock: Fix theoretical trylock() A-B-A with LSE atomics If the spinlock "next" ticket wraps around between the initial LDR and the cmpxchg in the LSE version of spin_trylock, then we can erroneously think that we have successfully acquired the lock because we only check whether the next ticket returned by the cmpxchg is equal to the owner ticket in our updated lock word. This patch fixes the issue by performing a full 32-bit check of the lock word when trying to determine whether or not the CASA instruction updated memory. Reported-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/spinlock.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index fdb827c..ebdae15 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -87,8 +87,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) " cbnz %w1, 1f\n" " add %w1, %w0, %3\n" " casa %w0, %w1, %2\n" - " and %w1, %w1, #0xffff\n" - " eor %w1, %w1, %w0, lsr #16\n" + " sub %w1, %w1, %3\n" + " eor %w1, %w1, %w0\n" "1:") : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) : "I" (1 << TICKET_SHIFT) -- cgit v1.1 From 3060e9f0d14b46eb1949aaf9a31519ef75e59fda Mon Sep 17 00:00:00 2001 From: Shanker Donthineni Date: Mon, 29 Jan 2018 11:59:52 +0000 Subject: arm64: Add software workaround for Falkor erratum 1041 The ARM architecture defines the memory locations that are permitted to be accessed as the result of a speculative instruction fetch from an exception level for which all stages of translation are disabled. Specifically, the core is permitted to speculatively fetch from the 4KB region containing the current program counter and the next 4KB region. When translation is changed from enabled to disabled for the running exception level (SCTLR_ELn[M] changed from a value of 1 to 0), the Falkor core may errantly speculatively access memory locations outside of the 4KB region permitted by the architecture. The errant memory access may lead to one of the following unexpected behaviors. 1) A System Error Interrupt (SEI) being raised by the Falkor core due to the errant memory access attempting to access a region of memory that is protected by a slave-side memory protection unit. 2) Unpredictable device behavior due to a speculative read from device memory. This behavior may only occur if the instruction cache is disabled prior to or coincident with translation being changed from enabled to disabled. The conditions leading to this erratum will not occur when either of the following occurs: 1) A higher exception level disables translation of a lower exception level (e.g. EL2 changing SCTLR_EL1[M] from a value of 1 to 0). 2) An exception level disables its stage-1 translation while its stage-2 translation is enabled (e.g. EL1 changing SCTLR_EL1[M] from a value of 1 to 0 when HCR_EL2[VM] has a value of 1). To avoid the errant behavior, software must execute an ISB immediately prior to executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
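For illustration, a minimal sketch of the ordering constraint this erratum imposes, written as GNU C inline assembly. This is not kernel code; the series implements the workaround as the pre_disable_mmu_workaround assembly macro added in the diff below, and the function name here is only a stand-in.

/* Hedged sketch: clear SCTLR_EL1.M with the E1041 workaround applied.
 * Only meaningful at EL1; shown purely for the ISB-before-MSR ordering. */
static inline void disable_mmu_el1_with_e1041_workaround(void)
{
	unsigned long sctlr;

	asm volatile("mrs %0, sctlr_el1" : "=r" (sctlr));
	sctlr &= ~(1UL << 0);			/* SCTLR_EL1.M */
	asm volatile(
	"	isb\n"				/* E1041: ISB immediately before the MSR */
	"	msr	sctlr_el1, %0\n"
	"	isb\n"
	: : "r" (sctlr) : "memory");
}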
Signed-off-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- Documentation/arm64/silicon-errata.txt | 1 + arch/arm64/Kconfig | 12 +++++++++++- arch/arm64/include/asm/assembler.h | 10 ++++++++++ arch/arm64/kernel/cpu-reset.S | 1 + arch/arm64/kernel/efi-entry.S | 2 ++ arch/arm64/kernel/head.S | 1 + arch/arm64/kernel/relocate_kernel.S | 1 + arch/arm64/kvm/hyp-init.S | 1 + 8 files changed, 28 insertions(+), 1 deletion(-) diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index b9d93e9..c1d520d 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -75,3 +75,4 @@ stable kernels. | Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | +| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1d51c8e..b488076 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -550,7 +550,6 @@ config QCOM_QDF2400_ERRATUM_0065 If unsure, say Y. - config SOCIONEXT_SYNQUACER_PREITS bool "Socionext Synquacer: Workaround for GICv3 pre-ITS" default y @@ -569,6 +568,17 @@ config HISILICON_ERRATUM_161600802 a 128kB offset to be applied to the target address in this commands. If unsure, say Y. + +config QCOM_FALKOR_ERRATUM_E1041 + bool "Falkor E1041: Speculative instruction fetches might cause errant memory access" + default y + help + Falkor CPU may speculatively fetch instructions from an improper + memory location when MMU translation is changed from SCTLR_ELn[M]=1 + to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem. + + If unsure, say Y. + endmenu diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 794fe81..3873dd7 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -523,4 +523,14 @@ alternative_endif #endif .endm +/** + * Errata workaround prior to disable MMU. Insert an ISB immediately prior + * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. + */ + .macro pre_disable_mmu_workaround +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041 + isb +#endif + .endm + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 65f42d2..2a752cb 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart) mrs x12, sctlr_el1 ldr x13, =SCTLR_ELx_FLAGS bic x12, x12, x13 + pre_disable_mmu_workaround msr sctlr_el1, x12 isb diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 4e6ad35..6b9736c 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -96,6 +96,7 @@ ENTRY(entry) mrs x0, sctlr_el2 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C + pre_disable_mmu_workaround msr sctlr_el2, x0 isb b 2f @@ -103,6 +104,7 @@ ENTRY(entry) mrs x0, sctlr_el1 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C + pre_disable_mmu_workaround msr sctlr_el1, x0 isb 2: diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c3b241b..ba3ab047 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -849,6 +849,7 @@ __primary_switch: * to take into account by discarding the current kernel mapping and * creating a new one. 
*/ + pre_disable_mmu_workaround msr sctlr_el1, x20 // disable the MMU isb bl __create_page_tables // recreate kernel mapping diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index ce704a4..f407e42 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel) mrs x0, sctlr_el2 ldr x1, =SCTLR_ELx_FLAGS bic x0, x0, x1 + pre_disable_mmu_workaround msr sctlr_el2, x0 isb 1: diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 8a00de1..e086c6e 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -153,6 +153,7 @@ reset: mrs x5, sctlr_el2 ldr x6, =SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc + pre_disable_mmu_workaround msr sctlr_el2, x5 isb -- cgit v1.1 From 41acec624087b1268a15f414cf7d573deebafeec Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 11:59:53 +0000 Subject: arm64: kpti: Make use of nG dependent on arm64_kernel_unmapped_at_el0() To allow systems which do not require kpti to continue running with global kernel mappings (which appears to be a requirement for Cavium ThunderX due to a CPU erratum), make the use of nG in the kernel page tables dependent on arm64_kernel_unmapped_at_el0(), which is resolved at runtime. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kernel-pgtable.h | 12 ++---------- arch/arm64/include/asm/pgtable-prot.h | 30 ++++++++++++++---------------- 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 82386e8..a780f67 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -123,16 +123,8 @@ /* * Initial memory map attributes. */ -#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) -#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) - -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG) -#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG) -#else -#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS -#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS -#endif +#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) #if ARM64_SWAPPER_USES_SECTION_MAPS #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 22a9268..2db84df 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -37,13 +37,11 @@ #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG) -#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG) -#else -#define PROT_DEFAULT _PROT_DEFAULT -#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT -#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? 
PMD_SECT_NG : 0) + +#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) +#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) @@ -55,22 +53,22 @@ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) -#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) -#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG) +#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) +#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT -#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) +#define PAGE_KERNEL __pgprot(PROT_NORMAL) +#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY) +#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY) +#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN) +#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT) #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) -#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) -#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) +#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) -- cgit v1.1 From 4e602056559633303d7e5bb2ff624778ca248f68 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 11:59:54 +0000 Subject: arm64: mm: Permit transitioning from Global to Non-Global without BBM Break-before-make is not needed when transitioning from Global to Non-Global mappings, provided that the contiguous hint is not being used. 
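To restate the new rule in plain C, here is a small user-space sketch; the PTE_NG and PTE_CONT bit positions are assumptions taken from arm64's pgtable-hwdef.h rather than from this series.

#include <stdbool.h>
#include <stdint.h>

#define PTE_NG		(1ULL << 11)	/* nG: non-Global */
#define PTE_CONT	(1ULL << 52)	/* contiguous hint */

/* Break-before-make may be skipped only when the sole difference between
 * the old and new entries is that the nG bit is being set, and neither
 * entry carries the contiguous hint. */
static bool global_to_nonglobal_without_bbm_ok(uint64_t old, uint64_t new)
{
	if ((old | new) & PTE_CONT)
		return false;
	return ((old ^ new) == PTE_NG) && (new & PTE_NG);
}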
Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index b44992e..fc7902b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -118,6 +118,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new) if ((old | new) & PTE_CONT) return false; + /* Transitioning from Global to Non-Global is safe */ + if (((old ^ new) == PTE_NG) && (new & PTE_NG)) + return true; + return ((old ^ new) & ~mask) == 0; } -- cgit v1.1 From f992b4dfd58be07e31a42bc940a53b3e4b282616 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 6 Feb 2018 22:22:50 +0000 Subject: arm64: kpti: Add ->enable callback to remap swapper using nG mappings Defaulting to global mappings for kernel space is generally good for performance and appears to be necessary for Cavium ThunderX. If we subsequently decide that we need to enable kpti, then we need to rewrite our existing page table entries to be non-global. This is fiddly, and made worse by the possible use of contiguous mappings, which require a strict break-before-make sequence. Since the enable callback runs on each online CPU from stop_machine context, we can have all CPUs enter the idmap, where secondaries can wait for the primary CPU to rewrite swapper with its MMU off. It's all fairly horrible, but at least it only runs once. Tested-by: Marc Zyngier Reviewed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 10 ++ arch/arm64/kernel/cpufeature.c | 25 +++++ arch/arm64/mm/proc.S | 204 +++++++++++++++++++++++++++++++++++-- 3 files changed, 231 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 3873dd7..5aae7a6 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -523,6 +523,16 @@ alternative_endif #endif .endm + .macro pte_to_phys, phys, pte +#ifdef CONFIG_ARM64_PA_BITS_52 + ubfiz \phys, \pte, #(48 - 16 - 12), #16 + bfxil \phys, \pte, #16, #32 + lsl \phys, \phys, #16 +#else + and \phys, \pte, #PTE_ADDR_MASK +#endif + .endm + /** * Errata workaround prior to disable MMU. Insert an ISB immediately prior * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4b6f905..cd9f469 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -880,6 +880,30 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, ID_AA64PFR0_CSV3_SHIFT); } +static int kpti_install_ng_mappings(void *__unused) +{ + typedef void (kpti_remap_fn)(int, int, phys_addr_t); + extern kpti_remap_fn idmap_kpti_install_ng_mappings; + kpti_remap_fn *remap_fn; + + static bool kpti_applied = false; + int cpu = smp_processor_id(); + + if (kpti_applied) + return 0; + + remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); + + cpu_install_idmap(); + remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir)); + cpu_uninstall_idmap(); + + if (!cpu) + kpti_applied = true; + + return 0; +} + static int __init parse_kpti(char *str) { bool enabled; @@ -1003,6 +1027,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_UNMAP_KERNEL_AT_EL0, .def_scope = SCOPE_SYSTEM, .matches = unmap_kernel_at_el0, + .enable = kpti_install_ng_mappings, }, #endif { diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 9f177aa..ab8660e 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -166,6 +166,17 @@ ENTRY(cpu_do_switch_mm) ENDPROC(cpu_do_switch_mm) .pushsection ".idmap.text", "ax" + +.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 + adrp \tmp1, empty_zero_page + phys_to_ttbr \tmp1, \tmp2 + msr ttbr1_el1, \tmp2 + isb + tlbi vmalle1 + dsb nsh + isb +.endm + /* * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd) * @@ -175,14 +186,7 @@ ENDPROC(cpu_do_switch_mm) ENTRY(idmap_cpu_replace_ttbr1) save_and_disable_daif flags=x2 - adrp x1, empty_zero_page - phys_to_ttbr x1, x3 - msr ttbr1_el1, x3 - isb - - tlbi vmalle1 - dsb nsh - isb + __idmap_cpu_set_reserved_ttbr1 x1, x3 phys_to_ttbr x0, x3 msr ttbr1_el1, x3 @@ -194,6 +198,190 @@ ENTRY(idmap_cpu_replace_ttbr1) ENDPROC(idmap_cpu_replace_ttbr1) .popsection +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + .pushsection ".idmap.text", "ax" + + .macro __idmap_kpti_get_pgtable_ent, type + dc cvac, cur_\()\type\()p // Ensure any existing dirty + dmb sy // lines are written back before + ldr \type, [cur_\()\type\()p] // loading the entry + tbz \type, #0, next_\()\type // Skip invalid entries + .endm + + .macro __idmap_kpti_put_pgtable_ent_ng, type + orr \type, \type, #PTE_NG // Same bit for blocks and pages + str \type, [cur_\()\type\()p] // Update the entry and ensure it + dc civac, cur_\()\type\()p // is visible to all CPUs. + .endm + +/* + * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) + * + * Called exactly once from stop_machine context by each CPU found during boot. + */ +__idmap_kpti_flag: + .long 1 +ENTRY(idmap_kpti_install_ng_mappings) + cpu .req w0 + num_cpus .req w1 + swapper_pa .req x2 + swapper_ttb .req x3 + flag_ptr .req x4 + cur_pgdp .req x5 + end_pgdp .req x6 + pgd .req x7 + cur_pudp .req x8 + end_pudp .req x9 + pud .req x10 + cur_pmdp .req x11 + end_pmdp .req x12 + pmd .req x13 + cur_ptep .req x14 + end_ptep .req x15 + pte .req x16 + + mrs swapper_ttb, ttbr1_el1 + adr flag_ptr, __idmap_kpti_flag + + cbnz cpu, __idmap_kpti_secondary + + /* We're the boot CPU. Wait for the others to catch up */ + sevl +1: wfe + ldaxr w18, [flag_ptr] + eor w18, w18, num_cpus + cbnz w18, 1b + + /* We need to walk swapper, so turn off the MMU. 
*/ + pre_disable_mmu_workaround + mrs x18, sctlr_el1 + bic x18, x18, #SCTLR_ELx_M + msr sctlr_el1, x18 + isb + + /* Everybody is enjoying the idmap, so we can rewrite swapper. */ + /* PGD */ + mov cur_pgdp, swapper_pa + add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) +do_pgd: __idmap_kpti_get_pgtable_ent pgd + tbnz pgd, #1, walk_puds + __idmap_kpti_put_pgtable_ent_ng pgd +next_pgd: + add cur_pgdp, cur_pgdp, #8 + cmp cur_pgdp, end_pgdp + b.ne do_pgd + + /* Publish the updated tables and nuke all the TLBs */ + dsb sy + tlbi vmalle1is + dsb ish + isb + + /* We're done: fire up the MMU again */ + mrs x18, sctlr_el1 + orr x18, x18, #SCTLR_ELx_M + msr sctlr_el1, x18 + isb + + /* Set the flag to zero to indicate that we're all done */ + str wzr, [flag_ptr] + ret + + /* PUD */ +walk_puds: + .if CONFIG_PGTABLE_LEVELS > 3 + pte_to_phys cur_pudp, pgd + add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) +do_pud: __idmap_kpti_get_pgtable_ent pud + tbnz pud, #1, walk_pmds + __idmap_kpti_put_pgtable_ent_ng pud +next_pud: + add cur_pudp, cur_pudp, 8 + cmp cur_pudp, end_pudp + b.ne do_pud + b next_pgd + .else /* CONFIG_PGTABLE_LEVELS <= 3 */ + mov pud, pgd + b walk_pmds +next_pud: + b next_pgd + .endif + + /* PMD */ +walk_pmds: + .if CONFIG_PGTABLE_LEVELS > 2 + pte_to_phys cur_pmdp, pud + add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) +do_pmd: __idmap_kpti_get_pgtable_ent pmd + tbnz pmd, #1, walk_ptes + __idmap_kpti_put_pgtable_ent_ng pmd +next_pmd: + add cur_pmdp, cur_pmdp, #8 + cmp cur_pmdp, end_pmdp + b.ne do_pmd + b next_pud + .else /* CONFIG_PGTABLE_LEVELS <= 2 */ + mov pmd, pud + b walk_ptes +next_pmd: + b next_pud + .endif + + /* PTE */ +walk_ptes: + pte_to_phys cur_ptep, pmd + add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) +do_pte: __idmap_kpti_get_pgtable_ent pte + __idmap_kpti_put_pgtable_ent_ng pte +next_pte: + add cur_ptep, cur_ptep, #8 + cmp cur_ptep, end_ptep + b.ne do_pte + b next_pmd + + /* Secondary CPUs end up here */ +__idmap_kpti_secondary: + /* Uninstall swapper before surgery begins */ + __idmap_cpu_set_reserved_ttbr1 x18, x17 + + /* Increment the flag to let the boot CPU we're ready */ +1: ldxr w18, [flag_ptr] + add w18, w18, #1 + stxr w17, w18, [flag_ptr] + cbnz w17, 1b + + /* Wait for the boot CPU to finish messing around with swapper */ + sevl +1: wfe + ldxr w18, [flag_ptr] + cbnz w18, 1b + + /* All done, act like nothing happened */ + msr ttbr1_el1, swapper_ttb + isb + ret + + .unreq cpu + .unreq num_cpus + .unreq swapper_pa + .unreq swapper_ttb + .unreq flag_ptr + .unreq cur_pgdp + .unreq end_pgdp + .unreq pgd + .unreq cur_pudp + .unreq end_pudp + .unreq pud + .unreq cur_pmdp + .unreq end_pmdp + .unreq pmd + .unreq cur_ptep + .unreq end_ptep + .unreq pte +ENDPROC(idmap_kpti_install_ng_mappings) + .popsection +#endif + /* * __cpu_setup * -- cgit v1.1 From 6dc52b15c4a48052ade2529d639eee401d76e469 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 29 Jan 2018 11:59:56 +0000 Subject: arm64: Force KPTI to be disabled on Cavium ThunderX Cavium ThunderX's erratum 27456 results in a corruption of icache entries that are loaded from memory that is mapped as non-global (i.e. ASID-tagged). As KPTI is based on memory being mapped non-global, let's prevent it from kicking in if this erratum is detected. 
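Condensed into C, the decision unmap_kernel_at_el0() ends up making after this patch looks roughly like the sketch below; the parameters stand in for the real cpufeature and command-line state, and the names are illustrative only.

#include <stdbool.h>

/* forced: 0 = not forced, >0 = forced on, <0 = forced off (kpti= option) */
static bool want_kpti(bool has_cavium_27456, int forced, bool csv3_says_safe)
{
	if (has_cavium_27456)		/* nG mappings corrupt the I-cache here */
		forced = -1;
	if (forced)
		return forced > 0;
	return !csv3_says_safe;		/* default to KPTI on CPUs without CSV3 */
}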
Signed-off-by: Marc Zyngier [will: Update comment] Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index cd9f469..0ff8ab7 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -855,12 +855,23 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, int __unused) { + char const *str = "command line option"; u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); - /* Forced on command line? */ + /* + * For reasons that aren't entirely clear, enabling KPTI on Cavium + * ThunderX leads to apparent I-cache corruption of kernel text, which + * ends as well as you might imagine. Don't even try. + */ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { + str = "ARM64_WORKAROUND_CAVIUM_27456"; + __kpti_forced = -1; + } + + /* Forced? */ if (__kpti_forced) { - pr_info_once("kernel page table isolation forced %s by command line option\n", - __kpti_forced > 0 ? "ON" : "OFF"); + pr_info_once("kernel page table isolation forced %s by %s\n", + __kpti_forced > 0 ? "ON" : "OFF", str); return __kpti_forced > 0; } -- cgit v1.1 From fa0465fc07c2f9f47bd1198ab368d341bd7c7e4e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 11:59:57 +0000 Subject: arm64: assembler: Change order of macro arguments in phys_to_ttbr Since AArch64 assembly instructions take the destination register as their first operand, do the same thing for the phys_to_ttbr macro. Acked-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/kernel/head.S | 4 ++-- arch/arm64/kernel/hibernate-asm.S | 4 ++-- arch/arm64/kvm/hyp-init.S | 2 +- arch/arm64/mm/proc.S | 6 +++--- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5aae7a6..47555f6 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -514,7 +514,7 @@ alternative_endif * phys: physical address, preserved * ttbr: returns the TTBR value */ - .macro phys_to_ttbr, phys, ttbr + .macro phys_to_ttbr, ttbr, phys #ifdef CONFIG_ARM64_PA_BITS_52 orr \ttbr, \phys, \phys, lsr #46 and \ttbr, \ttbr, #TTBR_BADDR_MASK_52 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index ba3ab047..341649c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -776,8 +776,8 @@ ENTRY(__enable_mmu) update_early_cpu_boot_status 0, x1, x2 adrp x1, idmap_pg_dir adrp x2, swapper_pg_dir - phys_to_ttbr x1, x3 - phys_to_ttbr x2, x4 + phys_to_ttbr x3, x1 + phys_to_ttbr x4, x2 msr ttbr0_el1, x3 // load TTBR0 msr ttbr1_el1, x4 // load TTBR1 isb diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index 84f5d52..dd14ab8 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -34,12 +34,12 @@ * each stage of the walk. 
*/ .macro break_before_make_ttbr_switch zero_page, page_table, tmp - phys_to_ttbr \zero_page, \tmp + phys_to_ttbr \tmp, \zero_page msr ttbr1_el1, \tmp isb tlbi vmalle1 dsb nsh - phys_to_ttbr \page_table, \tmp + phys_to_ttbr \tmp, \page_table msr ttbr1_el1, \tmp isb .endm diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index e086c6e..5aa9ccf 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -63,7 +63,7 @@ __do_hyp_init: cmp x0, #HVC_STUB_HCALL_NR b.lo __kvm_handle_stub_hvc - phys_to_ttbr x0, x4 + phys_to_ttbr x4, x0 msr ttbr0_el2, x4 mrs x4, tcr_el1 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index ab8660e..cfd22ba 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -153,7 +153,7 @@ ENDPROC(cpu_do_resume) ENTRY(cpu_do_switch_mm) mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id - phys_to_ttbr x0, x3 + phys_to_ttbr x3, x0 #ifdef CONFIG_ARM64_SW_TTBR0_PAN bfi x3, x1, #48, #16 // set the ASID field in TTBR0 #endif @@ -169,7 +169,7 @@ ENDPROC(cpu_do_switch_mm) .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 adrp \tmp1, empty_zero_page - phys_to_ttbr \tmp1, \tmp2 + phys_to_ttbr \tmp2, \tmp1 msr ttbr1_el1, \tmp2 isb tlbi vmalle1 @@ -188,7 +188,7 @@ ENTRY(idmap_cpu_replace_ttbr1) __idmap_cpu_set_reserved_ttbr1 x1, x3 - phys_to_ttbr x0, x3 + phys_to_ttbr x3, x0 msr ttbr1_el1, x3 isb -- cgit v1.1 From f167211a93ac41a65b7a0ab79d4479d0fb58c4f1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 11:59:58 +0000 Subject: arm64: entry: Reword comment about post_ttbr_update_workaround We don't fully understand the Cavium ThunderX erratum, but it appears that mapping the kernel as nG can lead to horrible consequences such as attempting to execute userspace from kernel context. Since kpti isn't enabled for these CPUs anyway, simplify the comment justifying the lack of post_ttbr_update_workaround in the exception trampoline. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b34e717..9b8635d 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -1013,16 +1013,9 @@ alternative_else_nop_endif orr \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp /* - * We avoid running the post_ttbr_update_workaround here because the - * user and kernel ASIDs don't have conflicting mappings, so any - * "blessing" as described in: - * - * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com - * - * will not hurt correctness. Whilst this may partially defeat the - * point of using split ASIDs in the first place, it avoids - * the hit of invalidating the entire I-cache on every return to - * userspace. + * We avoid running the post_ttbr_update_workaround here because + * it's only needed by Cavium ThunderX, which requires KPTI to be + * disabled. */ .endm -- cgit v1.1 From 79ddab3b05ca903f552fd5bf920efa055210322b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 11:59:59 +0000 Subject: arm64: assembler: Align phys_to_pte with pte_to_phys pte_to_phys lives in assembler.h and takes its destination register as the first argument. Move phys_to_pte out of head.S to sit with its counterpart and rejig it to follow the same calling convention. 
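To make the bit-shuffling in the phys_to_pte and pte_to_phys macros easier to follow, here is a user-space C model of the 52-bit PA case (64K pages). The PTE_ADDR_* values are assumptions matching arm64's pgtable-hwdef.h, where PA bits [51:48] are stored in PTE bits [15:12].

#include <stdint.h>

#define PTE_ADDR_LOW	(((1ULL << (48 - 16)) - 1) << 16)	/* PTE bits [47:16] */
#define PTE_ADDR_HIGH	(0xfULL << 12)				/* PTE bits [15:12] */
#define PTE_ADDR_MASK	(PTE_ADDR_LOW | PTE_ADDR_HIGH)

/* orr pte, phys, phys, lsr #36 ; and pte, pte, #PTE_ADDR_MASK */
static uint64_t phys_to_pte(uint64_t phys)		/* phys is 64K aligned */
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
}

/* the ubfiz/bfxil/lsl sequence of pte_to_phys, spelled out */
static uint64_t pte_to_phys(uint64_t pte)
{
	uint64_t high = (pte >> 12) & 0xf;		/* PA bits [51:48] */
	uint64_t low  = (pte >> 16) & 0xffffffffULL;	/* PA bits [47:16], shifted down */

	return (high << 48) | (low << 16);
}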
Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 13 +++++++++++++ arch/arm64/kernel/head.S | 24 ++---------------------- 2 files changed, 15 insertions(+), 22 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 47555f6..544878a 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -523,6 +523,19 @@ alternative_endif #endif .endm + .macro phys_to_pte, pte, phys +#ifdef CONFIG_ARM64_PA_BITS_52 + /* + * We assume \phys is 64K aligned and this is guaranteed by only + * supporting this configuration with 64K pages. + */ + orr \pte, \phys, \phys, lsr #36 + and \pte, \pte, #PTE_ADDR_MASK +#else + mov \pte, \phys +#endif + .endm + .macro pte_to_phys, phys, pte #ifdef CONFIG_ARM64_PA_BITS_52 ubfiz \phys, \pte, #(48 - 16 - 12), #16 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 341649c..25f5b2e 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -148,26 +148,6 @@ preserve_boot_args: ENDPROC(preserve_boot_args) /* - * Macro to arrange a physical address in a page table entry, taking care of - * 52-bit addresses. - * - * Preserves: phys - * Returns: pte - */ - .macro phys_to_pte, phys, pte -#ifdef CONFIG_ARM64_PA_BITS_52 - /* - * We assume \phys is 64K aligned and this is guaranteed by only - * supporting this configuration with 64K pages. - */ - orr \pte, \phys, \phys, lsr #36 - and \pte, \pte, #PTE_ADDR_MASK -#else - mov \pte, \phys -#endif - .endm - -/* * Macro to create a table entry to the next page. * * tbl: page table address @@ -181,7 +161,7 @@ ENDPROC(preserve_boot_args) */ .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 add \tmp1, \tbl, #PAGE_SIZE - phys_to_pte \tmp1, \tmp2 + phys_to_pte \tmp2, \tmp1 orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type lsr \tmp1, \virt, #\shift sub \ptrs, \ptrs, #1 @@ -207,7 +187,7 @@ ENDPROC(preserve_boot_args) * Returns: rtbl */ .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1 -.Lpe\@: phys_to_pte \rtbl, \tmp1 +.Lpe\@: phys_to_pte \tmp1, \rtbl orr \tmp1, \tmp1, \flags // tmp1 = table entry str \tmp1, [\tbl, \index, lsl #3] add \rtbl, \rtbl, \inc // rtbl = pa next level -- cgit v1.1 From 439e70e27a51fe374806f460848066b237b0c10b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 12:00:00 +0000 Subject: arm64: idmap: Use "awx" flags for .idmap.text .pushsection directives The identity map is mapped as both writeable and executable by the SWAPPER_MM_MMUFLAGS and this is relied upon by the kpti code to manage a synchronisation flag. Update the .pushsection flags to reflect the actual mapping attributes. 
Reported-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpu-reset.S | 2 +- arch/arm64/kernel/head.S | 2 +- arch/arm64/kernel/sleep.S | 2 +- arch/arm64/mm/proc.S | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 2a752cb..8021b46 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -16,7 +16,7 @@ #include .text -.pushsection .idmap.text, "ax" +.pushsection .idmap.text, "awx" /* * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 25f5b2e..2b6b8b2 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -455,7 +455,7 @@ ENDPROC(__primary_switched) * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ - .section ".idmap.text","ax" + .section ".idmap.text","awx" ENTRY(kimage_vaddr) .quad _text - TEXT_OFFSET diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 10dd16d..bebec8e 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter) ret ENDPROC(__cpu_suspend_enter) - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly bl __cpu_setup diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index cfd22ba..71baed7 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -90,7 +90,7 @@ ENDPROC(cpu_do_suspend) * * x0: Address of context pointer */ - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] @@ -165,7 +165,7 @@ ENTRY(cpu_do_switch_mm) b post_ttbr_update_workaround // Back to C code... ENDPROC(cpu_do_switch_mm) - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 adrp \tmp1, empty_zero_page @@ -199,7 +199,7 @@ ENDPROC(idmap_cpu_replace_ttbr1) .popsection #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" .macro __idmap_kpti_get_pgtable_ent, type dc cvac, cur_\()\type\()p // Ensure any existing dirty @@ -388,7 +388,7 @@ ENDPROC(idmap_kpti_install_ng_mappings) * Initialise the processor for turning the MMU on. Return in x0 the * value of the SCTLR_EL1 register. */ - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh -- cgit v1.1 From 669474e772b952b14f4de4845a1558fd4c0414a4 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:16 +0000 Subject: arm64: barrier: Add CSDB macros to control data-value prediction For CPUs capable of data value prediction, CSDB waits for any outstanding predictions to architecturally resolve before allowing speculative execution to continue. Provide macros to expose it to the arch code. 
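To see where CSDB slots in, the next sketch shows the pattern that this and the following patch combine to provide: compute a branchless in-bounds mask for an untrusted index, then issue CSDB so speculative execution cannot continue with an unsanitised value. The mask computation here is an illustrative C equivalent; the kernel's version in the next patch uses a CMP/SBC sequence instead.

#include <stdint.h>

static inline void csdb(void)
{
	asm volatile("hint #20" : : : "memory");	/* CSDB encoding */
}

/* ~0 when idx < sz, 0 otherwise: if idx < sz then (idx - sz) is negative,
 * and smearing its sign bit across the register yields all-ones. */
static inline uint64_t index_mask_nospec(uint64_t idx, uint64_t sz)
{
	uint64_t mask = (uint64_t)((int64_t)(idx - sz) >> 63);

	csdb();
	return mask;
}

/* Usage: clamp the index before the dependent array access. */
static inline uint64_t clamp_index(uint64_t idx, uint64_t sz)
{
	return idx & index_mask_nospec(idx, sz);
}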
Reviewed-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 7 +++++++ arch/arm64/include/asm/barrier.h | 1 + 2 files changed, 8 insertions(+) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 544878a..1f44b42 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -116,6 +116,13 @@ .endm /* + * Value prediction barrier + */ + .macro csdb + hint #20 + .endm + +/* * NOP sequence */ .macro nops, num diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 77651c4..c0a846d 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -32,6 +32,7 @@ #define dsb(opt) asm volatile("dsb " #opt : : : "memory") #define psb_csync() asm volatile("hint #17" : : : "memory") +#define csdb() asm volatile("hint #20" : : : "memory") #define mb() dsb(sy) #define rmb() dsb(ld) -- cgit v1.1 From 022620eed3d0bc4bf2027326f599f5ad71c2ea3f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 5 Feb 2018 15:34:17 +0000 Subject: arm64: Implement array_index_mask_nospec() Provide an optimised, assembly implementation of array_index_mask_nospec() for arm64 so that the compiler is not in a position to transform the code in ways which affect its ability to inhibit speculation (e.g. by introducing conditional branches). This is similar to the sequence used by x86, modulo architectural differences in the carry/borrow flags. Reviewed-by: Mark Rutland Signed-off-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/barrier.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index c0a846d..f11518a 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -41,6 +41,27 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) +/* + * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz + * and 0 otherwise. + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( + " cmp %1, %2\n" + " sbc %0, xzr, xzr\n" + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc"); + + csdb(); + return mask; +} + #define __smp_mb() dmb(ish) #define __smp_rmb() dmb(ishld) #define __smp_wmb() dmb(ishst) -- cgit v1.1 From 51369e398d0d33e8f524314e672b07e8cf870e79 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 5 Feb 2018 15:34:18 +0000 Subject: arm64: Make USER_DS an inclusive limit Currently, USER_DS represents an exclusive limit while KERNEL_DS is inclusive. In order to do some clever trickery for speculation-safe masking, we need them both to behave equivalently - there aren't enough bits to make KERNEL_DS exclusive, so we have precisely one option. This also happens to correct a longstanding false negative for a range ending on the very top byte of kernel memory. Mark Rutland points out that we've actually got the semantics of addresses vs. segments muddled up in most of the places we need to amend, so shuffle the {USER,KERNEL}_DS definitions around such that we can correct those properly instead of just pasting "-1"s everywhere. 
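The asm comments in the new __range_ok() below walk through a flag-juggling implementation of a 65-bit comparison. Stated directly in C, with a compiler that provides unsigned __int128, the check it implements is simply the following sketch.

#include <stdbool.h>
#include <stdint.h>

/* limit is now inclusive: USER_DS == TASK_SIZE_64 - 1, KERNEL_DS == ~0UL */
static bool range_ok(uint64_t addr, uint64_t size, uint64_t limit)
{
	/* (u65)addr + (u65)size <= (u65)limit + 1, without dropping the carry */
	return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
}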
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/processor.h | 3 +++ arch/arm64/include/asm/uaccess.h | 45 ++++++++++++++++++++++---------------- arch/arm64/kernel/entry.S | 4 ++-- arch/arm64/mm/fault.c | 4 ++-- 4 files changed, 33 insertions(+), 23 deletions(-) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index cee4ae2..dfefbf3 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -21,6 +21,9 @@ #define TASK_SIZE_64 (UL(1) << VA_BITS) +#define KERNEL_DS UL(-1) +#define USER_DS (TASK_SIZE_64 - 1) + #ifndef __ASSEMBLY__ /* diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 59fda52..f2fc026 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -35,10 +35,7 @@ #include #include -#define KERNEL_DS (-1UL) #define get_ds() (KERNEL_DS) - -#define USER_DS TASK_SIZE_64 #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) @@ -66,22 +63,32 @@ static inline void set_fs(mm_segment_t fs) * Returns 1 if the range is valid, 0 otherwise. * * This is equivalent to the following test: - * (u65)addr + (u65)size <= current->addr_limit - * - * This needs 65-bit arithmetic. + * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 */ -#define __range_ok(addr, size) \ -({ \ - unsigned long __addr = (unsigned long)(addr); \ - unsigned long flag, roksum; \ - __chk_user_ptr(addr); \ - asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ - : "=&r" (flag), "=&r" (roksum) \ - : "1" (__addr), "Ir" (size), \ - "r" (current_thread_info()->addr_limit) \ - : "cc"); \ - flag; \ -}) +static inline unsigned long __range_ok(unsigned long addr, unsigned long size) +{ + unsigned long limit = current_thread_info()->addr_limit; + + __chk_user_ptr(addr); + asm volatile( + // A + B <= C + 1 for all A,B,C, in four easy steps: + // 1: X = A + B; X' = X % 2^64 + " adds %0, %0, %2\n" + // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 + " csel %1, xzr, %1, hi\n" + // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' + // to compensate for the carry flag being set in step 4. For + // X > 2^64, X' merely has to remain nonzero, which it does. + " csinv %0, %0, xzr, cc\n" + // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1 + // comes from the carry in being clear. Otherwise, we are + // testing X' - C == 0, subject to the previous adjustments. 
+ " sbcs xzr, %0, %1\n" + " cset %0, ls\n" + : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); + + return addr; +} /* * When dealing with data aborts, watchpoints, or instruction traps we may end @@ -90,7 +97,7 @@ static inline void set_fs(mm_segment_t fs) */ #define untagged_addr(addr) sign_extend64(addr, 55) -#define access_ok(type, addr, size) __range_ok(addr, size) +#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) #define user_addr_max get_fs #define _ASM_EXTABLE(from, to) \ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9b8635d..17c15f7 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -167,10 +167,10 @@ alternative_else_nop_endif .else add x21, sp, #S_FRAME_SIZE get_thread_info tsk - /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ + /* Save the task's original addr_limit and set USER_DS */ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] str x20, [sp, #S_ORIG_ADDR_LIMIT] - mov x20, #TASK_SIZE_64 + mov x20, #USER_DS str x20, [tsk, #TSK_TI_ADDR_LIMIT] /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ .endif /* \el == 0 */ diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 0e671dd..af530eb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -240,7 +240,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, if (fsc_type == ESR_ELx_FSC_PERM) return true; - if (addr < USER_DS && system_uses_ttbr0_pan()) + if (addr < TASK_SIZE && system_uses_ttbr0_pan()) return fsc_type == ESR_ELx_FSC_FAULT && (regs->pstate & PSR_PAN_BIT); @@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, mm_flags |= FAULT_FLAG_WRITE; } - if (addr < USER_DS && is_permission_fault(esr, regs, addr)) { + if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) { /* regs->orig_addr_limit may be 0 if we entered from EL0 */ if (regs->orig_addr_limit == KERNEL_DS) die("Accessing user space memory with fs=KERNEL_DS", regs, esr); -- cgit v1.1 From 4d8efc2d5ee4c9ccfeb29ee8afd47a8660d0c0ce Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 5 Feb 2018 15:34:19 +0000 Subject: arm64: Use pointer masking to limit uaccess speculation Similarly to x86, mitigate speculation past an access_ok() check by masking the pointer against the address limit before use. Even if we don't expect speculative writes per se, it is plausible that a CPU may still speculate at least as far as fetching a cache line for writing, hence we also harden put_user() and clear_user() for peace of mind. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index f2fc026..e49fe72 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -228,6 +228,26 @@ static inline void uaccess_enable_not_uao(void) } /* + * Sanitise a uaccess pointer such that it becomes NULL if above the + * current addr_limit. 
+ */ +#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) +static inline void __user *__uaccess_mask_ptr(const void __user *ptr) +{ + void __user *safe_ptr; + + asm volatile( + " bics xzr, %1, %2\n" + " csel %0, %1, xzr, eq\n" + : "=&r" (safe_ptr) + : "r" (ptr), "r" (current_thread_info()->addr_limit) + : "cc"); + + csdb(); + return safe_ptr; +} + +/* * The "__xxx" versions of the user access functions do not verify the address * space - it must have been done previously with a separate "access_ok()" * call. @@ -297,7 +317,7 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ - __get_user((x), __p) : \ + __p = uaccess_mask_ptr(__p), __get_user((x), __p) : \ ((x) = 0, -EFAULT); \ }) @@ -361,7 +381,7 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \ - __put_user((x), __p) : \ + __p = uaccess_mask_ptr(__p), __put_user((x), __p) : \ -EFAULT; \ }) @@ -377,7 +397,7 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - n = __clear_user(to, n); + n = __clear_user(__uaccess_mask_ptr(to), n); return n; } -- cgit v1.1 From 6314d90e64936c584f300a52ef173603fb2461b5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:20 +0000 Subject: arm64: entry: Ensure branch through syscall table is bounded under speculation In a similar manner to array_index_mask_nospec, this patch introduces an assembly macro (mask_nospec64) which can be used to bound a value under speculation. This macro is then used to ensure that the indirect branch through the syscall table is bounded under speculation, with out-of-range addresses speculating as calls to sys_io_setup (0). Reviewed-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 11 +++++++++++ arch/arm64/kernel/entry.S | 2 ++ 2 files changed, 13 insertions(+) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 1f44b42..1241fb2 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -123,6 +123,17 @@ .endm /* + * Sanitise a 64-bit bounded index wrt speculation, returning zero if out + * of bounds. + */ + .macro mask_nospec64, idx, limit, tmp + sub \tmp, \idx, \limit + bic \tmp, \tmp, \idx + and \idx, \idx, \tmp, asr #63 + csdb + .endm + +/* * NOP sequence */ .macro nops, num diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 17c15f7..4dd1b5a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -378,6 +378,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 * x7 is reserved for the system call number in 32-bit mode. 
*/ wsc_nr .req w25 // number of system calls +xsc_nr .req x25 // number of system calls (zero-extended) wscno .req w26 // syscall number xscno .req x26 // syscall number (zero-extended) stbl .req x27 // syscall table pointer @@ -935,6 +936,7 @@ el0_svc_naked: // compat entry point b.ne __sys_trace cmp wscno, wsc_nr // check upper syscall limit b.hs ni_sys + mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number ldr x16, [stbl, xscno, lsl #3] // address in the syscall table blr x16 // call sys_* routine b ret_fast_syscall -- cgit v1.1 From c2f0ad4fc089cff81cef6a13d04b399980ecbfcc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:21 +0000 Subject: arm64: uaccess: Prevent speculative use of the current addr_limit A mispredicted conditional call to set_fs could result in the wrong addr_limit being forwarded under speculation to a subsequent access_ok check, potentially forming part of a spectre-v1 attack using uaccess routines. This patch prevents this forwarding from taking place, but putting heavy barriers in set_fs after writing the addr_limit. Reviewed-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index e49fe72..2057dee 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -42,6 +42,13 @@ static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; + /* + * Prevent a mispredicted conditional call to set_fs from forwarding + * the wrong address limit to access_ok under speculation. + */ + dsb(nsh); + isb(); + /* On user-mode return, check fs is correct */ set_thread_flag(TIF_FSCHECK); -- cgit v1.1 From 84624087dd7e3b482b7b11c170ebc1f329b3a218 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:22 +0000 Subject: arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user access_ok isn't an expensive operation once the addr_limit for the current thread has been loaded into the cache. Given that the initial access_ok check preceding a sequence of __{get,put}_user operations will take the brunt of the miss, we can make the __* variants identical to the full-fat versions, which brings with it the benefits of address masking. The likely cost in these sequences will be from toggling PAN/UAO, which we can address later by implementing the *_unsafe versions. 
Reviewed-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 54 ++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 2057dee..e5002bc 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -306,28 +306,33 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user(x, ptr) \ +#define __get_user_check(x, ptr, err) \ ({ \ - int __gu_err = 0; \ - __get_user_err((x), (ptr), __gu_err); \ - __gu_err; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __get_user_err((x), __p, (err)); \ + } else { \ + (x) = 0; (err) = -EFAULT; \ + } \ }) #define __get_user_error(x, ptr, err) \ ({ \ - __get_user_err((x), (ptr), (err)); \ + __get_user_check((x), (ptr), (err)); \ (void)0; \ }) -#define get_user(x, ptr) \ +#define __get_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - might_fault(); \ - access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ - __p = uaccess_mask_ptr(__p), __get_user((x), __p) : \ - ((x) = 0, -EFAULT); \ + int __gu_err = 0; \ + __get_user_check((x), (ptr), __gu_err); \ + __gu_err; \ }) +#define get_user __get_user + #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ @@ -370,28 +375,33 @@ do { \ uaccess_disable_not_uao(); \ } while (0) -#define __put_user(x, ptr) \ +#define __put_user_check(x, ptr, err) \ ({ \ - int __pu_err = 0; \ - __put_user_err((x), (ptr), __pu_err); \ - __pu_err; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __put_user_err((x), __p, (err)); \ + } else { \ + (err) = -EFAULT; \ + } \ }) #define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x), (ptr), (err)); \ + __put_user_check((x), (ptr), (err)); \ (void)0; \ }) -#define put_user(x, ptr) \ +#define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - might_fault(); \ - access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \ - __p = uaccess_mask_ptr(__p), __put_user((x), __p) : \ - -EFAULT; \ + int __pu_err = 0; \ + __put_user_check((x), (ptr), __pu_err); \ + __pu_err; \ }) +#define put_user __put_user + extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); #define raw_copy_from_user __arch_copy_from_user extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); -- cgit v1.1 From f71c2ffcb20dd8626880747557014bb9a61eb90e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:23 +0000 Subject: arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user Like we've done for get_user and put_user, ensure that user pointers are masked before invoking the underlying __arch_{clear,copy_*}_user operations. 
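The masking helper that this and the previous patches route every user pointer through can be restated in portable C as the sketch below. It models the semantics only: the real __uaccess_mask_ptr() added earlier in the series is branchless (BICS + CSEL) and is followed by a CSDB barrier.

#include <stddef.h>
#include <stdint.h>

/* Return ptr unchanged if no address bit is set above the inclusive limit,
 * NULL otherwise, so a mispredicted access_ok() cannot steer speculative
 * accesses at kernel addresses. */
static void *uaccess_mask_ptr(void *ptr, uint64_t addr_limit)
{
	return ((uint64_t)ptr & ~addr_limit) ? NULL : ptr;
}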
Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 29 ++++++++++++++++++++++------- arch/arm64/kernel/arm64ksyms.c | 4 ++-- arch/arm64/lib/clear_user.S | 6 +++--- arch/arm64/lib/copy_in_user.S | 5 +++-- 4 files changed, 30 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index e5002bc..543e11f 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -403,20 +403,35 @@ do { \ #define put_user __put_user extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); -#define raw_copy_from_user __arch_copy_from_user +#define raw_copy_from_user(to, from, n) \ +({ \ + __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ +}) + extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); -#define raw_copy_to_user __arch_copy_to_user -extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); +#define raw_copy_to_user(to, from, n) \ +({ \ + __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ +}) + +extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); +#define raw_copy_in_user(to, from, n) \ +({ \ + __arch_copy_in_user(__uaccess_mask_ptr(to), \ + __uaccess_mask_ptr(from), (n)); \ +}) + #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER -static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) +extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); +static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - n = __clear_user(__uaccess_mask_ptr(to), n); + n = __arch_clear_user(__uaccess_mask_ptr(to), n); return n; } +#define clear_user __clear_user extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -430,7 +445,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) { kasan_check_write(dst, size); - return __copy_user_flushcache(dst, src, size); + return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size); } #endif diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 67368c7..66be504 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page); /* user mem (segment) */ EXPORT_SYMBOL(__arch_copy_from_user); EXPORT_SYMBOL(__arch_copy_to_user); -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(raw_copy_in_user); +EXPORT_SYMBOL(__arch_clear_user); +EXPORT_SYMBOL(__arch_copy_in_user); /* physical memory */ EXPORT_SYMBOL(memstart_addr); diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 3d69a8d..21ba0b2 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -21,7 +21,7 @@ .text -/* Prototype: int __clear_user(void *addr, size_t sz) +/* Prototype: int __arch_clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear @@ -29,7 +29,7 @@ * * Alignment fixed up by hardware. 
*/ -ENTRY(__clear_user) +ENTRY(__arch_clear_user) uaccess_enable_not_uao x2, x3, x4 mov x2, x1 // save the size for fixup return subs x1, x1, #8 @@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 uaccess_disable_not_uao x2, x3 ret -ENDPROC(__clear_user) +ENDPROC(__arch_clear_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index fbb090f..54b75de 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -64,14 +64,15 @@ .endm end .req x5 -ENTRY(raw_copy_in_user) + +ENTRY(__arch_copy_in_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3, x4 mov x0, #0 ret -ENDPROC(raw_copy_in_user) +ENDPROC(__arch_copy_in_user) .section .fixup,"ax" .align 2 -- cgit v1.1 From 91b2d3442f6a44dce875670d702af22737ad5eff Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:24 +0000 Subject: arm64: futex: Mask __user pointers prior to dereference The arm64 futex code has some explicit dereferencing of user pointers where performing atomic operations in response to a futex command. This patch uses masking to limit any speculative futex operations to within the user address space. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/futex.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 5bb2fd4..07fe247 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -48,9 +48,10 @@ do { \ } while (0) static inline int -arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) { int oldval = 0, ret, tmp; + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); pagefault_disable(); @@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, u32 oldval, u32 newval) { int ret = 0; u32 val, tmp; + u32 __user *uaddr; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32))) return -EFAULT; + uaddr = __uaccess_mask_ptr(_uaddr); uaccess_enable(); asm volatile("// futex_atomic_cmpxchg_inatomic\n" " prfm pstl1strm, %2\n" -- cgit v1.1 From 5dfc6ed27710c42cbc15db5c0d4475699991da0a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 2 Feb 2018 17:31:39 +0000 Subject: arm64: entry: Apply BP hardening for high-priority synchronous exceptions Software-step and PC alignment fault exceptions have higher priority than instruction abort exceptions, so apply the BP hardening hooks there too if the user PC appears to reside in kernel space. 
Reported-by: Dan Hettena Reviewed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 5 ++++- arch/arm64/mm/fault.c | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 4dd1b5a..af22793 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -767,7 +767,10 @@ el0_sp_pc: * Stack or PC alignment exception handling */ mrs x26, far_el1 - enable_daif + enable_da_f +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif ct_user_exit mov x0, x26 mov x1, x25 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index af530eb..43b28a7 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -732,6 +732,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, struct siginfo info; struct task_struct *tsk = current; + if (user_mode(regs)) { + if (instruction_pointer(regs) > TASK_SIZE) + arm64_apply_bp_hardening(); + local_irq_enable(); + } + if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS)) pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n", tsk->comm, task_pid_nr(tsk), @@ -791,6 +797,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (interrupts_enabled(regs)) trace_hardirqs_off(); + if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE) + arm64_apply_bp_hardening(); + if (!inf->fn(addr, esr, regs)) { rv = 1; } else { -- cgit v1.1 From 30d88c0e3ace625a92eead9ca0ad94093a8f59fe Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 2 Feb 2018 17:31:40 +0000 Subject: arm64: entry: Apply BP hardening for suspicious interrupts from EL0 It is possible to take an IRQ from EL0 following a branch to a kernel address in such a way that the IRQ is prioritised over the instruction abort. Whilst an attacker would need to get the stars to align here, it might be sufficient with enough calibration so perform BP hardening in the rare case that we see a kernel address in the ELR when handling an IRQ from EL0. Reported-by: Dan Hettena Reviewed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 5 +++++ arch/arm64/mm/fault.c | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index af22793..25e022c 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -828,6 +828,11 @@ el0_irq_naked: #endif ct_user_exit +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + tbz x22, #55, 1f + bl do_el0_irq_bp_hardening +1: +#endif irq_handler #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 43b28a7..8fd1412 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -708,6 +708,12 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, arm64_notify_die("", regs, &info, esr); } +asmlinkage void __exception do_el0_irq_bp_hardening(void) +{ + /* PC has already been checked in entry.S */ + arm64_apply_bp_hardening(); +} + asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr, struct pt_regs *regs) -- cgit v1.1 From c0938c72f8070aabb766b06edba85941ea7911da Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:05 +0000 Subject: arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls KVM doesn't follow the SMCCC when it comes to unimplemented calls, and inject an UNDEF instead of returning an error. 
Since firmware calls are now used for security mitigation, they are becoming more common, and the undef is counter productive. Instead, let's follow the SMCCC which states that -1 must be returned to the caller when getting an unknown function number. Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Catalin Marinas --- arch/arm64/kvm/handle_exit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index c09fc5a..520b0da 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -53,7 +53,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) ret = kvm_psci_call(vcpu); if (ret < 0) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } @@ -62,7 +62,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } -- cgit v1.1 From 20e8175d246e9f9deb377f2784b3e7dfb2ad3e86 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:06 +0000 Subject: arm: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls KVM doesn't follow the SMCCC when it comes to unimplemented calls, and inject an UNDEF instead of returning an error. Since firmware calls are now used for security mitigation, they are becoming more common, and the undef is counter productive. Instead, let's follow the SMCCC which states that -1 must be returned to the caller when getting an unknown function number. Cc: Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm/kvm/handle_exit.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index cf8bf6b..a4bf0f6 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -38,7 +38,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) ret = kvm_psci_call(vcpu); if (ret < 0) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } @@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_inject_undefined(vcpu); + /* + * "If an SMC instruction executed at Non-secure EL1 is + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a + * Trap exception, not a Secure Monitor Call exception [...]" + * + * We need to advance the PC after the trap, as it would + * otherwise return to the same address... + */ + vcpu_set_reg(vcpu, 0, ~0UL); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } -- cgit v1.1 From f5115e8869e1dfafac0e414b4f1664f3a84a4683 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:07 +0000 Subject: arm64: KVM: Increment PC after handling an SMC trap When handling an SMC trap, the "preferred return address" is set to that of the SMC, and not the next PC (which is a departure from the behaviour of an SMC that isn't trapped). Increment PC in the handler, as the guest is otherwise forever stuck... 
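Taken together with the previous patch, the guest-visible contract is now the plain SMCCC one: an unimplemented SMC or HVC returns -1 in x0/r0 and execution resumes after the trapped instruction. A hedged, guest-side sketch of relying on that contract (smc_call() is a made-up stand-in for whatever SMC/HVC wrapper the guest uses to marshal x0-x3):

/*
 * Sketch only: smc_call() is hypothetical; the point is that an
 * unknown function ID now comes back as -1 instead of an UNDEF.
 */
static bool example_fw_service_implemented(unsigned long func_id)
{
	long ret = smc_call(func_id, 0, 0, 0);

	return ret != -1;
}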
Cc: stable@vger.kernel.org Fixes: acfb3b883f6d ("arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls") Reviewed-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kvm/handle_exit.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 520b0da..5493bbe 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -62,7 +62,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { + /* + * "If an SMC instruction executed at Non-secure EL1 is + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a + * Trap exception, not a Secure Monitor Call exception [...]" + * + * We need to advance the PC after the trap, as it would + * otherwise return to the same address... + */ vcpu_set_reg(vcpu, 0, ~0UL); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } -- cgit v1.1 From 1a2fb94e6a771ff94f4afa22497a4695187b820c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:08 +0000 Subject: arm/arm64: KVM: Consolidate the PSCI include files As we're about to update the PSCI support, and because I'm lazy, let's move the PSCI include file to include/kvm so that both ARM architectures can find it. Acked-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm/include/asm/kvm_psci.h | 27 --------------------------- arch/arm/kvm/handle_exit.c | 2 +- arch/arm64/include/asm/kvm_psci.h | 27 --------------------------- arch/arm64/kvm/handle_exit.c | 3 ++- include/kvm/arm_psci.h | 27 +++++++++++++++++++++++++++ virt/kvm/arm/arm.c | 2 +- virt/kvm/arm/psci.c | 3 ++- 7 files changed, 33 insertions(+), 58 deletions(-) delete mode 100644 arch/arm/include/asm/kvm_psci.h delete mode 100644 arch/arm64/include/asm/kvm_psci.h create mode 100644 include/kvm/arm_psci.h diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h deleted file mode 100644 index 6bda945..0000000 --- a/arch/arm/include/asm/kvm_psci.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef __ARM_KVM_PSCI_H__ -#define __ARM_KVM_PSCI_H__ - -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 - -int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM_KVM_PSCI_H__ */ diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index a4bf0f6..230ae40 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include "trace.h" diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h deleted file mode 100644 index bc39e55..0000000 --- a/arch/arm64/include/asm/kvm_psci.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012,2013 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM64_KVM_PSCI_H__ -#define __ARM64_KVM_PSCI_H__ - -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 - -int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM64_KVM_PSCI_H__ */ diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 5493bbe..588f910 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -22,13 +22,14 @@ #include #include +#include + #include #include #include #include #include #include -#include #include #include diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h new file mode 100644 index 0000000..2042bb9 --- /dev/null +++ b/include/kvm/arm_psci.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __KVM_ARM_PSCI_H__ +#define __KVM_ARM_PSCI_H__ + +#define KVM_ARM_PSCI_0_1 1 +#define KVM_ARM_PSCI_0_2 2 + +int kvm_psci_version(struct kvm_vcpu *vcpu); +int kvm_psci_call(struct kvm_vcpu *vcpu); + +#endif /* __KVM_ARM_PSCI_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 15bf026..af3e98f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -31,6 +31,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -46,7 +47,6 @@ #include #include #include -#include #include #ifdef REQUIRES_VIRT diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index f1e363b..b322e46 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -21,9 +21,10 @@ #include #include -#include #include +#include + #include /* -- cgit v1.1 From d0a144f12a7ca8368933eae6583c096c363ec506 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:09 +0000 Subject: arm/arm64: KVM: Add PSCI_VERSION helper As we're about to trigger a PSCI version explosion, it doesn't hurt to introduce a PSCI_VERSION helper that is going to be used everywhere. Reviewed-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- include/kvm/arm_psci.h | 6 ++++-- include/uapi/linux/psci.h | 3 +++ virt/kvm/arm/psci.c | 4 +--- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 2042bb9..5659343 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -18,8 +18,10 @@ #ifndef __KVM_ARM_PSCI_H__ #define __KVM_ARM_PSCI_H__ -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 +#include + +#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) +#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) int kvm_psci_version(struct kvm_vcpu *vcpu); int kvm_psci_call(struct kvm_vcpu *vcpu); diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h index 760e52a..b3bcabe 100644 --- a/include/uapi/linux/psci.h +++ b/include/uapi/linux/psci.h @@ -88,6 +88,9 @@ (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) #define PSCI_VERSION_MINOR(ver) \ ((ver) & PSCI_VERSION_MINOR_MASK) +#define PSCI_VERSION(maj, min) \ + ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \ + ((min) & PSCI_VERSION_MINOR_MASK)) /* PSCI features decoding (>=1.0) */ #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1 diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index b322e46..999f94d 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -25,8 +25,6 @@ #include -#include - /* * This is an implementation of the Power State Coordination Interface * as described in ARM document number ARM DEN 0022A. @@ -222,7 +220,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) * Bits[31:16] = Major Version = 0 * Bits[15:0] = Minor Version = 2 */ - val = 2; + val = KVM_ARM_PSCI_0_2; break; case PSCI_0_2_FN_CPU_SUSPEND: case PSCI_0_2_FN64_CPU_SUSPEND: -- cgit v1.1 From 84684fecd7ea381824a96634a027b7719587fb77 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:10 +0000 Subject: arm/arm64: KVM: Add smccc accessors to PSCI code Instead of open coding the accesses to the various registers, let's add explicit SMCCC accessors. 
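For illustration only (no such handler exists in the tree), a service function written against these accessors reads its arguments and publishes its result without naming a single guest register:

/*
 * Hypothetical example of the calling pattern the accessors below
 * enable; EXAMPLE_FN and the "+1" operation are made up.
 */
static int handle_example_call(struct kvm_vcpu *vcpu)
{
	u32 fn = smccc_get_function(vcpu);
	unsigned long ret = PSCI_RET_NOT_SUPPORTED;

	if (fn == EXAMPLE_FN)
		ret = smccc_get_arg1(vcpu) + 1;

	smccc_set_retval(vcpu, ret, 0, 0, 0);
	return 1;	/* resume the guest */
}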
Reviewed-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- virt/kvm/arm/psci.c | 52 ++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 999f94d..c41553d 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -32,6 +32,38 @@ #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) +static u32 smccc_get_function(struct kvm_vcpu *vcpu) +{ + return vcpu_get_reg(vcpu, 0); +} + +static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu) +{ + return vcpu_get_reg(vcpu, 1); +} + +static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu) +{ + return vcpu_get_reg(vcpu, 2); +} + +static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu) +{ + return vcpu_get_reg(vcpu, 3); +} + +static void smccc_set_retval(struct kvm_vcpu *vcpu, + unsigned long a0, + unsigned long a1, + unsigned long a2, + unsigned long a3) +{ + vcpu_set_reg(vcpu, 0, a0); + vcpu_set_reg(vcpu, 1, a1); + vcpu_set_reg(vcpu, 2, a2); + vcpu_set_reg(vcpu, 3, a3); +} + static unsigned long psci_affinity_mask(unsigned long affinity_level) { if (affinity_level <= 3) @@ -77,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) unsigned long context_id; phys_addr_t target_pc; - cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; + cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; if (vcpu_mode_is_32bit(source_vcpu)) cpu_id &= ~((u32) 0); @@ -96,8 +128,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) return PSCI_RET_INVALID_PARAMS; } - target_pc = vcpu_get_reg(source_vcpu, 2); - context_id = vcpu_get_reg(source_vcpu, 3); + target_pc = smccc_get_arg2(source_vcpu); + context_id = smccc_get_arg3(source_vcpu); kvm_reset_vcpu(vcpu); @@ -116,7 +148,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) * NOTE: We always update r0 (or x0) because for PSCI v0.1 * the general puspose registers are undefined upon CPU_ON. 
*/ - vcpu_set_reg(vcpu, 0, context_id); + smccc_set_retval(vcpu, context_id, 0, 0, 0); vcpu->arch.power_off = false; smp_mb(); /* Make sure the above is visible */ @@ -136,8 +168,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tmp; - target_affinity = vcpu_get_reg(vcpu, 1); - lowest_affinity_level = vcpu_get_reg(vcpu, 2); + target_affinity = smccc_get_arg1(vcpu); + lowest_affinity_level = smccc_get_arg2(vcpu); /* Determine target affinity mask */ target_affinity_mask = psci_affinity_mask(lowest_affinity_level); @@ -210,7 +242,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu) static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); + u32 psci_fn = smccc_get_function(vcpu); unsigned long val; int ret = 1; @@ -277,14 +309,14 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) break; } - vcpu_set_reg(vcpu, 0, val); + smccc_set_retval(vcpu, val, 0, 0, 0); return ret; } static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); + u32 psci_fn = smccc_get_function(vcpu); unsigned long val; switch (psci_fn) { @@ -302,7 +334,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) break; } - vcpu_set_reg(vcpu, 0, val); + smccc_set_retval(vcpu, val, 0, 0, 0); return 1; } -- cgit v1.1 From 58e0b2239a4d997094ba63986ef4de29ddc91d87 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:11 +0000 Subject: arm/arm64: KVM: Implement PSCI 1.0 support PSCI 1.0 can be trivially implemented by providing the FEATURES call on top of PSCI 0.2 and returning 1.0 as the PSCI version. We happily ignore everything else, as they are either optional or are clarifications that do not require any additional change. PSCI 1.0 is now the default until we decide to add a userspace selection API. 
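From a caller's point of view, the interesting addition is PSCI_FEATURES: PSCI 1.0 lets you ask whether a function is implemented before depending on it. A simplified, untested sketch of that probe, written against the same invoke_psci_fn() conduit the firmware driver uses:

/*
 * Sketch: probe a function ID via PSCI 1.0 FEATURES. Anything other
 * than PSCI_RET_NOT_SUPPORTED means the function is implemented.
 */
static bool example_psci_fn_supported(u32 fn_id)
{
	long ret = invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, fn_id, 0, 0);

	return ret != PSCI_RET_NOT_SUPPORTED;
}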
Reviewed-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- include/kvm/arm_psci.h | 3 +++ virt/kvm/arm/psci.c | 45 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 5659343..3236043 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -22,6 +22,9 @@ #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) #define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) +#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) + +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 int kvm_psci_version(struct kvm_vcpu *vcpu); int kvm_psci_call(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index c41553d..3e7c63e 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -234,7 +234,7 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) int kvm_psci_version(struct kvm_vcpu *vcpu) { if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) - return KVM_ARM_PSCI_0_2; + return KVM_ARM_PSCI_LATEST; return KVM_ARM_PSCI_0_1; } @@ -313,6 +313,47 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) return ret; } +static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu) +{ + u32 psci_fn = smccc_get_function(vcpu); + u32 feature; + unsigned long val; + int ret = 1; + + switch(psci_fn) { + case PSCI_0_2_FN_PSCI_VERSION: + val = KVM_ARM_PSCI_1_0; + break; + case PSCI_1_0_FN_PSCI_FEATURES: + feature = smccc_get_arg1(vcpu); + switch(feature) { + case PSCI_0_2_FN_PSCI_VERSION: + case PSCI_0_2_FN_CPU_SUSPEND: + case PSCI_0_2_FN64_CPU_SUSPEND: + case PSCI_0_2_FN_CPU_OFF: + case PSCI_0_2_FN_CPU_ON: + case PSCI_0_2_FN64_CPU_ON: + case PSCI_0_2_FN_AFFINITY_INFO: + case PSCI_0_2_FN64_AFFINITY_INFO: + case PSCI_0_2_FN_MIGRATE_INFO_TYPE: + case PSCI_0_2_FN_SYSTEM_OFF: + case PSCI_0_2_FN_SYSTEM_RESET: + case PSCI_1_0_FN_PSCI_FEATURES: + val = 0; + break; + default: + val = PSCI_RET_NOT_SUPPORTED; + break; + } + break; + default: + return kvm_psci_0_2_call(vcpu); + } + + smccc_set_retval(vcpu, val, 0, 0, 0); + return ret; +} + static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -355,6 +396,8 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) int kvm_psci_call(struct kvm_vcpu *vcpu) { switch (kvm_psci_version(vcpu)) { + case KVM_ARM_PSCI_1_0: + return kvm_psci_1_0_call(vcpu); case KVM_ARM_PSCI_0_2: return kvm_psci_0_2_call(vcpu); case KVM_ARM_PSCI_0_1: -- cgit v1.1 From 09e6be12effdb33bf7210c8867bbd213b66a499e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:12 +0000 Subject: arm/arm64: KVM: Advertise SMCCC v1.1 The new SMC Calling Convention (v1.1) allows for a reduced overhead when calling into the firmware, and provides a new feature discovery mechanism. Make it visible to KVM guests. 
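For reference, expanding the two new IDs through ARM_SMCCC_CALL_VAL() (fast-call bit set, SMC32 convention, owning entity 0 for Arm Architecture Calls) should give, if the arithmetic here is right:

	ARM_SMCCC_VERSION_FUNC_ID       = 0x80000000
	ARM_SMCCC_ARCH_FEATURES_FUNC_ID = 0x80000001

which is the range that kvm_hvc_call_handler() keys off in the psci.c hunk below.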
Tested-by: Ard Biesheuvel Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm/kvm/handle_exit.c | 2 +- arch/arm64/kvm/handle_exit.c | 2 +- include/kvm/arm_psci.h | 2 +- include/linux/arm-smccc.h | 13 +++++++++++++ virt/kvm/arm/psci.c | 24 +++++++++++++++++++++++- 5 files changed, 39 insertions(+), 4 deletions(-) diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 230ae40..910bd8d 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -36,7 +36,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; - ret = kvm_psci_call(vcpu); + ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { vcpu_set_reg(vcpu, 0, ~0UL); return 1; diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 588f910..e5e741b 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -52,7 +52,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; - ret = kvm_psci_call(vcpu); + ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { vcpu_set_reg(vcpu, 0, ~0UL); return 1; diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 3236043..ed1dd80 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -27,6 +27,6 @@ #define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); #endif /* __KVM_ARM_PSCI_H__ */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 4c5bca38..dc68aa5 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -60,6 +60,19 @@ #define ARM_SMCCC_QUIRK_NONE 0 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ +#define ARM_SMCCC_VERSION_1_0 0x10000 +#define ARM_SMCCC_VERSION_1_1 0x10001 + +#define ARM_SMCCC_VERSION_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0) + +#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 1) + #ifndef __ASSEMBLY__ #include diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 3e7c63e..46a98fe 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -15,6 +15,7 @@ * along with this program. If not, see . 
*/ +#include #include #include #include @@ -339,6 +340,7 @@ static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu) case PSCI_0_2_FN_SYSTEM_OFF: case PSCI_0_2_FN_SYSTEM_RESET: case PSCI_1_0_FN_PSCI_FEATURES: + case ARM_SMCCC_VERSION_FUNC_ID: val = 0; break; default: @@ -393,7 +395,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) * Errors: * -EINVAL: Unrecognized PSCI function */ -int kvm_psci_call(struct kvm_vcpu *vcpu) +static int kvm_psci_call(struct kvm_vcpu *vcpu) { switch (kvm_psci_version(vcpu)) { case KVM_ARM_PSCI_1_0: @@ -406,3 +408,23 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) return -EINVAL; }; } + +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) +{ + u32 func_id = smccc_get_function(vcpu); + u32 val = PSCI_RET_NOT_SUPPORTED; + + switch (func_id) { + case ARM_SMCCC_VERSION_FUNC_ID: + val = ARM_SMCCC_VERSION_1_1; + break; + case ARM_SMCCC_ARCH_FEATURES_FUNC_ID: + /* Nothing supported yet */ + break; + default: + return kvm_psci_call(vcpu); + } + + smccc_set_retval(vcpu, val, 0, 0, 0); + return 1; +} -- cgit v1.1 From a4097b351118e821841941a79ec77d3ce3f1c5d9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:13 +0000 Subject: arm/arm64: KVM: Turn kvm_psci_version into a static inline We're about to need kvm_psci_version in HYP too. So let's turn it into a static inline, and pass the kvm structure as a second parameter (so that HYP can do a kern_hyp_va on it). Tested-by: Ard Biesheuvel Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kvm/hyp/switch.c | 20 ++++++++++++-------- include/kvm/arm_psci.h | 21 ++++++++++++++++++++- virt/kvm/arm/psci.c | 12 ++---------- 3 files changed, 34 insertions(+), 19 deletions(-) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 036e1f3..408c04d 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -19,6 +19,8 @@ #include #include +#include + #include #include #include @@ -350,14 +352,16 @@ again: if (exit_code == ARM_EXCEPTION_TRAP && (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 || - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) && - vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) { - u64 val = PSCI_RET_NOT_SUPPORTED; - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) - val = 2; - - vcpu_set_reg(vcpu, 0, val); - goto again; + kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) { + u32 val = vcpu_get_reg(vcpu, 0); + + if (val == PSCI_0_2_FN_PSCI_VERSION) { + val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm)); + if (unlikely(val == KVM_ARM_PSCI_0_1)) + val = PSCI_RET_NOT_SUPPORTED; + vcpu_set_reg(vcpu, 0, val); + goto again; + } } if (static_branch_unlikely(&vgic_v2_cpuif_trap) && diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index ed1dd80..e518e4e 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -18,6 +18,7 @@ #ifndef __KVM_ARM_PSCI_H__ #define __KVM_ARM_PSCI_H__ +#include #include #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) @@ -26,7 +27,25 @@ #define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 -int kvm_psci_version(struct kvm_vcpu *vcpu); +/* + * We need the KVM pointer independently from the vcpu as we can call + * this from HYP, and need to apply kern_hyp_va on it... + */ +static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) +{ + /* + * Our PSCI implementation stays the same across versions from + * v0.2 onward, only adding the few mandatory functions (such + * as FEATURES with 1.0) that are required by newer + * revisions. 
It is thus safe to return the latest. + */ + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) + return KVM_ARM_PSCI_LATEST; + + return KVM_ARM_PSCI_0_1; +} + + int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); #endif /* __KVM_ARM_PSCI_H__ */ diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 46a98fe..e105c11 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -123,7 +123,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) if (!vcpu) return PSCI_RET_INVALID_PARAMS; if (!vcpu->arch.power_off) { - if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) + if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1) return PSCI_RET_ALREADY_ON; else return PSCI_RET_INVALID_PARAMS; @@ -232,14 +232,6 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); } -int kvm_psci_version(struct kvm_vcpu *vcpu) -{ - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) - return KVM_ARM_PSCI_LATEST; - - return KVM_ARM_PSCI_0_1; -} - static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -397,7 +389,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) */ static int kvm_psci_call(struct kvm_vcpu *vcpu) { - switch (kvm_psci_version(vcpu)) { + switch (kvm_psci_version(vcpu, vcpu->kvm)) { case KVM_ARM_PSCI_1_0: return kvm_psci_1_0_call(vcpu); case KVM_ARM_PSCI_0_2: -- cgit v1.1 From 6167ec5c9145cdf493722dfd80a5d48bafc4a18a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:14 +0000 Subject: arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support A new feature of SMCCC 1.1 is that it offers firmware-based CPU workarounds. In particular, SMCCC_ARCH_WORKAROUND_1 provides BP hardening for CVE-2017-5715. If the host has some mitigation for this issue, report that we deal with it using SMCCC_ARCH_WORKAROUND_1, as we apply the host workaround on every guest exit. Tested-by: Ard Biesheuvel Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm/include/asm/kvm_host.h | 7 +++++++ arch/arm64/include/asm/kvm_host.h | 6 ++++++ include/linux/arm-smccc.h | 5 +++++ virt/kvm/arm/psci.c | 9 ++++++++- 4 files changed, 26 insertions(+), 1 deletion(-) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index acbf9ec..ef54013 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -306,4 +306,11 @@ static inline void kvm_fpsimd_flush_cpu_state(void) {} static inline void kvm_arm_vhe_guest_enter(void) {} static inline void kvm_arm_vhe_guest_exit(void) {} + +static inline bool kvm_arm_harden_branch_predictor(void) +{ + /* No way to detect it yet, pretend it is not there. 
*/ + return false; +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4485ae8..a73f63a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -415,4 +415,10 @@ static inline void kvm_arm_vhe_guest_exit(void) { local_daif_restore(DAIF_PROCCTX_NOIRQ); } + +static inline bool kvm_arm_harden_branch_predictor(void) +{ + return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR); +} + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index dc68aa5..e1ef944 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -73,6 +73,11 @@ ARM_SMCCC_SMC_32, \ 0, 1) +#define ARM_SMCCC_ARCH_WORKAROUND_1 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x8000) + #ifndef __ASSEMBLY__ #include diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index e105c11..6919352 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -405,13 +405,20 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) { u32 func_id = smccc_get_function(vcpu); u32 val = PSCI_RET_NOT_SUPPORTED; + u32 feature; switch (func_id) { case ARM_SMCCC_VERSION_FUNC_ID: val = ARM_SMCCC_VERSION_1_1; break; case ARM_SMCCC_ARCH_FEATURES_FUNC_ID: - /* Nothing supported yet */ + feature = smccc_get_arg1(vcpu); + switch(feature) { + case ARM_SMCCC_ARCH_WORKAROUND_1: + if (kvm_arm_harden_branch_predictor()) + val = 0; + break; + } break; default: return kvm_psci_call(vcpu); -- cgit v1.1 From f72af90c3783d924337624659b43e2d36f1b36b4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:15 +0000 Subject: arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible. So let's intercept it as early as we can by testing for the function call number as soon as we've identified a HVC call coming from the guest. Tested-by: Ard Biesheuvel Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kvm/hyp/hyp-entry.S | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index e4f37b9..f36464b 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -15,6 +15,7 @@ * along with this program. If not, see . */ +#include #include #include @@ -64,10 +65,11 @@ alternative_endif lsr x0, x1, #ESR_ELx_EC_SHIFT cmp x0, #ESR_ELx_EC_HVC64 + ccmp x0, #ESR_ELx_EC_HVC32, #4, ne b.ne el1_trap - mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest - cbnz x1, el1_trap // called HVC + mrs x1, vttbr_el2 // If vttbr is valid, the guest + cbnz x1, el1_hvc_guest // called HVC /* Here, we're pretty sure the host called HVC. */ ldp x0, x1, [sp], #16 @@ -100,6 +102,20 @@ alternative_endif eret +el1_hvc_guest: + /* + * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. + * The workaround has already been applied on the host, + * so let's quickly get back to the guest. We don't bother + * restoring x1, as it can be clobbered anyway. 
+ */ + ldr x1, [sp] // Guest's x0 + eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 + cbnz w1, el1_trap + mov x0, x1 + add sp, sp, #16 + eret + el1_trap: /* * x0: ESR_EC -- cgit v1.1 From 09a8d6d48499f93e2abde691f5800081cd858726 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:16 +0000 Subject: firmware/psci: Expose PSCI conduit In order to call into the firmware to apply workarounds, it is useful to find out whether we're using HVC or SMC. Let's expose this through the psci_ops. Acked-by: Lorenzo Pieralisi Reviewed-by: Robin Murphy Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- drivers/firmware/psci.c | 28 +++++++++++++++++++++++----- include/linux/psci.h | 7 +++++++ 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index 8b25d31..e9493da 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -59,7 +59,9 @@ bool psci_tos_resident_on(int cpu) return cpu == resident_cpu; } -struct psci_operations psci_ops; +struct psci_operations psci_ops = { + .conduit = PSCI_CONDUIT_NONE, +}; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); @@ -210,6 +212,22 @@ static unsigned long psci_migrate_info_up_cpu(void) 0, 0, 0); } +static void set_conduit(enum psci_conduit conduit) +{ + switch (conduit) { + case PSCI_CONDUIT_HVC: + invoke_psci_fn = __invoke_psci_fn_hvc; + break; + case PSCI_CONDUIT_SMC: + invoke_psci_fn = __invoke_psci_fn_smc; + break; + default: + WARN(1, "Unexpected PSCI conduit %d\n", conduit); + } + + psci_ops.conduit = conduit; +} + static int get_set_conduit_method(struct device_node *np) { const char *method; @@ -222,9 +240,9 @@ static int get_set_conduit_method(struct device_node *np) } if (!strcmp("hvc", method)) { - invoke_psci_fn = __invoke_psci_fn_hvc; + set_conduit(PSCI_CONDUIT_HVC); } else if (!strcmp("smc", method)) { - invoke_psci_fn = __invoke_psci_fn_smc; + set_conduit(PSCI_CONDUIT_SMC); } else { pr_warn("invalid \"method\" property: %s\n", method); return -EINVAL; @@ -654,9 +672,9 @@ int __init psci_acpi_init(void) pr_info("probing for conduit method from ACPI.\n"); if (acpi_psci_use_hvc()) - invoke_psci_fn = __invoke_psci_fn_hvc; + set_conduit(PSCI_CONDUIT_HVC); else - invoke_psci_fn = __invoke_psci_fn_smc; + set_conduit(PSCI_CONDUIT_SMC); return psci_probe(); } diff --git a/include/linux/psci.h b/include/linux/psci.h index f724fd8..f2679e5 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -25,6 +25,12 @@ bool psci_tos_resident_on(int cpu); int psci_cpu_init_idle(unsigned int cpu); int psci_cpu_suspend_enter(unsigned long index); +enum psci_conduit { + PSCI_CONDUIT_NONE, + PSCI_CONDUIT_SMC, + PSCI_CONDUIT_HVC, +}; + struct psci_operations { u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); @@ -34,6 +40,7 @@ struct psci_operations { int (*affinity_info)(unsigned long target_affinity, unsigned long lowest_affinity_level); int (*migrate_info_type)(void); + enum psci_conduit conduit; }; extern struct psci_operations psci_ops; -- cgit v1.1 From e78eef554a912ef6c1e0bbf97619dafbeae3339f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:17 +0000 Subject: firmware/psci: Expose SMCCC version through psci_ops Since PSCI 1.0 allows the SMCCC version to be (indirectly) probed, let's do that at boot time, and expose the version of the calling convention as part of the psci_ops structure. 
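The point of exposing the SMCCC version alongside the conduit is that later callers can make the whole "SMCCC 1.1 or bail out" decision straight from psci_ops, without re-probing the firmware. Roughly the consuming pattern (the cpu_errata changes at the end of this series use exactly this shape):

	/* Sketch of a psci_ops consumer; surrounding function elided. */
	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return 0;		/* SMCCC 1.0 only: no 1.1 fast calls */

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		/* issue the call with HVC */
		break;
	case PSCI_CONDUIT_SMC:
		/* issue the call with SMC */
		break;
	default:
		return 0;		/* PSCI_CONDUIT_NONE: nothing to call */
	}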
Acked-by: Lorenzo Pieralisi Reviewed-by: Robin Murphy Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- drivers/firmware/psci.c | 27 +++++++++++++++++++++++++++ include/linux/psci.h | 6 ++++++ 2 files changed, 33 insertions(+) diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index e9493da..c80ec1d 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -61,6 +61,7 @@ bool psci_tos_resident_on(int cpu) struct psci_operations psci_ops = { .conduit = PSCI_CONDUIT_NONE, + .smccc_version = SMCCC_VERSION_1_0, }; typedef unsigned long (psci_fn)(unsigned long, unsigned long, @@ -511,6 +512,31 @@ static void __init psci_init_migrate(void) pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); } +static void __init psci_init_smccc(void) +{ + u32 ver = ARM_SMCCC_VERSION_1_0; + int feature; + + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); + + if (feature != PSCI_RET_NOT_SUPPORTED) { + u32 ret; + ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); + if (ret == ARM_SMCCC_VERSION_1_1) { + psci_ops.smccc_version = SMCCC_VERSION_1_1; + ver = ret; + } + } + + /* + * Conveniently, the SMCCC and PSCI versions are encoded the + * same way. No, this isn't accidental. + */ + pr_info("SMC Calling Convention v%d.%d\n", + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); + +} + static void __init psci_0_2_set_functions(void) { pr_info("Using standard PSCI v0.2 function IDs\n"); @@ -559,6 +585,7 @@ static int __init psci_probe(void) psci_init_migrate(); if (PSCI_VERSION_MAJOR(ver) >= 1) { + psci_init_smccc(); psci_init_cpu_suspend(); psci_init_system_suspend(); } diff --git a/include/linux/psci.h b/include/linux/psci.h index f2679e5..8b1b3b5 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -31,6 +31,11 @@ enum psci_conduit { PSCI_CONDUIT_HVC, }; +enum smccc_version { + SMCCC_VERSION_1_0, + SMCCC_VERSION_1_1, +}; + struct psci_operations { u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); @@ -41,6 +46,7 @@ struct psci_operations { unsigned long lowest_affinity_level); int (*migrate_info_type)(void); enum psci_conduit conduit; + enum smccc_version smccc_version; }; extern struct psci_operations psci_ops; -- cgit v1.1 From ded4c39e93f3b72968fdb79baba27f3b83dad34c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:18 +0000 Subject: arm/arm64: smccc: Make function identifiers an unsigned quantity Function identifiers are a 32bit, unsigned quantity. But we never tell so to the compiler, resulting in the following: 4ac: b26187e0 mov x0, #0xffffffff80000001 We thus rely on the firmware narrowing it for us, which is not always a reasonable expectation. 
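A small userspace toy reproduces the widening problem on an LP64 target (the conversion to int is implementation-defined, but this is what GCC does in practice and matches the disassembly quoted above):

#include <stdio.h>

int main(void)
{
	/* What the old signed constants amount to: a negative int... */
	int fid = (int)((1u << 31) | 1u);		/* 0x80000001 stored as a negative int */
	/* ...which sign-extends when widened to an unsigned long argument. */
	unsigned long widened = (unsigned long)fid;	/* 0xffffffff80000001 */
	/* With the U suffix (_AC(1,U)) the constant is unsigned throughout. */
	unsigned long fixed = (1ul << 31) | 1ul;	/* 0x0000000080000001 */

	printf("%lx\n%lx\n", widened, fixed);
	return 0;
}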
Cc: stable@vger.kernel.org Reported-by: Ard Biesheuvel Acked-by: Ard Biesheuvel Reviewed-by: Robin Murphy Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- include/linux/arm-smccc.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index e1ef944..dd44d84 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -14,14 +14,16 @@ #ifndef __LINUX_ARM_SMCCC_H #define __LINUX_ARM_SMCCC_H +#include + /* * This file provides common defines for ARM SMC Calling Convention as * specified in * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html */ -#define ARM_SMCCC_STD_CALL 0 -#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_STD_CALL _AC(0,U) +#define ARM_SMCCC_FAST_CALL _AC(1,U) #define ARM_SMCCC_TYPE_SHIFT 31 #define ARM_SMCCC_SMC_32 0 -- cgit v1.1 From f2d3b2e8759a5833df6f022e42df2d581e6d843c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:19 +0000 Subject: arm/arm64: smccc: Implement SMCCC v1.1 inline primitive One of the major improvement of SMCCC v1.1 is that it only clobbers the first 4 registers, both on 32 and 64bit. This means that it becomes very easy to provide an inline version of the SMC call primitive, and avoid performing a function call to stash the registers that would otherwise be clobbered by SMCCC v1.0. Reviewed-by: Robin Murphy Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- include/linux/arm-smccc.h | 141 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index dd44d84..a031897 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -150,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) +/* SMCCC v1.1 implementation madness follows */ +#ifdef CONFIG_ARM64 + +#define SMCCC_SMC_INST "smc #0" +#define SMCCC_HVC_INST "hvc #0" + +#elif defined(CONFIG_ARM) +#include +#include + +#define SMCCC_SMC_INST __SMC(0) +#define SMCCC_HVC_INST __HVC(0) + +#endif + +#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x + +#define __count_args(...) 
\ + ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __constraint_write_0 \ + "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_1 \ + "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_2 \ + "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) +#define __constraint_write_3 \ + "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) +#define __constraint_write_4 __constraint_write_3 +#define __constraint_write_5 __constraint_write_4 +#define __constraint_write_6 __constraint_write_5 +#define __constraint_write_7 __constraint_write_6 + +#define __constraint_read_0 +#define __constraint_read_1 +#define __constraint_read_2 +#define __constraint_read_3 +#define __constraint_read_4 "r" (r4) +#define __constraint_read_5 __constraint_read_4, "r" (r5) +#define __constraint_read_6 __constraint_read_5, "r" (r6) +#define __constraint_read_7 __constraint_read_6, "r" (r7) + +#define __declare_arg_0(a0, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register unsigned long r1 asm("r1"); \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_1(a0, a1, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_2(a0, a1, a2, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register typeof(a2) r2 asm("r2") = a2; \ + register unsigned long r3 asm("r3") + +#define __declare_arg_3(a0, a1, a2, a3, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register typeof(a2) r2 asm("r2") = a2; \ + register typeof(a3) r3 asm("r3") = a3 + +#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + __declare_arg_3(a0, a1, a2, a3, res); \ + register typeof(a4) r4 asm("r4") = a4 + +#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + __declare_arg_4(a0, a1, a2, a3, a4, res); \ + register typeof(a5) r5 asm("r5") = a5 + +#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ + register typeof(a6) r6 asm("r6") = a6 + +#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ + register typeof(a7) r7 asm("r7") = a7 + +#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) +#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) + +#define ___constraints(count) \ + : __constraint_write_ ## count \ + : __constraint_read_ ## count \ + : "memory" +#define __constraints(count) ___constraints(count) + +/* + * We have an output list that is not necessarily used, and GCC feels + * entitled to optimise the whole sequence away. "volatile" is what + * makes it stick. + */ +#define __arm_smccc_1_1(inst, ...) \ + do { \ + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ + asm volatile(inst "\n" \ + __constraints(__count_args(__VA_ARGS__))); \ + if (___res) \ + *___res = (typeof(*___res)){r0, r1, r2, r3}; \ + } while (0) + +/* + * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. 
+ * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make SMC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction if not NULL. + */ +#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) + +/* + * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make HVC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the HVC instruction. The return values are updated with the content + * from register 0 to 3 on return from the HVC instruction if not NULL. + */ +#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) + #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ -- cgit v1.1 From b092201e0020614127f495c092e0a12d26a2116e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:20 +0000 Subject: arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1. It is lovely. Really. Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kernel/bpi.S | 20 +++++++++++++ arch/arm64/kernel/cpu_errata.c | 68 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 87 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S index 76225c2..fdeed62 100644 --- a/arch/arm64/kernel/bpi.S +++ b/arch/arm64/kernel/bpi.S @@ -17,6 +17,7 @@ */ #include +#include .macro ventry target .rept 31 @@ -85,3 +86,22 @@ ENTRY(__qcom_hyp_sanitize_link_stack_start) .endr ldp x29, x30, [sp], #16 ENTRY(__qcom_hyp_sanitize_link_stack_end) + +.macro smccc_workaround_1 inst + sub sp, sp, #(8 * 4) + stp x2, x3, [sp, #(8 * 0)] + stp x0, x1, [sp, #(8 * 2)] + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 + \inst #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +.endm + +ENTRY(__smccc_workaround_1_smc_start) + smccc_workaround_1 smc +ENTRY(__smccc_workaround_1_smc_end) + +ENTRY(__smccc_workaround_1_hvc_start) + smccc_workaround_1 hvc +ENTRY(__smccc_workaround_1_hvc_end) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index ed68818..9e77809a 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -70,6 +70,10 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[]; extern char __qcom_hyp_sanitize_link_stack_start[]; extern char __qcom_hyp_sanitize_link_stack_end[]; +extern char __smccc_workaround_1_smc_start[]; +extern char __smccc_workaround_1_smc_end[]; +extern char __smccc_workaround_1_hvc_start[]; +extern char __smccc_workaround_1_hvc_end[]; static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) @@ -116,6 +120,10 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, #define __psci_hyp_bp_inval_end NULL #define __qcom_hyp_sanitize_link_stack_start NULL #define __qcom_hyp_sanitize_link_stack_end NULL +#define 
__smccc_workaround_1_smc_start NULL +#define __smccc_workaround_1_smc_end NULL +#define __smccc_workaround_1_hvc_start NULL +#define __smccc_workaround_1_hvc_end NULL static void __install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, @@ -142,17 +150,75 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); } +#include +#include #include +static void call_smc_arch_workaround_1(void) +{ + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static void call_hvc_arch_workaround_1(void) +{ + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) +{ + bp_hardening_cb_t cb; + void *smccc_start, *smccc_end; + struct arm_smccc_res res; + + if (!entry->matches(entry, SCOPE_LOCAL_CPU)) + return false; + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) + return false; + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if (res.a0) + return false; + cb = call_hvc_arch_workaround_1; + smccc_start = __smccc_workaround_1_hvc_start; + smccc_end = __smccc_workaround_1_hvc_end; + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if (res.a0) + return false; + cb = call_smc_arch_workaround_1; + smccc_start = __smccc_workaround_1_smc_start; + smccc_end = __smccc_workaround_1_smc_end; + break; + + default: + return false; + } + + install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); + + return true; +} + static int enable_psci_bp_hardening(void *data) { const struct arm64_cpu_capabilities *entry = data; - if (psci_ops.get_version) + if (psci_ops.get_version) { + if (check_smccc_arch_workaround_1(entry)) + return 0; + install_bp_hardening_cb(entry, (bp_hardening_cb_t)psci_ops.get_version, __psci_hyp_bp_inval_start, __psci_hyp_bp_inval_end); + } return 0; } -- cgit v1.1 From 3a0a397ff5ff8b56ca9f7908b75dee6bf0b5fabb Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:21 +0000 Subject: arm64: Kill PSCI_GET_VERSION as a variant-2 workaround Now that we've standardised on SMCCC v1.1 to perform the branch prediction invalidation, let's drop the previous band-aid. If vendors haven't updated their firmware to do SMCCC 1.1, they haven't updated PSCI either, so we don't loose anything. 
Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kernel/bpi.S | 24 ---------------------- arch/arm64/kernel/cpu_errata.c | 45 ++++++++++++------------------------------ arch/arm64/kvm/hyp/switch.c | 14 ------------- 3 files changed, 13 insertions(+), 70 deletions(-) diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S index fdeed62..e5de335 100644 --- a/arch/arm64/kernel/bpi.S +++ b/arch/arm64/kernel/bpi.S @@ -54,30 +54,6 @@ ENTRY(__bp_harden_hyp_vecs_start) vectors __kvm_hyp_vector .endr ENTRY(__bp_harden_hyp_vecs_end) -ENTRY(__psci_hyp_bp_inval_start) - sub sp, sp, #(8 * 18) - stp x16, x17, [sp, #(16 * 0)] - stp x14, x15, [sp, #(16 * 1)] - stp x12, x13, [sp, #(16 * 2)] - stp x10, x11, [sp, #(16 * 3)] - stp x8, x9, [sp, #(16 * 4)] - stp x6, x7, [sp, #(16 * 5)] - stp x4, x5, [sp, #(16 * 6)] - stp x2, x3, [sp, #(16 * 7)] - stp x0, x1, [sp, #(16 * 8)] - mov x0, #0x84000000 - smc #0 - ldp x16, x17, [sp, #(16 * 0)] - ldp x14, x15, [sp, #(16 * 1)] - ldp x12, x13, [sp, #(16 * 2)] - ldp x10, x11, [sp, #(16 * 3)] - ldp x8, x9, [sp, #(16 * 4)] - ldp x6, x7, [sp, #(16 * 5)] - ldp x4, x5, [sp, #(16 * 6)] - ldp x2, x3, [sp, #(16 * 7)] - ldp x0, x1, [sp, #(16 * 8)] - add sp, sp, #(8 * 18) -ENTRY(__psci_hyp_bp_inval_end) ENTRY(__qcom_hyp_sanitize_link_stack_start) stp x29, x30, [sp, #-16]! diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 9e77809a..0782359 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -67,7 +67,6 @@ static int cpu_enable_trap_ctr_access(void *__unused) DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); #ifdef CONFIG_KVM -extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[]; extern char __qcom_hyp_sanitize_link_stack_start[]; extern char __qcom_hyp_sanitize_link_stack_end[]; extern char __smccc_workaround_1_smc_start[]; @@ -116,8 +115,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, spin_unlock(&bp_lock); } #else -#define __psci_hyp_bp_inval_start NULL -#define __psci_hyp_bp_inval_end NULL #define __qcom_hyp_sanitize_link_stack_start NULL #define __qcom_hyp_sanitize_link_stack_end NULL #define __smccc_workaround_1_smc_start NULL @@ -164,24 +161,25 @@ static void call_hvc_arch_workaround_1(void) arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } -static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) +static int enable_smccc_arch_workaround_1(void *data) { + const struct arm64_cpu_capabilities *entry = data; bp_hardening_cb_t cb; void *smccc_start, *smccc_end; struct arm_smccc_res res; if (!entry->matches(entry, SCOPE_LOCAL_CPU)) - return false; + return 0; if (psci_ops.smccc_version == SMCCC_VERSION_1_0) - return false; + return 0; switch (psci_ops.conduit) { case PSCI_CONDUIT_HVC: arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); if (res.a0) - return false; + return 0; cb = call_hvc_arch_workaround_1; smccc_start = __smccc_workaround_1_hvc_start; smccc_end = __smccc_workaround_1_hvc_end; @@ -191,35 +189,18 @@ static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *e arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); if (res.a0) - return false; + return 0; cb = call_smc_arch_workaround_1; smccc_start = __smccc_workaround_1_smc_start; smccc_end = __smccc_workaround_1_smc_end; break; default: - return false; + return 0; } install_bp_hardening_cb(entry, cb, smccc_start, 
smccc_end); - return true; -} - -static int enable_psci_bp_hardening(void *data) -{ - const struct arm64_cpu_capabilities *entry = data; - - if (psci_ops.get_version) { - if (check_smccc_arch_workaround_1(entry)) - return 0; - - install_bp_hardening_cb(entry, - (bp_hardening_cb_t)psci_ops.get_version, - __psci_hyp_bp_inval_start, - __psci_hyp_bp_inval_end); - } - return 0; } @@ -399,22 +380,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, @@ -428,12 +409,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, #endif { diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 408c04d..cac6a05 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -350,20 +350,6 @@ again: if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) goto again; - if (exit_code == ARM_EXCEPTION_TRAP && - (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 || - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) { - u32 val = vcpu_get_reg(vcpu, 0); - - if (val == PSCI_0_2_FN_PSCI_VERSION) { - val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm)); - if (unlikely(val == KVM_ARM_PSCI_0_1)) - val = PSCI_RET_NOT_SUPPORTED; - vcpu_set_reg(vcpu, 0, val); - goto again; - } - } - if (static_branch_unlikely(&vgic_v2_cpuif_trap) && exit_code == ARM_EXCEPTION_TRAP) { bool valid; -- cgit v1.1
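One detail of the SMCCC v1.1 inline primitive added earlier in this series is worth pulling out on its own: the preprocessor trick that turns "how many arguments were passed" into a token, which then selects the matching __declare_arg_N and __constraint_*_N variants. A standalone toy (the two counting macros are copied verbatim from the arm-smccc.h hunk above; everything else is stripped) shows the count that gets derived -- the number of arguments after the function ID, not counting the result pointer:

#include <stdio.h>

#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
#define __count_args(...) \
	___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)

int main(void)
{
	/* arm_smccc_1_1_smc(fid, a1, &res): one argument after fid */
	printf("%d\n", __count_args(0x80000000U, 1UL, (void *)0));
	/* arm_smccc_1_1_smc(fid, a1, a2, a3, &res): three */
	printf("%d\n", __count_args(0x80000000U, 1UL, 2UL, 3UL, (void *)0));
	return 0;
}

This prints 1 and 3; in the real macro those values pick __declare_arg_1()/__declare_arg_3() and the corresponding constraint lists at expansion time, which is how the inline SMC/HVC sequence ends up clobbering only the registers it actually uses.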