From e1ccbbc9d5aa01a6c1c9c78acea6515db4f1be71 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Tue, 14 Oct 2014 16:34:47 +0200
Subject: efi: dmi: add support for SMBIOS 3.0 UEFI configuration table

This adds support to the UEFI side for detecting the presence of an
SMBIOS 3.0 64-bit entry point. This allows the actual SMBIOS structure
table to reside at a physical offset above 4 GB, which cannot be
supported by the legacy SMBIOS 32-bit entry point.

Since the firmware can legally provide both entry points, store the
SMBIOS 3.0 entry point in a separate variable, and let the DMI decoding
layer decide which one will be used.

Tested-by: Suravee Suthikulpanit
Acked-by: Leif Lindholm
Acked-by: Matt Fleming
Signed-off-by: Ard Biesheuvel
---
 include/linux/efi.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0949f9c..0238d61 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -547,6 +547,9 @@ void efi_native_runtime_setup(void);
 #define SMBIOS_TABLE_GUID \
     EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
 
+#define SMBIOS3_TABLE_GUID \
+    EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 )
+
 #define SAL_SYSTEM_TABLE_GUID \
     EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
 
@@ -810,7 +813,8 @@ extern struct efi {
 	unsigned long mps;		/* MPS table */
 	unsigned long acpi;		/* ACPI table (IA64 ext 0.71) */
 	unsigned long acpi20;		/* ACPI table (ACPI 2.0) */
-	unsigned long smbios;		/* SM BIOS table */
+	unsigned long smbios;		/* SMBIOS table (32 bit entry point) */
+	unsigned long smbios3;		/* SMBIOS table (64 bit entry point) */
 	unsigned long sal_systab;	/* SAL system table */
 	unsigned long boot_info;	/* boot info table */
 	unsigned long hcdp;		/* HCDP table */
--
cgit v1.1
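[For reference, a minimal sketch of what the 64-bit entry point looks
like and how a consumer might prefer it over the legacy one. The
structure layout follows the SMBIOS 3.0 specification, and
dmi_pick_entry_point() is a hypothetical helper, not something this
patch adds; it assumes the usual EFI_INVALID_TABLE_ADDR sentinel for
absent configuration tables.]

/* Sketch only: layout per the SMBIOS 3.0 spec; helper is hypothetical. */
#include <linux/efi.h>
#include <linux/types.h>

struct smbios3_entry_point {
	u8  anchor[5];		/* "_SM3_" */
	u8  checksum;
	u8  length;		/* entry point length */
	u8  major, minor, docrev;
	u8  revision;		/* entry point revision */
	u8  reserved;
	u32 max_size;		/* maximum structure table size */
	u64 table_address;	/* may legally live above 4 GB */
} __packed;

/* Prefer the 64-bit entry point when the firmware provides both. */
static unsigned long dmi_pick_entry_point(void)
{
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		return efi.smbios3;
	return efi.smbios;
}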
From fb7332a9fedfd62b1ba6530c86f39f0fa38afd49 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 29 Oct 2014 10:03:09 +0000
Subject: mmu_gather: move minimal range calculations into generic code

On architectures with hardware broadcasting of TLB invalidation
messages, it makes sense to reduce the range of the mmu_gather
structure when unmapping page ranges based on the dirty address
information passed to tlb_remove_tlb_entry.

arm64 already does this by directly manipulating the start/end fields
of the gather structure, but this confuses the generic code, which does
not expect these fields to change and can end up calculating invalid,
negative ranges when forcing a flush in zap_pte_range.

This patch moves the minimal range calculation out of the arm64 code
and into the generic implementation, simplifying zap_pte_range in the
process (which no longer needs to care about start/end, since they will
point to the appropriate ranges already). With the range being tracked
by core code, the need_flush flag is dropped in favour of checking that
the end of the range has actually been set.

Cc: Benjamin Herrenschmidt
Cc: Peter Zijlstra
Cc: Russell King - ARM Linux
Cc: Michal Simek
Acked-by: Linus Torvalds
Signed-off-by: Will Deacon
---
 include/asm-generic/tlb.h | 57 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 47 insertions(+), 10 deletions(-)

(limited to 'include')

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 5672d7e..0884805 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -96,10 +96,9 @@ struct mmu_gather {
 #endif
 	unsigned long		start;
 	unsigned long		end;
-	unsigned int		need_flush : 1,	/* Did free PTEs */
 				/* we are in the middle of an operation to clear
 				 * a full mm and can make some optimizations */
-				fullmm : 1,
+	unsigned int		fullmm : 1,
 				/* we have performed an operation which
 				 * requires a complete flush of the tlb */
 				need_flush_all : 1;
@@ -128,16 +127,54 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 		tlb_flush_mmu(tlb);
 }
 
+static inline void __tlb_adjust_range(struct mmu_gather *tlb,
+				      unsigned long address)
+{
+	tlb->start = min(tlb->start, address);
+	tlb->end = max(tlb->end, address + PAGE_SIZE);
+}
+
+static inline void __tlb_reset_range(struct mmu_gather *tlb)
+{
+	tlb->start = TASK_SIZE;
+	tlb->end = 0;
+}
+
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush. When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+#ifndef tlb_start_vma
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#endif
+
+#define __tlb_end_vma(tlb, vma)					\
+	do {							\
+		if (!tlb->fullmm && tlb->end) {			\
+			tlb_flush(tlb);				\
+			__tlb_reset_range(tlb);			\
+		}						\
+	} while (0)
+
+#ifndef tlb_end_vma
+#define tlb_end_vma	__tlb_end_vma
+#endif
+
+#ifndef __tlb_remove_tlb_entry
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#endif
+
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate.   This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
+ * Record the fact that pte's were really unmapped by updating the range,
+ * so we can later optimise away the tlb invalidate.   This helps when
+ * userspace is unmapping already-unmapped pages, which happens quite a lot.
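[To see the new range tracking in isolation, here is a stand-alone
userspace toy, not kernel code, that mirrors __tlb_adjust_range() /
__tlb_reset_range() and the tlb->end test that replaces need_flush.
TASK_SIZE and the addresses are purely illustrative.]

/* Stand-alone illustration of the mmu_gather range tracking above. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define TASK_SIZE	(1UL << 47)	/* illustrative user VA limit */

struct mmu_gather { unsigned long start, end; };

static unsigned long min(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max(unsigned long a, unsigned long b) { return a > b ? a : b; }

static void __tlb_reset_range(struct mmu_gather *tlb)
{
	tlb->start = TASK_SIZE;	/* "empty" range: start > end */
	tlb->end = 0;
}

static void __tlb_adjust_range(struct mmu_gather *tlb, unsigned long address)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + PAGE_SIZE);
}

int main(void)
{
	struct mmu_gather tlb;

	__tlb_reset_range(&tlb);
	__tlb_adjust_range(&tlb, 0x7000);	/* pretend we unmapped two ptes */
	__tlb_adjust_range(&tlb, 0x3000);

	if (tlb.end)	/* the check that replaced the need_flush flag */
		printf("flush [%#lx, %#lx)\n", tlb.start, tlb.end);
	return 0;
}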
 */
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
@@ -151,27 +188,27 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
 	} while (0)
 
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
 
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
--
cgit v1.1

From 766a85d7bc5d7f1ddd6de28bdb844eae45ec63b0 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Fri, 28 Nov 2014 05:26:34 +0000
Subject: arm64: ptrace: add NT_ARM_SYSTEM_CALL regset

This regset is intended to be used to get and set a system call number
while tracing. There was some discussion about possible approaches to
do so:

(1) modify the x8 register indirectly with ptrace(PTRACE_SETREGSET),
    and update regs->syscallno later on in syscall_trace_enter(), or
(2) define a dedicated regset for this purpose, as on s390, or
(3) support ptrace(PTRACE_SET_SYSCALL), as on arch/arm

Given that user_pt_regs doesn't expose 'syscallno' to the tracer, and
that secure_computing() expects a changed syscall number, especially
-1, to be visible before syscall_trace_enter() returns, (1) doesn't
work well. We take (2), since it is much cleaner.

Signed-off-by: AKASHI Takahiro
Signed-off-by: Will Deacon
---
 include/uapi/linux/elf.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index ea9bf25..71e1d0e 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -397,6 +397,7 @@ typedef struct elf64_shdr {
 #define NT_ARM_TLS	0x401		/* ARM TLS register */
 #define NT_ARM_HW_BREAK	0x402		/* ARM hardware breakpoint registers */
 #define NT_ARM_HW_WATCH	0x403		/* ARM hardware watchpoint registers */
+#define NT_ARM_SYSTEM_CALL	0x404	/* ARM system call number */
 #define NT_METAG_CBUF	0x500		/* Metag catch buffer registers */
 #define NT_METAG_RPIPE	0x501		/* Metag read pipeline state */
 #define NT_METAG_TLS	0x502		/* Metag TLS pointer */
--
cgit v1.1
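[For reference, a sketch of how a tracer might drive this regset from
userspace. The helper names are hypothetical and error handling is
omitted; the regset payload is assumed to be a single int holding the
syscall number, with -1 meaning "skip the call", mirroring
PTRACE_SET_SYSCALL on arch/arm.]

/* Hypothetical tracer helpers; error handling omitted for brevity. */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>		/* NT_ARM_SYSTEM_CALL */

int get_syscall_nr(pid_t pid)
{
	int nr;
	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };

	/* The regset payload is a single int: the current syscall number. */
	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
	return nr;
}

void set_syscall_nr(pid_t pid, int nr)
{
	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };

	/* Writing -1 asks the kernel to skip the system call. */
	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
}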
From 65a2ae8d5bd0ab9fb5846c0223d5dcf74e87f9d2 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Fri, 28 Nov 2014 05:26:36 +0000
Subject: asm-generic: add generic seccomp.h for secure computing mode 1

These values (__NR_seccomp_*) are used solely in secure_computing() to
identify mode 1 system calls. If compat system calls have different
syscall numbers, asm/seccomp.h may override them.

Acked-by: Arnd Bergmann
Reviewed-by: Kees Cook
Signed-off-by: AKASHI Takahiro
Signed-off-by: Will Deacon
---
 include/asm-generic/seccomp.h | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 include/asm-generic/seccomp.h

(limited to 'include')

diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
new file mode 100644
index 0000000..9fa1f65
--- /dev/null
+++ b/include/asm-generic/seccomp.h
@@ -0,0 +1,30 @@
+/*
+ * include/asm-generic/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_GENERIC_SECCOMP_H
+#define _ASM_GENERIC_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32)
+#define __NR_seccomp_read_32		__NR_read
+#define __NR_seccomp_write_32		__NR_write
+#define __NR_seccomp_exit_32		__NR_exit
+#define __NR_seccomp_sigreturn_32	__NR_rt_sigreturn
+#endif /* CONFIG_COMPAT && ! already defined */
+
+#define __NR_seccomp_read		__NR_read
+#define __NR_seccomp_write		__NR_write
+#define __NR_seccomp_exit		__NR_exit
+#ifndef __NR_seccomp_sigreturn
+#define __NR_seccomp_sigreturn	__NR_rt_sigreturn
+#endif
+
+#endif /* _ASM_GENERIC_SECCOMP_H */
--
cgit v1.1
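[For reference, a minimal userspace demonstration of the mode these
constants describe: after switching to SECCOMP_MODE_STRICT, only read,
write, exit and sigreturn, the __NR_seccomp_* calls enumerated above,
are permitted; any other syscall kills the task with SIGKILL.]

/* Minimal strict-mode (mode 1) seccomp demo for a seccomp-enabled kernel. */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/seccomp.h>

int main(void)
{
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
		perror("prctl(PR_SET_SECCOMP)");
		return 1;
	}

	/* write(2) is one of the whitelisted mode 1 calls, so this works. */
	write(STDOUT_FILENO, "now in strict mode\n", 19);

	/*
	 * Leave via the raw exit(2) syscall: exit_group(2), which libc's
	 * _exit() normally uses, is not on the mode 1 whitelist.
	 */
	syscall(SYS_exit, 0);
	return 0;	/* not reached */
}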