From d4edcf0d56958db0aca0196314ca38a5e730ea92 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:01:56 -0800 Subject: mm/gup: Switch all callers of get_user_pages() to not pass tsk/mm We will soon modify the vanilla get_user_pages() so it can no longer be used on mm/tasks other than 'current/current->mm', which is by far the most common way it is called. For now, we allow the old-style calls, but warn when they are used. (implemented in previous patch) This patch switches all callers of: get_user_pages() get_user_pages_unlocked() get_user_pages_locked() to stop passing tsk/mm so they will no longer see the warnings. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrea Arcangeli Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Kirill A. Shutemov Cc: Linus Torvalds Cc: Naoya Horiguchi Cc: Peter Zijlstra Cc: Rik van Riel Cc: Srikar Dronamraju Cc: Vlastimil Babka Cc: jack@suse.cz Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210156.113E9407@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/cris/arch-v32/drivers/cryptocop.c | 8 ++------ arch/ia64/kernel/err_inject.c | 3 +-- arch/mips/mm/gup.c | 3 +-- arch/s390/mm/gup.c | 4 +--- arch/sh/mm/gup.c | 2 +- arch/sparc/mm/gup.c | 2 +- arch/x86/mm/gup.c | 2 +- arch/x86/mm/mpx.c | 4 ++-- 8 files changed, 10 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c index 877da19..617645d 100644 --- a/arch/cris/arch-v32/drivers/cryptocop.c +++ b/arch/cris/arch-v32/drivers/cryptocop.c @@ -2719,9 +2719,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig /* Acquire the mm page semaphore. 
*/ down_read(&current->mm->mmap_sem); - err = get_user_pages(current, - current->mm, - (unsigned long int)(oper.indata + prev_ix), + err = get_user_pages((unsigned long int)(oper.indata + prev_ix), noinpages, 0, /* read access only for in data */ 0, /* no force */ @@ -2736,9 +2734,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig } noinpages = err; if (oper.do_cipher){ - err = get_user_pages(current, - current->mm, - (unsigned long int)oper.cipher_outdata, + err = get_user_pages((unsigned long int)oper.cipher_outdata, nooutpages, 1, /* write access for out data */ 0, /* no force */ diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index 0c161ed..09f8457 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -142,8 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr, u64 virt_addr=simple_strtoull(buf, NULL, 16); int ret; - ret = get_user_pages(current, current->mm, virt_addr, - 1, VM_READ, 0, NULL, NULL); + ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL); if (ret<=0) { #ifdef ERR_INJ_DEBUG printk("Virtual address %lx is not existing.\n",virt_addr); diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c index 1afd87c..982e83f 100644 --- a/arch/mips/mm/gup.c +++ b/arch/mips/mm/gup.c @@ -286,8 +286,7 @@ slow_irqon: start += nr << PAGE_SHIFT; pages += nr; - ret = get_user_pages_unlocked(current, mm, start, - (end - start) >> PAGE_SHIFT, + ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, write, 0, pages); /* Have to be a bit careful with return values */ diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 13dab0c..49a1c84 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -210,7 +210,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { - struct mm_struct *mm = current->mm; int nr, ret; might_sleep(); @@ -222,8 +221,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; - ret = get_user_pages_unlocked(current, mm, start, - nr_pages - nr, write, 0, pages); + ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); /* Have to be a bit careful with return values */ if (nr > 0) ret = (ret < 0) ?
nr : ret + nr; diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c index e7af6a6..40fa6c8 100644 --- a/arch/sh/mm/gup.c +++ b/arch/sh/mm/gup.c @@ -257,7 +257,7 @@ slow_irqon: start += nr << PAGE_SHIFT; pages += nr; - ret = get_user_pages_unlocked(current, mm, start, + ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, write, 0, pages); /* Have to be a bit careful with return values */ diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index eb3d8e8..4e06750 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -237,7 +237,7 @@ slow: start += nr << PAGE_SHIFT; pages += nr; - ret = get_user_pages_unlocked(current, mm, start, + ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, write, 0, pages); /* Have to be a bit careful with return values */ diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 6d5eb59..ce5e454 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -422,7 +422,7 @@ slow_irqon: start += nr << PAGE_SHIFT; pages += nr; - ret = get_user_pages_unlocked(current, mm, start, + ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, write, 0, pages); diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index b2fd67d..84fa4a4 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -546,8 +546,8 @@ static int mpx_resolve_fault(long __user *addr, int write) int nr_pages = 1; int force = 0; - gup_ret = get_user_pages(current, current->mm, (unsigned long)addr, - nr_pages, write, force, NULL, NULL); + gup_ret = get_user_pages((unsigned long)addr, nr_pages, write, + force, NULL, NULL); /* * get_user_pages() returns number of pages gotten. * 0 means we failed to fault in and get anything, -- cgit v1.1 From 1f96b1efbad4bb753e7fd265753f6cac1cdc5648 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:01:58 -0800 Subject: x86/fpu: Add placeholder for 'Processor Trace' XSAVE state There is an XSAVE state component for Intel Processor Trace (PT). But, we do not currently use it. We add a placeholder in the code for it so it is not a mystery and also so we do not need an explicit enum initialization for Protection Keys in a moment. Why don't we use it? We might end up using this at _some_ point in the future. But, this is a "system" state which requires using the currently unsupported XSAVES feature. Unlike all the other XSAVE states, PT state is also not directly tied to a thread. You might context-switch between threads, but not want to change any of the PT state. Or, you might switch between threads, and *do* want to change PT state, all depending on what is being traced. We currently just manually set some MSRs to do this PT context switching, and it is unclear whether replacing our direct MSR use with XSAVE will be a net win or loss, both in code complexity and performance. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: fenghua.yu@intel.com Cc: linux-mm@kvack.org Cc: yu-cheng.yu@intel.com Link: http://lkml.kernel.org/r/20160212210158.5E4BCAE2@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu/types.h | 1 + arch/x86/kernel/fpu/xstate.c | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 1c6f6ac..aad3181 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -108,6 +108,7 @@ enum xfeature { XFEATURE_OPMASK, XFEATURE_ZMM_Hi256, XFEATURE_Hi16_ZMM, + XFEATURE_PT_UNIMPLEMENTED_SO_FAR, XFEATURE_MAX, }; diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index d425cda5..c2e2349 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -13,6 +13,11 @@ #include +/* + * Although we spell it out in here, the Processor Trace + * xfeature is completely unused. We use other mechanisms + * to save/restore PT state in Linux. + */ static const char *xfeature_names[] = { "x87 floating point registers" , @@ -23,7 +28,7 @@ static const char *xfeature_names[] = "AVX-512 opmask" , "AVX-512 Hi256" , "AVX-512 ZMM_Hi256" , - "unknown xstate feature" , + "Processor Trace (unused)" , }; /* @@ -470,7 +475,8 @@ static void check_xstate_against_struct(int nr) * numbers. */ if ((nr < XFEATURE_YMM) || - (nr >= XFEATURE_MAX)) { + (nr >= XFEATURE_MAX) || + (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR)) { WARN_ONCE(1, "no structure for xstate: %d\n", nr); XSTATE_WARN_ON(1); } -- cgit v1.1 From 35e97790f5f1e5cf2b5522c55e3e31d5c81bd226 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:00 -0800 Subject: x86/mm/pkeys: Add Kconfig option I don't have a strong opinion on whether we need a Kconfig prompt or not. Protection Keys has relatively little code associated with it, and it is not a heavyweight feature to keep enabled. However, I can imagine that folks would still appreciate being able to disable it. Note that, with disabled-features.h, the checks in the code for protection keys are always the same: cpu_has(c, X86_FEATURE_PKU) With the config option disabled, this essentially turns into an #if 0. We will hide the prompt for now. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210200.DB7055E8@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ab2ed53..3632cdd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1714,6 +1714,10 @@ config X86_INTEL_MPX If unsure, say N. +config X86_INTEL_MEMORY_PROTECTION_KEYS + def_bool y + depends on CPU_SUP_INTEL && X86_64 + config EFI bool "EFI runtime service support" depends on ACPI -- cgit v1.1 From dfb4a70f20c5b3880da56ee4c9484bdb4e8f1e65 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:01 -0800 Subject: x86/cpufeature, x86/mm/pkeys: Add protection keys related CPUID definitions There are two CPUID bits for protection keys. One is for whether the CPU contains the feature, and the other will appear set once the OS enables protection keys. Specifically: Bit 04: OSPKE.
If 1, OS has set CR4.PKE to enable Protection keys (and the RDPKRU/WRPKRU instructions) This is because userspace cannot see CR4 contents, but it can see CPUID contents. X86_FEATURE_PKU is referred to as "PKU" in the hardware documentation: CPUID.(EAX=07H,ECX=0H):ECX.PKU [bit 3] X86_FEATURE_OSPKE is "OSPKE": CPUID.(EAX=07H,ECX=0H):ECX.OSPKE [bit 4] These are the first CPU features which need to look at the ECX word in CPUID leaf 0x7, so this patch also includes fetching that word into the cpuinfo->x86_capability[] array. Add it to the disabled-features mask when its config option is off. Even though we are not using it here, we also extend the REQUIRED_MASK_BIT_SET() macro to keep it mirroring the DISABLED_MASK_BIT_SET() version. This means that in almost all code, you should use: cpu_has(c, X86_FEATURE_PKU) and *not* the CONFIG option. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210201.7714C250@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeature.h | 59 +++++++++++++++++++++----------- arch/x86/include/asm/cpufeatures.h | 2 +- arch/x86/include/asm/disabled-features.h | 15 ++++++++ arch/x86/include/asm/required-features.h | 7 ++++ arch/x86/kernel/cpu/common.c | 1 + 5 files changed, 63 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 68e4e82..50e292a 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -26,6 +26,7 @@ enum cpuid_leafs CPUID_8000_0008_EBX, CPUID_6_EAX, CPUID_8000_000A_EDX, + CPUID_7_ECX, }; #ifdef CONFIG_X86_FEATURE_NAMES @@ -48,28 +49,42 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; test_bit(bit, (unsigned long *)((c)->x86_capability)) #define REQUIRED_MASK_BIT_SET(bit) \ - ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \ (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \ (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) ) + ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0 )) || \ + (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1 )) || \ + (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2 )) || \ + (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3 )) || \ + (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4 )) || \ + (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5 )) || \ + (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6 )) || \ + (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7 )) || \ + (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8 )) || \ + (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9 )) || \ + (((bit)>>5)==10 && (1UL<<((bit)&31) & REQUIRED_MASK10)) || \ + (((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) || \ + (((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) || \ + (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) || \ + (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \ + (((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \ + (((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) ) #define DISABLED_MASK_BIT_SET(bit) \ - ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \ (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \ (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \ (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \ (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \ (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \ (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \ (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \ (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \ (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) ) + ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0 )) || \ + (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1 )) || \ + (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2 )) || \ + (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3 )) || \ + (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4 )) || \ + (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5 )) || \ + (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6 )) || \ + (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7 )) || \ + (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8 )) || \ + (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9 )) || \ + (((bit)>>5)==10 && (1UL<<((bit)&31) & DISABLED_MASK10)) || \ + (((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) || \ + (((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) || \ + (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) || \ + (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \ + (((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \ + (((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) ) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ @@ -79,6 +94,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability)) +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ +#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ +#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ + /* * This macro is for detection of features which need kernel * infrastructure to be used.
It may *not* directly test the CPU diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 0ceb6ad..cbb2c56 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -12,7 +12,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 16 /* N 32-bit words worth of info */ +#define NCAPINTS 17 /* N 32-bit words worth of info */ #define NBUGINTS 1 /* N 32-bit bug flags */ /* diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index f226df0..39343be 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -28,6 +28,14 @@ # define DISABLE_CENTAUR_MCR 0 #endif /* CONFIG_X86_64 */ +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +# define DISABLE_PKU (1<<(X86_FEATURE_PKU)) +# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE)) +#else +# define DISABLE_PKU 0 +# define DISABLE_OSPKE 0 +#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ + /* * Make sure to add features to the correct mask */ @@ -41,5 +49,12 @@ #define DISABLED_MASK7 0 #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_MPX) +#define DISABLED_MASK10 0 +#define DISABLED_MASK11 0 +#define DISABLED_MASK12 0 +#define DISABLED_MASK13 0 +#define DISABLED_MASK14 0 +#define DISABLED_MASK15 0 +#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 5c6e4fb..4916144 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -92,5 +92,12 @@ #define REQUIRED_MASK7 0 #define REQUIRED_MASK8 0 #define REQUIRED_MASK9 0 +#define REQUIRED_MASK10 0 +#define REQUIRED_MASK11 0 +#define REQUIRED_MASK12 0 +#define REQUIRED_MASK13 0 +#define REQUIRED_MASK14 0 +#define REQUIRED_MASK15 0 +#define REQUIRED_MASK16 0 #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f4bb2c4..a719ad7 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -627,6 +627,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_7_0_EBX] = ebx; c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); + c->x86_capability[CPUID_7_ECX] = ecx; } /* Extended state features: level 0x0000000d */ -- cgit v1.1 From f28b49d2bcdb9ef9e771b3d6750f40be9d453316 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:02 -0800 Subject: x86/cpu, x86/mm/pkeys: Define new CR4 bit There is a new bit in CR4 for enabling protection keys. We will actually enable it later in the series. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210202.3CFC3DB2@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/uapi/asm/processor-flags.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 79887ab..567de50 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h @@ -118,6 +118,8 @@ #define X86_CR4_SMEP _BITUL(X86_CR4_SMEP_BIT) #define X86_CR4_SMAP_BIT 21 /* enable SMAP support */ #define X86_CR4_SMAP _BITUL(X86_CR4_SMAP_BIT) +#define X86_CR4_PKE_BIT 22 /* enable Protection Keys support */ +#define X86_CR4_PKE _BITUL(X86_CR4_PKE_BIT) /* * x86-64 Task Priority Register, CR8 -- cgit v1.1 From c8df40098451ba18a43f22b563c9129182353158 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:04 -0800 Subject: x86/fpu, x86/mm/pkeys: Add PKRU xsave fields and data structures The protection keys register (PKRU) is saved and restored using xsave. Define the data structure that we will use to access it inside the xsave buffer. Note that we also have to widen the printk of the xsave feature masks since this is feature 0x200 and we only did two characters before. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210204.56DF8F7B@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu/types.h | 11 +++++++++++ arch/x86/include/asm/fpu/xstate.h | 3 ++- arch/x86/kernel/fpu/xstate.c | 7 ++++++- 3 files changed, 19 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index aad3181..36b90bb 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -109,6 +109,7 @@ enum xfeature { XFEATURE_ZMM_Hi256, XFEATURE_Hi16_ZMM, XFEATURE_PT_UNIMPLEMENTED_SO_FAR, + XFEATURE_PKRU, XFEATURE_MAX, }; @@ -121,6 +122,7 @@ enum xfeature { #define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK) #define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256) #define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM) +#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU) #define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE) #define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \ @@ -213,6 +215,15 @@ struct avx_512_hi16_state { struct reg_512_bit hi16_zmm[16]; } __packed; +/* + * State component 9: 32-bit PKRU register. The state is + * 8 bytes long but only 4 bytes is used currently. 
+ */ +struct pkru_state { + u32 pkru; + u32 pad; +} __packed; + struct xstate_header { u64 xfeatures; u64 xcomp_bv; diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h index af30fde..9994d42 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -28,7 +28,8 @@ XFEATURE_MASK_YMM | \ XFEATURE_MASK_OPMASK | \ XFEATURE_MASK_ZMM_Hi256 | \ - XFEATURE_MASK_Hi16_ZMM) + XFEATURE_MASK_Hi16_ZMM | \ + XFEATURE_MASK_PKRU) /* All currently supported features */ #define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER) diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index c2e2349..a63ca80 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -29,6 +29,8 @@ static const char *xfeature_names[] = "AVX-512 Hi256" , "AVX-512 ZMM_Hi256" , "Processor Trace (unused)" , + "Protection Keys User registers", + "unknown xstate feature" , }; /* @@ -58,6 +60,7 @@ void fpu__xstate_clear_all_cpu_caps(void) setup_clear_cpu_cap(X86_FEATURE_AVX512CD); setup_clear_cpu_cap(X86_FEATURE_MPX); setup_clear_cpu_cap(X86_FEATURE_XGETBV1); + setup_clear_cpu_cap(X86_FEATURE_PKU); } /* @@ -236,7 +239,7 @@ static void __init print_xstate_feature(u64 xstate_mask) const char *feature_name; if (cpu_has_xfeatures(xstate_mask, &feature_name)) - pr_info("x86/fpu: Supporting XSAVE feature 0x%02Lx: '%s'\n", xstate_mask, feature_name); + pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name); } /* @@ -252,6 +255,7 @@ static void __init print_xstate_features(void) print_xstate_feature(XFEATURE_MASK_OPMASK); print_xstate_feature(XFEATURE_MASK_ZMM_Hi256); print_xstate_feature(XFEATURE_MASK_Hi16_ZMM); + print_xstate_feature(XFEATURE_MASK_PKRU); } /* @@ -468,6 +472,7 @@ static void check_xstate_against_struct(int nr) XCHECK_SZ(sz, nr, XFEATURE_OPMASK, struct avx_512_opmask_state); XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state); XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM, struct avx_512_hi16_state); + XCHECK_SZ(sz, nr, XFEATURE_PKRU, struct pkru_state); /* * Make *SURE* to add any feature numbers in below if -- cgit v1.1 From 5c1d90f51027e197e1299ab1235a2fed78910905 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:05 -0800 Subject: x86/mm/pkeys: Add PTE bits for storing protection key Previous documentation has referred to these 4 bits as "ignored". That means that software could have made use of them. But, as far as I know, the kernel never used them. They are still ignored when protection keys is not enabled, so they could theoretically still get used for software purposes. We also implement "empty" versions so that code that references to them can be optimized away by the compiler when the config option is not enabled. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210205.81E33ED6@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_types.h | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 4432ab7..cae10ba 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -20,13 +20,18 @@ #define _PAGE_BIT_SOFTW2 10 /* " */ #define _PAGE_BIT_SOFTW3 11 /* " */ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ +#define _PAGE_BIT_SOFTW4 58 /* available for programmer */ +#define _PAGE_BIT_PKEY_BIT0 59 /* Protection Keys, bit 1/4 */ +#define _PAGE_BIT_PKEY_BIT1 60 /* Protection Keys, bit 2/4 */ +#define _PAGE_BIT_PKEY_BIT2 61 /* Protection Keys, bit 3/4 */ +#define _PAGE_BIT_PKEY_BIT3 62 /* Protection Keys, bit 4/4 */ +#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ + #define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 #define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */ #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ -#define _PAGE_BIT_SOFTW4 58 /* available for programmer */ -#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 -#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ +#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 /* If _PAGE_BIT_PRESENT is clear, we use these: */ /* - if the user mapped it with PROT_NONE; pte_present gives true */ @@ -47,6 +52,17 @@ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) #define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0) +#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1) +#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2) +#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3) +#else +#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0)) +#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0)) +#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0)) +#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0)) +#endif #define __HAVE_ARCH_PTE_SPECIAL #ifdef CONFIG_KMEMCHECK -- cgit v1.1 From b3ecd51559ae7a8f40b10443773b9cd0e6a50f5e Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:07 -0800 Subject: x86/mm/pkeys: Add new 'PF_PK' page fault error code bit Note: "PK" is how the Intel SDM refers to this bit, so we also use that nomenclature. This only defines the bit, it does not plumb it anywhere to be handled. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210207.DA7B43E6@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index eef44d9..9f72f9c 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -33,6 +33,7 @@ * bit 2 == 0: kernel-mode access 1: user-mode access * bit 3 == 1: use of reserved bit detected * bit 4 == 1: fault was an instruction fetch + * bit 5 == 1: protection keys block access */ enum x86_pf_error_code { @@ -41,6 +42,7 @@ enum x86_pf_error_code { PF_USER = 1 << 2, PF_RSVD = 1 << 3, PF_INSTR = 1 << 4, + PF_PK = 1 << 5, }; /* @@ -916,6 +918,12 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) if ((error_code & PF_INSTR) && !pte_exec(*pte)) return 0; + /* + * Note: We do not do lazy flushing on protection key + * changes, so no spurious fault will ever set PF_PK. + */ + if ((error_code & PF_PK)) + return 1; return 1; } -- cgit v1.1 From 63c17fb8e5a46a16e10e82005748837fd11a2024 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:08 -0800 Subject: mm/core, x86/mm/pkeys: Store protection bits in high VMA flags vma->vm_flags is an 'unsigned long', so has space for 32 flags on 32-bit architectures. The high 32 bits are unused on 64-bit platforms. We've steered away from using the unused high VMA bits for things because we would have difficulty supporting it on 32-bit. Protection Keys are not available in 32-bit mode, so there is no concern about supporting this feature in 32-bit mode or on 32-bit CPUs. This patch carves out 4 bits from the high half of vma->vm_flags and allows architectures to set config option to make them available. Sparse complains about these constants unless we explicitly call them "UL". Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dan Williams Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Jan Kara Cc: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Linus Torvalds Cc: Mel Gorman Cc: Michal Hocko Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Rik van Riel Cc: Sasha Levin Cc: Valentin Rothberg Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Xie XiuQi Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210208.81AF00D5@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3632cdd..fb2ebeb 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -155,6 +155,7 @@ config X86 select VIRT_TO_BUS select X86_DEV_DMA_OPS if X86_64 select X86_FEATURE_NAMES if PROC_FS + select ARCH_USES_HIGH_VMA_FLAGS if X86_INTEL_MEMORY_PROTECTION_KEYS config INSTRUCTION_DECODER def_bool y -- cgit v1.1 From 8f62c883222c9e3c06d60b5e55e307a3d1f18257 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:10 -0800 Subject: x86/mm/pkeys: Add arch-specific VMA protection bits Lots of things seem to do: vma->vm_page_prot = vm_get_page_prot(flags); and the ptes get created right from things we pull out of ->vm_page_prot. So it is very convenient if we can store the protection key in flags and vm_page_prot, just like the existing permission bits (_PAGE_RW/PRESENT). It greatly reduces the amount of plumbing and arch-specific hacking we have to do in generic code. 
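An illustration of the bit packing described above may help (a standalone sketch: VM_PKEY_SHIFT is an assumed value here; in the kernel it comes from the generic high-VMA-flags bits added by the previous patch, and protection keys are 64-bit only). The vma_pkey() helper in the diff below performs the same extraction:

#include <stdio.h>

/* Assumption for illustration: the first high VMA flag is bit 32. */
#define VM_PKEY_SHIFT	32
#define VM_PKEY_BIT0	(1ULL << (VM_PKEY_SHIFT + 0))
#define VM_PKEY_BIT1	(1ULL << (VM_PKEY_SHIFT + 1))
#define VM_PKEY_BIT2	(1ULL << (VM_PKEY_SHIFT + 2))
#define VM_PKEY_BIT3	(1ULL << (VM_PKEY_SHIFT + 3))

/* Same extraction as the kernel's vma_pkey(), minus the vma. */
static int vma_pkey(unsigned long long vm_flags)
{
	unsigned long long mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				  VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (int)((vm_flags & mask) >> VM_PKEY_SHIFT);
}

int main(void)
{
	/* Storing pkey 5 (binary 0101) sets VM_PKEY_BIT0 and VM_PKEY_BIT2. */
	unsigned long long vm_flags = 5ULL << VM_PKEY_SHIFT;

	printf("pkey = %d\n", vma_pkey(vm_flags));	/* prints 5 */
	return 0;
}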
This also takes the new PROT_PKEY{0,1,2,3} flags and turns *those* in to VM_ flags for vma->vm_flags. The protection key values are stored in 4 places: 1. "prot" argument to system calls 2. vma->vm_flags, filled from the mmap "prot" 3. vma->vm_page_prot, filled from vma->vm_flags 4. the PTE itself. The pseudocode for these four steps is as follows: mmap(PROT_PKEY*) vma->vm_flags = ... | arch_calc_vm_prot_bits(mmap_prot); vma->vm_page_prot = ... | arch_vm_get_page_prot(vma->vm_flags); pte = pfn | vma->vm_page_prot Note that this provides a new definition for x86: arch_vm_get_page_prot() Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210210.FE483A42@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mmu_context.h | 11 +++++++++++ arch/x86/include/asm/pgtable_types.h | 12 ++++++++++-- arch/x86/include/uapi/asm/mman.h | 16 ++++++++++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index bfd9b2a..94c4c8b 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -275,4 +275,15 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, mpx_notify_unmap(mm, vma, start, end); } +static inline int vma_pkey(struct vm_area_struct *vma) +{ + u16 pkey = 0; +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS + unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 | + VM_PKEY_BIT2 | VM_PKEY_BIT3; + pkey = (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT; +#endif + return pkey; +} + #endif /* _ASM_X86_MMU_CONTEXT_H */ diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index cae10ba..8c35cf0 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -115,7 +115,12 @@ #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ _PAGE_DIRTY) -/* Set of bits not changed in pte_modify */ +/* + * Set of bits not changed in pte_modify. The pte's + * protection key is treated like _PAGE_RW, for + * instance, and is *not* included in this mask since + * pte_modify() does modify it. + */ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ _PAGE_SOFT_DIRTY) @@ -231,7 +236,10 @@ enum page_cache_mode { /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */ #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) -/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */ +/* + * Extracts the flags from a (pte|pmd|pud|pgd)val_t + * This includes the protection key value. + */ #define PTE_FLAGS_MASK (~PTE_PFN_MASK) typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h index 513b05f..e8562e0 100644 --- a/arch/x86/include/uapi/asm/mman.h +++ b/arch/x86/include/uapi/asm/mman.h @@ -6,6 +6,22 @@ #define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) #define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT) +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +/* + * Take the 4 protection key bits out of the vma->vm_flags + * value and turn them in to the bits that we can put in + * to a pte.
+ * + * Only override these if Protection Keys are available + * (which is only on 64-bit). + */ +#define arch_vm_get_page_prot(vm_flags) __pgprot( \ + ((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) | \ + ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) | \ + ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) | \ + ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0)) +#endif + #include #endif /* _ASM_X86_MMAN_H */ -- cgit v1.1 From 7b2d0dbac4890c8ca4a8acc57709639fc8b158e9 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:11 -0800 Subject: x86/mm/pkeys: Pass VMA down in to fault signal generation code During a page fault, we look up the VMA to ensure that the fault is in a region with a valid mapping. But, in the top-level page fault code we don't need the VMA for much else. Once we have decided that an access is bad, we are going to send a signal no matter what and do not need the VMA any more. So we do not pass it down in to the signal generation code. But, for protection keys, we need the VMA. It tells us *which* protection key we violated if we get a PF_PK. So, we need to pass the VMA down and fill in siginfo->si_pkey. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210211.AD3B36A3@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 50 ++++++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9f72f9c..3c51c66 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -171,7 +171,8 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) static void force_sig_info_fault(int si_signo, int si_code, unsigned long address, - struct task_struct *tsk, int fault) + struct task_struct *tsk, struct vm_area_struct *vma, + int fault) { unsigned lsb = 0; siginfo_t info; @@ -656,6 +657,8 @@ no_context(struct pt_regs *regs, unsigned long error_code, struct task_struct *tsk = current; unsigned long flags; int sig; + /* No context means no VMA to pass down */ + struct vm_area_struct *vma = NULL; /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) { @@ -679,7 +682,8 @@ no_context(struct pt_regs *regs, unsigned long error_code, tsk->thread.cr2 = address; /* XXX: hwpoison faults will set the wrong code. 
*/ - force_sig_info_fault(signal, si_code, address, tsk, 0); + force_sig_info_fault(signal, si_code, address, + tsk, vma, 0); } /* @@ -756,7 +760,8 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, static void __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address, int si_code) + unsigned long address, struct vm_area_struct *vma, + int si_code) { struct task_struct *tsk = current; @@ -799,7 +804,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_PF; - force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); + force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0); return; } @@ -812,14 +817,14 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, static noinline void bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address) + unsigned long address, struct vm_area_struct *vma) { - __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); + __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR); } static void __bad_area(struct pt_regs *regs, unsigned long error_code, - unsigned long address, int si_code) + unsigned long address, struct vm_area_struct *vma, int si_code) { struct mm_struct *mm = current->mm; @@ -829,25 +834,25 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, */ up_read(&mm->mmap_sem); - __bad_area_nosemaphore(regs, error_code, address, si_code); + __bad_area_nosemaphore(regs, error_code, address, vma, si_code); } static noinline void bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) { - __bad_area(regs, error_code, address, SEGV_MAPERR); + __bad_area(regs, error_code, address, NULL, SEGV_MAPERR); } static noinline void bad_area_access_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address) + unsigned long address, struct vm_area_struct *vma) { - __bad_area(regs, error_code, address, SEGV_ACCERR); + __bad_area(regs, error_code, address, vma, SEGV_ACCERR); } static void do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, - unsigned int fault) + struct vm_area_struct *vma, unsigned int fault) { struct task_struct *tsk = current; int code = BUS_ADRERR; @@ -874,12 +879,13 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, code = BUS_MCEERR_AR; } #endif - force_sig_info_fault(SIGBUS, code, address, tsk, fault); + force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault); } static noinline void mm_fault_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address, unsigned int fault) + unsigned long address, struct vm_area_struct *vma, + unsigned int fault) { if (fatal_signal_pending(current) && !(error_code & PF_USER)) { no_context(regs, error_code, address, 0, 0); @@ -903,9 +909,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| VM_FAULT_HWPOISON_LARGE)) - do_sigbus(regs, error_code, address, fault); + do_sigbus(regs, error_code, address, vma, fault); else if (fault & VM_FAULT_SIGSEGV) - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, vma); else BUG(); } @@ -1119,7 +1125,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * Don't take the mm semaphore here. 
If we fixup a prefetch * fault we could otherwise deadlock: */ - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, NULL); return; } @@ -1132,7 +1138,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, pgtable_bad(regs, error_code, address); if (unlikely(smap_violation(error_code, regs))) { - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, NULL); return; } @@ -1141,7 +1147,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * in a region with pagefaults disabled then we must not take the fault */ if (unlikely(faulthandler_disabled() || !mm)) { - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, NULL); return; } @@ -1185,7 +1191,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, if (unlikely(!down_read_trylock(&mm->mmap_sem))) { if ((error_code & PF_USER) == 0 && !search_exception_tables(regs->ip)) { - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, NULL); return; } retry: @@ -1233,7 +1239,7 @@ retry: */ good_area: if (unlikely(access_error(error_code, vma))) { - bad_area_access_error(regs, error_code, address); + bad_area_access_error(regs, error_code, address, vma); return; } @@ -1271,7 +1277,7 @@ good_area: up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { - mm_fault_error(regs, error_code, address, fault); + mm_fault_error(regs, error_code, address, vma, fault); return; } -- cgit v1.1 From b376cd0256f86db3078409dc51963b315c7843d8 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Wed, 17 Feb 2016 10:17:03 -0800 Subject: signals, ia64, mips: Update arch-specific siginfos with pkeys field ia64 and mips have separate definitions for siginfo from the generic one. Patch them to have the pkey fields. Note that this is exactly what we did for MPX as well. [ This fixes a compile error that Ingo was hitting with MIPS when the x86 pkeys patch set is applied. 
] Signed-off-by: Dave Hansen Cc: Dave Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Petr Malat Cc: Ralf Baechle Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-ia64@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160217181703.E99B6656@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/ia64/include/uapi/asm/siginfo.h | 13 +++++++++---- arch/mips/include/uapi/asm/siginfo.h | 13 +++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h index bce9bc1..0151cfa 100644 --- a/arch/ia64/include/uapi/asm/siginfo.h +++ b/arch/ia64/include/uapi/asm/siginfo.h @@ -63,10 +63,15 @@ typedef struct siginfo { unsigned int _flags; /* see below */ unsigned long _isr; /* isr */ short _addr_lsb; /* lsb of faulting address */ - struct { - void __user *_lower; - void __user *_upper; - } _addr_bnd; + union { + /* used when si_code=SEGV_BNDERR */ + struct { + void __user *_lower; + void __user *_upper; + } _addr_bnd; + /* used when si_code=SEGV_PKUERR */ + u64 _pkey; + }; } _sigfault; /* SIGPOLL */ diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index 2cb7fde..6f4edf0 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -86,10 +86,15 @@ typedef struct siginfo { int _trapno; /* TRAP # which caused the signal */ #endif short _addr_lsb; - struct { - void __user *_lower; - void __user *_upper; - } _addr_bnd; + union { + /* used when si_code=SEGV_BNDERR */ + struct { + void __user *_lower; + void __user *_upper; + } _addr_bnd; + /* used when si_code=SEGV_PKUERR */ + u64 _pkey; + }; } _sigfault; /* SIGPOLL, SIGXFSZ (To do ...) */ -- cgit v1.1 From 019132ff3daf36c97a4006655dfd00ee42f2b590 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:14 -0800 Subject: x86/mm/pkeys: Fill in pkey field in siginfo This fills in the new siginfo field: si_pkey to indicate to userspace which protection key was set on the PTE that we faulted on. Note though that *ALL* protection key faults have to be generated by a valid, present PTE at some point. But this code does no PTE lookups which seems odd. The reason is that we take advantage of the way we generate PTEs from VMAs. All PTEs under a VMA share some attributes. For instance, they are _all_ either PROT_READ *OR* PROT_NONE. They also always share a protection key, so we never have to walk the page tables; we just use the VMA. Note that _pkey is a 64-bit value. The current hardware only supports 4-bit protection keys. We do this because there is _plenty_ of space in _sigfault and it is possible that future processors would support more than 4 bits of protection keys. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H.
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210213.ABC488FA@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_types.h | 5 +++ arch/x86/mm/fault.c | 64 +++++++++++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 8c35cf0..7b5efe2 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -65,6 +65,11 @@ #endif #define __HAVE_ARCH_PTE_SPECIAL +#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \ + _PAGE_PKEY_BIT1 | \ + _PAGE_PKEY_BIT2 | \ + _PAGE_PKEY_BIT3) + #ifdef CONFIG_KMEMCHECK #define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) #else diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 3c51c66..6e71dcf 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -15,12 +15,14 @@ #include <linux/context_tracking.h> /* exception_enter(), ... */ #include <linux/uaccess.h> /* faulthandler_disabled() */ +#include <asm/cpufeature.h> /* boot_cpu_has, ... */ #include <asm/traps.h> /* dotraplinkage, ... */ #include <asm/pgalloc.h> /* pgd_*(), ... */ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ #include <asm/fixmap.h> /* VSYSCALL_ADDR */ #include <asm/vsyscall.h> /* emulate_vsyscall */ #include <asm/vm86.h> /* struct vm86 */ +#include <asm/mmu_context.h> /* vma_pkey() */ #define CREATE_TRACE_POINTS #include <asm/trace/exceptions.h> @@ -169,6 +171,56 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) return prefetch; } +/* + * A protection key fault means that the PKRU value did not allow + * access to some PTE. Userspace can figure out what PKRU was + * from the XSAVE state, and this function fills out a field in + * siginfo so userspace can discover which protection key was set + * on the PTE. + * + * If we get here, we know that the hardware signaled a PF_PK + * fault and that there was a VMA once we got in the fault + * handler. It does *not* guarantee that the VMA we find here + * was the one that we faulted on. + * + * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); + * 2. T1 : set PKRU to deny access to pkey=4, touches page + * 3. T1 : faults... + * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); + * 5. T1 : enters fault handler, takes mmap_sem, etc... + * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really + * faulted on a pte with its pkey=4. + */ +static void fill_sig_info_pkey(int si_code, siginfo_t *info, + struct vm_area_struct *vma) +{ + /* This is effectively an #ifdef */ + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return; + + /* Fault not from Protection Keys: nothing to do */ + if (si_code != SEGV_PKUERR) + return; + /* + * force_sig_info_fault() is called from a number of + * contexts, some of which have a VMA and some of which + * do not. The PF_PK handling happens after we have a + * valid VMA, so we should never reach this without a + * valid VMA. + */ + if (!vma) { + WARN_ONCE(1, "PKU fault with no VMA passed in"); + info->si_pkey = 0; + return; + } + /* + * si_pkey should be thought of as a strong hint, but not + * absolutely guaranteed to be 100% accurate because of + * the race explained above.
+ */ + info->si_pkey = vma_pkey(vma); +} + static void force_sig_info_fault(int si_signo, int si_code, unsigned long address, struct task_struct *tsk, struct vm_area_struct *vma, @@ -187,6 +239,8 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, lsb = PAGE_SHIFT; info.si_addr_lsb = lsb; + fill_sig_info_pkey(si_code, &info, vma); + force_sig_info(si_signo, &info, tsk); } @@ -847,7 +901,15 @@ static noinline void bad_area_access_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct vm_area_struct *vma) { - __bad_area(regs, error_code, address, vma, SEGV_ACCERR); + /* + * This OSPKE check is not strictly necessary at runtime. + * But, doing it this way allows compiler optimizations + * if pkeys are compiled out. + */ + if (boot_cpu_has(X86_FEATURE_OSPKE) && (error_code & PF_PK)) + __bad_area(regs, error_code, address, vma, SEGV_PKUERR); + else + __bad_area(regs, error_code, address, vma, SEGV_ACCERR); } static void -- cgit v1.1 From a927cb83f3300bcb1ae18672e58029acddd18b33 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:15 -0800 Subject: x86/mm/pkeys: Add functions to fetch PKRU This adds the raw instruction to access PKRU as well as some accessor functions that correctly handle when the CPU does not support the instruction. We don't use it here, but we will use read_pkru() in the next patch. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210215.15238D34@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable.h | 8 ++++++++ arch/x86/include/asm/special_insns.h | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 0687c47..e997dcc 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -99,6 +99,14 @@ static inline int pte_dirty(pte_t pte) return pte_flags(pte) & _PAGE_DIRTY; } + +static inline u32 read_pkru(void) +{ + if (boot_cpu_has(X86_FEATURE_OSPKE)) + return __read_pkru(); + return 0; +} + static inline int pte_young(pte_t pte) { return pte_flags(pte) & _PAGE_ACCESSED; diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index 2270e41..aee6e76 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -98,6 +98,28 @@ static inline void native_write_cr8(unsigned long val) } #endif +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +static inline u32 __read_pkru(void) +{ + u32 ecx = 0; + u32 edx, pkru; + + /* + * "rdpkru" instruction. Places PKRU contents in to EAX, + * clears EDX and requires that ecx=0. + */ + asm volatile(".byte 0x0f,0x01,0xee\n\t" + : "=a" (pkru), "=d" (edx) + : "c" (ecx)); + return pkru; +} +#else +static inline u32 __read_pkru(void) +{ + return 0; +} +#endif + static inline void native_wbinvd(void) { asm volatile("wbinvd": : :"memory"); -- cgit v1.1 From 1874f6895c92d991ccf85edcc55a0d9dd552d71c Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:18 -0800 Subject: x86/mm/gup: Simplify get_user_pages() PTE bit handling The current get_user_pages() code is a wee bit more complicated than it needs to be for pte bit checking. 
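An aside on the __read_pkru() accessor just added above: once OSPKE is set, the same instruction works from userspace. A minimal sketch, assuming a pkeys-capable CPU with CR4.PKE enabled (on anything else the instruction raises #UD):

#include <stdio.h>

/*
 * Same encoding as the kernel's __read_pkru(): RDPKRU reads PKRU
 * into EAX, clears EDX, and requires ECX = 0. Illustrative only.
 */
static unsigned int rdpkru(void)
{
	unsigned int pkru, edx, ecx = 0;

	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}

int main(void)
{
	printf("PKRU = 0x%08x\n", rdpkru());
	return 0;
}

Back to the get_user_pages() cleanup: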
Currently, it establishes a mask of required pte _PAGE_* bits and ensures that the pte it goes after has all those bits. This consolidates the three identical copies of this code. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210218.3A2D4045@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/gup.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index ce5e454..2f0a329 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -75,6 +75,24 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) } /* + * 'pteval' can come from a pte, pmd or pud. We only check + * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the + * same value on all 3 types. + */ +static inline int pte_allows_gup(unsigned long pteval, int write) +{ + unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER; + + if (write) + need_pte_bits |= _PAGE_RW; + + if ((pteval & need_pte_bits) != need_pte_bits) + return 0; + + return 1; +} + +/* * The performance critical leaf functions are made noinline otherwise gcc * inlines everything into a single function which results in too much * register pressure. @@ -83,14 +101,9 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; - unsigned long mask; int nr_start = *nr; pte_t *ptep; - mask = _PAGE_PRESENT|_PAGE_USER; - if (write) - mask |= _PAGE_RW; - ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); @@ -110,7 +123,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, pte_unmap(ptep); return 0; } - } else if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { + } else if (!pte_allows_gup(pte_val(pte), write) || + pte_special(pte)) { pte_unmap(ptep); return 0; } @@ -164,14 +178,10 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { - unsigned long mask; struct page *head, *page; int refs; - mask = _PAGE_PRESENT|_PAGE_USER; - if (write) - mask |= _PAGE_RW; - if ((pmd_flags(pmd) & mask) != mask) + if (!pte_allows_gup(pmd_val(pmd), write)) return 0; VM_BUG_ON(!pfn_valid(pmd_pfn(pmd))); @@ -231,14 +241,10 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, static noinline int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { - unsigned long mask; struct page *head, *page; int refs; - mask = _PAGE_PRESENT|_PAGE_USER; - if (write) - mask |= _PAGE_RW; - if ((pud_flags(pud) & mask) != mask) + if (!pte_allows_gup(pud_val(pud), write)) return 0; /* hugepages are never "special" */ VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL); -- cgit v1.1 From 33a709b25a760b91184bb335cf7d7c32b8123013 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:19 -0800 Subject: mm/gup, x86/mm/pkeys: Check VMAs and PTEs for protection keys Today, for normal faults and page table walks, we check the VMA and/or PTE to ensure that it is compatible with the action. For instance, if we get a write fault on a non-writeable VMA, we SIGSEGV. 
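Since the pkey check is the new part here, a minimal userspace model of the two-bits-per-key test may help. The PKRU_AD_BIT/PKRU_WD_BIT values match the definitions this patch adds to pgtable.h below; everything else is an illustrative sketch, not the kernel code:

#include <stdbool.h>
#include <stdio.h>

#define PKRU_AD_BIT 0x1	/* access disable */
#define PKRU_WD_BIT 0x2	/* write disable */

/*
 * Mirrors __pkru_allows_read()/__pkru_allows_write() below: key N
 * owns PKRU bits 2N (AD) and 2N+1 (WD), and access-disable blocks
 * writes as well as reads.
 */
static bool pkru_allows(unsigned int pkru, int pkey, bool write)
{
	int shift = pkey * 2;

	if (pkru & (PKRU_AD_BIT << shift))
		return false;
	if (write && (pkru & (PKRU_WD_BIT << shift)))
		return false;
	return true;
}

int main(void)
{
	/* Deny all access to pkey 4 by setting its AD bit (bit 8). */
	unsigned int pkru = PKRU_AD_BIT << (4 * 2);

	printf("read pkey 4: %d\n", pkru_allows(pkru, 4, false));	/* 0 */
	printf("write pkey 0: %d\n", pkru_allows(pkru, 0, true));	/* 1 */
	return 0;
}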
We try to do the same thing for protection keys. Basically, we try to make sure that if a user does this: mprotect(ptr, size, PROT_NONE); *ptr = foo; they see the same effects with protection keys when they do this: mprotect(ptr, size, PROT_READ|PROT_WRITE); set_pkey(ptr, size, 4); wrpkru(0xffffff3f); // access disable pkey 4 *ptr = foo; The state to do that checking is in the VMA, but we also sometimes have to do it on the page tables only, like when doing a get_user_pages_fast() where we have no VMA. We add two functions and expose them to generic code: arch_pte_access_permitted(pte_flags, write) arch_vma_access_permitted(vma, write) These are, of course, backed up in x86 arch code with checks against the PTE or VMA's protection key. But, there are also cases where we do not want to respect protection keys. When we ptrace(), for instance, we do not want to apply the tracer's PKRU permissions to the PTEs from the process being traced. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Alexey Kardashevskiy Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Boaz Harrosh Cc: Borislav Petkov Cc: Brian Gerst Cc: Dan Williams Cc: Dave Hansen Cc: David Gibson Cc: David Hildenbrand Cc: David Vrabel Cc: Denys Vlasenko Cc: Dominik Dingel Cc: Dominik Vogt Cc: Guan Xuetao Cc: H. Peter Anvin Cc: Heiko Carstens Cc: Hugh Dickins Cc: Jason Low Cc: Jerome Marchand Cc: Juergen Gross Cc: Kirill A. Shutemov Cc: Laurent Dufour Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michael Ellerman Cc: Michal Hocko Cc: Mikulas Patocka Cc: Minchan Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Sasha Levin Cc: Shachar Raindel Cc: Stephen Smalley Cc: Toshi Kani Cc: Vlastimil Babka Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Link: http://lkml.kernel.org/r/20160212210219.14D5D715@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/mmu_context.h | 11 +++++++ arch/s390/include/asm/mmu_context.h | 11 +++++++ arch/unicore32/include/asm/mmu_context.h | 11 +++++++ arch/x86/include/asm/mmu_context.h | 49 ++++++++++++++++++++++++++++++++ arch/x86/include/asm/pgtable.h | 29 +++++++++++++++++++ arch/x86/mm/fault.c | 21 +++++++++++++- arch/x86/mm/gup.c | 5 ++++ 7 files changed, 136 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 878c277..a0f1838 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -148,5 +148,16 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, { } +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write) +{ + /* by default, allow everything */ + return true; +} + +static inline bool arch_pte_access_permitted(pte_t pte, bool write) +{ + /* by default, allow everything */ + return true; +} #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_MMU_CONTEXT_H */ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index fb1b93e..2627b33 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -130,4 +130,15 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, { } +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write) +{ + /* by default, allow everything */ + return true; +} 
+ +static inline bool arch_pte_access_permitted(pte_t pte, bool write) +{ + /* by default, allow everything */ + return true; +} #endif /* __S390_MMU_CONTEXT_H */ diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index 1cb5220..3133f94 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -97,4 +97,15 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, { } +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write) +{ + /* by default, allow everything */ + return true; +} + +static inline bool arch_pte_access_permitted(pte_t pte, bool write) +{ + /* by default, allow everything */ + return true; +} #endif diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 94c4c8b..19036cd 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -286,4 +286,53 @@ static inline int vma_pkey(struct vm_area_struct *vma) return pkey; } +static inline bool __pkru_allows_pkey(u16 pkey, bool write) +{ + u32 pkru = read_pkru(); + + if (!__pkru_allows_read(pkru, pkey)) + return false; + if (write && !__pkru_allows_write(pkru, pkey)) + return false; + + return true; +} + +/* + * We only want to enforce protection keys on the current process + * because we effectively have no access to PKRU for other + * processes or any way to tell *which * PKRU in a threaded + * process we could use. + * + * So do not enforce things if the VMA is not from the current + * mm, or if we are in a kernel thread. + */ +static inline bool vma_is_foreign(struct vm_area_struct *vma) +{ + if (!current->mm) + return true; + /* + * Should PKRU be enforced on the access to this VMA? If + * the VMA is from another process, then PKRU has no + * relevance and should not be enforced. + */ + if (current->mm != vma->vm_mm) + return true; + + return false; +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write) +{ + /* allow access if the VMA is not one from this process */ + if (vma_is_foreign(vma)) + return true; + return __pkru_allows_pkey(vma_pkey(vma), write); +} + +static inline bool arch_pte_access_permitted(pte_t pte, bool write) +{ + return __pkru_allows_pkey(pte_flags_pkey(pte_flags(pte)), write); +} + #endif /* _ASM_X86_MMU_CONTEXT_H */ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index e997dcc..3cbfae8 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -919,6 +919,35 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) } #endif +#define PKRU_AD_BIT 0x1 +#define PKRU_WD_BIT 0x2 + +static inline bool __pkru_allows_read(u32 pkru, u16 pkey) +{ + int pkru_pkey_bits = pkey * 2; + return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits)); +} + +static inline bool __pkru_allows_write(u32 pkru, u16 pkey) +{ + int pkru_pkey_bits = pkey * 2; + /* + * Access-disable disables writes too so we need to check + * both bits here. 
+ */ + return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits)); +} + +static inline u16 pte_flags_pkey(unsigned long pte_flags) +{ +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS + /* ifdef to avoid doing 59-bit shift on 32-bit values */ + return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0; +#else + return 0; +#endif +} + #include #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6e71dcf..319331a 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -897,6 +897,16 @@ bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) __bad_area(regs, error_code, address, NULL, SEGV_MAPERR); } +static inline bool bad_area_access_from_pkeys(unsigned long error_code, + struct vm_area_struct *vma) +{ + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return false; + if (error_code & PF_PK) + return true; + return false; +} + static noinline void bad_area_access_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct vm_area_struct *vma) @@ -906,7 +916,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, * But, doing it this way allows compiler optimizations * if pkeys are compiled out. */ - if (boot_cpu_has(X86_FEATURE_OSPKE) && (error_code & PF_PK)) + if (bad_area_access_from_pkeys(error_code, vma)) __bad_area(regs, error_code, address, vma, SEGV_PKUERR); else __bad_area(regs, error_code, address, vma, SEGV_ACCERR); @@ -1081,6 +1091,15 @@ int show_unhandled_signals = 1; static inline int access_error(unsigned long error_code, struct vm_area_struct *vma) { + /* + * Access or read was blocked by protection keys. We do + * this check before any others because we do not want + * to, for instance, confuse a protection-key-denied + * write with one for which we should do a COW. + */ + if (error_code & PF_PK) + return 1; + if (error_code & PF_WRITE) { /* write, present and write, not present: */ if (unlikely(!(vma->vm_flags & VM_WRITE))) diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 2f0a329..bab259e 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -11,6 +11,7 @@ #include #include +#include #include static inline pte_t gup_get_pte(pte_t *ptep) @@ -89,6 +90,10 @@ static inline int pte_allows_gup(unsigned long pteval, int write) if ((pteval & need_pte_bits) != need_pte_bits) return 0; + /* Check memory protection keys permissions. */ + if (!__pkru_allows_pkey(pte_flags_pkey(pteval), write)) + return 0; + return 1; } -- cgit v1.1 From 9d95b1759e0504890049deb2de62e31d7c241c30 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 18 Feb 2016 10:35:57 -0800 Subject: um, pkeys: Add UML arch_*_access_permitted() methods UML has a special mmu_context.h and needs updates whenever the generic one is updated. 
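For contrast with these always-true stubs, the x86 implementation above reduces to a pair of PKRU bit tests. A minimal userspace sketch of that logic, assuming nothing beyond a standard C toolchain (the helpers mirror __pkru_allows_read()/__pkru_allows_write() but are local to the example):

#include <stdio.h>
#include <stdint.h>

#define PKRU_AD_BIT 0x1	/* access-disable */
#define PKRU_WD_BIT 0x2	/* write-disable */

static int pkru_allows_read(uint32_t pkru, uint16_t pkey)
{
	return !(pkru & (PKRU_AD_BIT << (pkey * 2)));
}

static int pkru_allows_write(uint32_t pkru, uint16_t pkey)
{
	/* access-disable implies write-disable, so test both bits */
	return !(pkru & ((PKRU_AD_BIT | PKRU_WD_BIT) << (pkey * 2)));
}

int main(void)
{
	uint32_t pkru = PKRU_AD_BIT << (4 * 2);	/* access-disable pkey 4 */

	printf("pkey 4: read=%d write=%d\n",	/* prints 0 0 */
	       pkru_allows_read(pkru, 4), pkru_allows_write(pkru, 4));
	printf("pkey 3: read=%d write=%d\n",	/* prints 1 1 */
	       pkru_allows_read(pkru, 3), pkru_allows_write(pkru, 3));
	return 0;
}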
Signed-off-by: Dave Hansen Cc: Dave Hansen Cc: Jeff Dike Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Richard Weinberger Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Cc: user-mode-linux-devel@lists.sourceforge.net Cc: user-mode-linux-user@lists.sourceforge.net Link: http://lkml.kernel.org/r/20160218183557.AE1DB383@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/um/include/asm/mmu_context.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch') diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index 941527e..1a60e13 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -27,6 +27,20 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, struct vm_area_struct *vma) { } + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, bool foreign) +{ + /* by default, allow everything */ + return true; +} + +static inline bool arch_pte_access_permitted(pte_t pte, bool write) +{ + /* by default, allow everything */ + return true; +} + /* * end asm-generic/mm_hooks.h functions */ -- cgit v1.1 From 1b2ee1266ea647713dbaf44825967c180dfc8d76 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:21 -0800 Subject: mm/core: Do not enforce PKEY permissions on remote mm access We try to enforce protection keys in software the same way that we do in hardware. (See long example below). But, we only want to do this when accessing our *own* process's memory. If GDB set PKRU[6].AD=1 (disable access to PKEY 6), then tried to PTRACE_POKE a target process which just happened to have some mprotect_pkey(pkey=6) memory, we do *not* want to deny the debugger access to that memory. PKRU is fundamentally a thread-local structure and we do not want to enforce it on access to _another_ thread's data. This gets especially tricky when we have workqueues or other delayed-work mechanisms that might run in a random process's context. We can check that we only enforce pkeys when operating on our *own* mm, but delayed work gets performed when a random user context is active. We might end up with a situation where a delayed-work gup fails when running randomly under its "own" task but succeeds when running under another process. We want to avoid that, so we use the new GUP flag FOLL_REMOTE and add a fault flag, FAULT_FLAG_REMOTE. They indicate that we are walking an mm which is not guaranteed to be the same as current->mm and should not be subject to protection key enforcement. Thanks to Jerome Glisse for pointing out this scenario. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Alexey Kardashevskiy Cc: Andrea Arcangeli Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Boaz Harrosh Cc: Borislav Petkov Cc: Brian Gerst Cc: Dan Williams Cc: Dave Chinner Cc: Dave Hansen Cc: David Gibson Cc: Denys Vlasenko Cc: Dominik Dingel Cc: Dominik Vogt Cc: Eric B Munson Cc: Geliang Tang Cc: Guan Xuetao Cc: H. Peter Anvin Cc: Heiko Carstens Cc: Hugh Dickins Cc: Jan Kara Cc: Jason Low Cc: Jerome Marchand Cc: Joerg Roedel Cc: Kirill A.
Shutemov Cc: Konstantin Khlebnikov Cc: Laurent Dufour Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michael Ellerman Cc: Michal Hocko Cc: Mikulas Patocka Cc: Minchan Kim Cc: Oleg Nesterov Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Sasha Levin Cc: Shachar Raindel Cc: Vlastimil Babka Cc: Xie XiuQi Cc: iommu@lists.linux-foundation.org Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/mmu_context.h | 3 ++- arch/s390/include/asm/mmu_context.h | 3 ++- arch/unicore32/include/asm/mmu_context.h | 3 ++- arch/x86/include/asm/mmu_context.h | 5 +++-- 4 files changed, 9 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index a0f1838..df9bf3e 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -148,7 +148,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, { } -static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write) +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool foreign) { /* by default, allow everything */ return true; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 2627b33..8906600 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -130,7 +130,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, { } -static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write) +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool foreign) { /* by default, allow everything */ return true; diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index 3133f94..e35632e 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -97,7 +97,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, { } -static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write) +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool foreign) { /* by default, allow everything */ return true; diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 19036cd..b4d939a 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -322,10 +322,11 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma) return false; } -static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write) +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool foreign) { /* allow access if the VMA is not one from this process */ - if (vma_is_foreign(vma)) + if (foreign || vma_is_foreign(vma)) return true; return __pkru_allows_pkey(vma_pkey(vma), write); } -- cgit v1.1 From 07f146f53e8de826e4afa3a88ea65bdb13c24959 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:22 -0800 Subject: x86/mm/pkeys: Optimize fault handling in access_error() We might not strictly have to make modifications to access_error() to check the VMA here. If we do not, we will do this: 1. app sets VMA pkey to K 2. app touches a !present page 3. do_page_fault(), allocates and maps page, sets pte.pkey=K 4.
return to userspace 5. touch instruction reexecutes, but triggers PF_PK 6. do PKEY signal What happens with this patch applied: 1. app sets VMA pkey to K 2. app touches a !present page 3. do_page_fault() notices that K is inaccessible 4. do PKEY signal We basically skip the fault that does an allocation. So what this lets us do is protect areas from even being *populated* unless it is accessible according to protection keys. That seems handy to me and makes protection keys work more like an mprotect()'d mapping. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210222.EBB63D8C@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 319331a..68ecdff 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -900,10 +900,16 @@ bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) static inline bool bad_area_access_from_pkeys(unsigned long error_code, struct vm_area_struct *vma) { + /* This code is always called on the current mm */ + bool foreign = false; + if (!boot_cpu_has(X86_FEATURE_OSPKE)) return false; if (error_code & PF_PK) return true; + /* this checks permission keys on the VMA: */ + if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign)) + return true; return false; } @@ -1091,6 +1097,8 @@ int show_unhandled_signals = 1; static inline int access_error(unsigned long error_code, struct vm_area_struct *vma) { + /* This is only called for the current mm, so: */ + bool foreign = false; /* * Access or read was blocked by protection keys. We do * this check before any others because we do not want @@ -1099,6 +1107,13 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) */ if (error_code & PF_PK) return 1; + /* + * Make sure to check the VMA so that we do not perform + * faults just to hit a PF_PK as soon as we fill in a + * page. + */ + if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign)) + return 1; if (error_code & PF_WRITE) { /* write, present and write, not present: */ -- cgit v1.1 From d61172b4b695b821388cdb6088a41d431bcbb93b Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:24 -0800 Subject: mm/core, x86/mm/pkeys: Differentiate instruction fetches As discussed earlier, we attempt to enforce protection keys in software. However, the code checks all faults to ensure that they are not violating protection key permissions. It was assumed that all faults are either write faults where we check PKRU[key].WD (write disable) or read faults where we check the AD (access disable) bit. But, there is a third category of faults for protection keys: instruction faults. Instruction faults never run afoul of protection keys because they do not affect instruction fetches. So, plumb the PF_INSTR bit down in to the arch_vma_access_permitted() function where we do the protection key checks. We also add a new FAULT_FLAG_INSTRUCTION. This is because handle_mm_fault() is not passed the architecture-specific error_code where we keep PF_INSTR, so we need to encode the instruction fetch information in to the arch-generic fault flags. 
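A sketch of how an arch-generic caller might now drive the hook; illustrative only, since the wrapper name and the vm_flags pre-check here are assumptions rather than something this patch adds:

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool execute = !!(fault_flags & FAULT_FLAG_INSTRUCTION);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);

	if (write && !(vma->vm_flags & VM_WRITE))
		return false;

	/* the arch hook can still veto the access based on pkeys */
	return arch_vma_access_permitted(vma, write, execute, foreign);
}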
Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210224.96928009@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/mmu_context.h | 2 +- arch/s390/include/asm/mmu_context.h | 2 +- arch/x86/include/asm/mmu_context.h | 5 ++++- arch/x86/mm/fault.c | 8 ++++++-- 4 files changed, 12 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index df9bf3e..4eaab40 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -149,7 +149,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, } static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, - bool write, bool foreign) + bool write, bool execute, bool foreign) { /* by default, allow everything */ return true; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 8906600..fa66b6d 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -131,7 +131,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, } static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, - bool write, bool foreign) + bool write, bool execute, bool foreign) { /* by default, allow everything */ return true; diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index b4d939a..6572b94 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -323,8 +323,11 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma) } static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, - bool write, bool foreign) + bool write, bool execute, bool foreign) { + /* pkeys never affect instruction fetches */ + if (execute) + return true; /* allow access if the VMA is not one from this process */ if (foreign || vma_is_foreign(vma)) return true; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 68ecdff..d81744e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -908,7 +908,8 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code, if (error_code & PF_PK) return true; /* this checks permission keys on the VMA: */ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), + (error_code & PF_INSTR), foreign)) return true; return false; } @@ -1112,7 +1113,8 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) * faults just to hit a PF_PK as soon as we fill in a * page. 
*/ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), + (error_code & PF_INSTR), foreign)) return 1; if (error_code & PF_WRITE) { @@ -1267,6 +1269,8 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, if (error_code & PF_WRITE) flags |= FAULT_FLAG_WRITE; + if (error_code & PF_INSTR) + flags |= FAULT_FLAG_INSTRUCTION; /* * When running in the kernel we expect faults to occur only to -- cgit v1.1 From c0b17b5bd4b7b98e7c6b67c9f69343b64711271b Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:25 -0800 Subject: x86/mm/pkeys: Dump PKRU with other kernel registers Protection Keys never affect kernel mappings. But, they can affect whether the kernel will fault when it touches a user mapping. The kernel doesn't touch user mappings without some careful choreography and these accesses don't generally result in oopses. But, if one does, we definitely want to have PKRU available so we can figure out if protection keys played a role. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210225.BF0D4482@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/process_64.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index b9d99e0..776229e 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -116,6 +116,8 @@ void __show_regs(struct pt_regs *regs, int all) printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); + if (boot_cpu_has(X86_FEATURE_OSPKE)) + printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru()); } void release_thread(struct task_struct *dead_task) -- cgit v1.1 From c1192f8428414679c8126180e690f8daa1d4d98a Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:27 -0800 Subject: x86/mm/pkeys: Dump pkey from VMA in /proc/pid/smaps The protection key can now be just as important as read/write permissions on a VMA. We need some debug mechanism to help figure out if it is in play. smaps seems like a logical place to expose it. arch/x86/kernel/setup.c is a bit of a weirdo place to put this code, but it already had seq_file.h and there was not a much better existing place to put it. We also use no #ifdef. If protection keys is .config'd out we will effectively get the same function as if we used the weak generic function. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Al Viro Cc: Andrew Morton Cc: Andy Lutomirski Cc: Baoquan He Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Young Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Jerome Marchand Cc: Jiri Kosina Cc: Joerg Roedel Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Laurent Dufour Cc: Linus Torvalds Cc: Mark Salter Cc: Mark Williamson Cc: Michal Hocko Cc: Naoya Horiguchi Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Rik van Riel Cc: Vlastimil Babka Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210227.4F8EB3F8@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d3d80e6..7260f99 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -112,6 +112,7 @@ #include #include #include +#include /* * max_low_pfn_mapped: highest direct mapped pfn under 4GB @@ -1282,3 +1283,11 @@ static int __init register_kernel_offset_dumper(void) return 0; } __initcall(register_kernel_offset_dumper); + +void arch_show_smap(struct seq_file *m, struct vm_area_struct *vma) +{ + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return; + + seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); +} -- cgit v1.1 From 284244a9876225eb73102aff41d4492f65cb2868 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:28 -0800 Subject: x86/mm/pkeys: Add Kconfig prompt to existing config option I don't have a strong opinion on whether we need this or not. Protection Keys has relatively little code associated with it, and it is not a heavyweight feature to keep enabled. However, I can imagine that folks would still appreciate being able to disable it. Here's the option if folks want it. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210228.7E79386C@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index fb2ebeb..b875434 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1716,8 +1716,18 @@ config X86_INTEL_MPX If unsure, say N. config X86_INTEL_MEMORY_PROTECTION_KEYS + prompt "Intel Memory Protection Keys" def_bool y + # Note: only available in 64-bit mode depends on CPU_SUP_INTEL && X86_64 + ---help--- + Memory Protection Keys provides a mechanism for enforcing + page-based protections, but without requiring modification of the + page tables when an application changes protection domains. + + For details, see Documentation/x86/protection-keys.txt + + If unsure, say y. config EFI bool "EFI runtime service support" -- cgit v1.1 From 0697694564c84f4c9320e5d103d0191297a20023 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:29 -0800 Subject: x86/mm/pkeys: Actually enable Memory Protection Keys in the CPU This sets the bit in 'cr4' to actually enable the protection keys feature. We also include a boot-time disable for the feature "nopku". Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE cpuid bit to appear set. At this point in boot, identify_cpu() has already run the actual CPUID instructions and populated the "cpu features" structures. We need to go back and re-run identify_cpu() to make sure it gets updated values. We *could* simply re-populate the 11th word of the cpuid data, but this is probably quick enough.
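Once setup_pku() has run, the result is visible from userspace as well. A hedged sketch of checking it with GCC's cpuid.h (the bit positions, CPUID.(EAX=7,ECX=0):ECX bit 3 for PKU and bit 4 for OSPKE, are taken from the SDM and worth double-checking there):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(7, 0, eax, ebx, ecx, edx);
	printf("pku:   %u\n", (ecx >> 3) & 1);	/* CPU supports protection keys */
	printf("ospke: %u\n", (ecx >> 4) & 1);	/* kernel has set CR4.PKE */
	return 0;
}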
Also note that with the cpu_has() check and X86_FEATURE_PKU present in disabled-features.h, we do not need an #ifdef for setup_pku(). Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210229.6708027C@viggo.jf.intel.com [ Small readability edits. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a719ad7..4fac263 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -304,6 +304,48 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) } /* + * Protection Keys are not available in 32-bit mode. + */ +static bool pku_disabled; + +static __always_inline void setup_pku(struct cpuinfo_x86 *c) +{ + if (!cpu_has(c, X86_FEATURE_PKU)) + return; + if (pku_disabled) + return; + + cr4_set_bits(X86_CR4_PKE); + /* + * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE + * cpuid bit to be set. We need to ensure that we + * update that bit in this CPU's "cpu_info". + */ + get_cpu_cap(c); +} + +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +static __init int setup_disable_pku(char *arg) +{ + /* + * Do not clear the X86_FEATURE_PKU bit. All of the + * runtime checks are against OSPKE so clearing the + * bit does nothing. + * + * This way, we will see "pku" in cpuinfo, but not + * "ospke", which is exactly what we want. It shows + * that the CPU has PKU, but the OS has not enabled it. + * This happens to be exactly how a system would look + * if we disabled the config option. + */ + pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n"); + pku_disabled = true; + return 1; +} +__setup("nopku", setup_disable_pku); +#endif /* CONFIG_X86_64 */ + +/* * Some CPU features depend on higher CPUID levels, which may not always * be available due to CPUID level capping or broken virtualization * software. Add those features to this table to auto-disable them. @@ -960,6 +1002,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) init_hypervisor(c); x86_init_rdrand(c); x86_init_cache_qos(c); + setup_pku(c); /* * Clear/Set all flags overriden by options, need do it -- cgit v1.1 From e6bfb70959a0ca6ddedb29e779a293c6f71ed0e7 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:31 -0800 Subject: mm/core, arch, powerpc: Pass a protection key in to calc_vm_flag_bits() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This plumbs a protection key through calc_vm_flag_bits(). We could have done this in calc_vm_prot_bits(), but I did not feel super strongly which way to go. It was pretty arbitrary which one to use. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrea Arcangeli Cc: Andrew Morton Cc: Andy Lutomirski Cc: Arve Hjønnevåg Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Brian Gerst Cc: Chen Gang Cc: Dan Williams Cc: Dave Chinner Cc: Dave Hansen Cc: David Airlie Cc: Denys Vlasenko Cc: Eric W. Biederman Cc: Geliang Tang Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Leon Romanovsky Cc: Linus Torvalds Cc: Masahiro Yamada Cc: Maxime Coquelin Cc: Mel Gorman Cc: Michael Ellerman Cc: Oleg Nesterov Cc: Paul Gortmaker Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Riley Andrews Cc: Vladimir Davydov Cc: devel@driverdev.osuosl.org Cc: linux-api@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Cc: linuxppc-dev@lists.ozlabs.org Link: http://lkml.kernel.org/r/20160212210231.E6F1F0D6@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/mman.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 8565c25..2563c43 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -18,11 +18,12 @@ * This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits() * here. How important is the optimization? */ -static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot) +static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, + unsigned long pkey) { return (prot & PROT_SAO) ? VM_SAO : 0; } -#define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot) +#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) { -- cgit v1.1 From 66d375709d2c891acc639538fd3179fa0cbb0daf Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:32 -0800 Subject: mm/core, x86/mm/pkeys: Add arch_validate_pkey() The syscall-level code is passed a protection key and need to return an appropriate error code if the protection key is bogus. We will be using this in subsequent patches. Note that this also begins a series of arch-specific calls that we need to expose in otherwise arch-independent code. We create a linux/pkeys.h header where we will put *all* the stubs for these functions. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210232.774EEAAB@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + arch/x86/include/asm/pkeys.h | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 arch/x86/include/asm/pkeys.h (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b875434..eda18ce 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -156,6 +156,7 @@ config X86 select X86_DEV_DMA_OPS if X86_64 select X86_FEATURE_NAMES if PROC_FS select ARCH_USES_HIGH_VMA_FLAGS if X86_INTEL_MEMORY_PROTECTION_KEYS + select ARCH_HAS_PKEYS if X86_INTEL_MEMORY_PROTECTION_KEYS config INSTRUCTION_DECODER def_bool y diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h new file mode 100644 index 0000000..04243c2 --- /dev/null +++ b/arch/x86/include/asm/pkeys.h @@ -0,0 +1,6 @@ +#ifndef _ASM_X86_PKEYS_H +#define _ASM_X86_PKEYS_H + +#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1) + +#endif /*_ASM_X86_PKEYS_H */ -- cgit v1.1 From 39a0526fb3f7d93433d146304278477eb463f8af Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:34 -0800 Subject: x86/mm: Factor out LDT init from context init The arch-specific mm_context_t is a great place to put protection-key allocation state. 
But, we need to initialize the allocation state because pkey 0 is always "allocated". All of the runtime initialization of mm_context_t is done in *_ldt() manipulation functions. This renames the existing LDT functions like this: init_new_context() -> init_new_context_ldt() destroy_context() -> destroy_context_ldt() and makes init_new_context() and destroy_context() available for generic use. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210234.DB34FCC5@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mmu_context.h | 21 ++++++++++++++++----- arch/x86/kernel/ldt.c | 4 ++-- 2 files changed, 18 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 6572b94..8428002 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -52,15 +52,15 @@ struct ldt_struct { /* * Used for LDT copy/destruction. */ -int init_new_context(struct task_struct *tsk, struct mm_struct *mm); -void destroy_context(struct mm_struct *mm); +int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm); +void destroy_context_ldt(struct mm_struct *mm); #else /* CONFIG_MODIFY_LDT_SYSCALL */ -static inline int init_new_context(struct task_struct *tsk, - struct mm_struct *mm) +static inline int init_new_context_ldt(struct task_struct *tsk, + struct mm_struct *mm) { return 0; } -static inline void destroy_context(struct mm_struct *mm) {} +static inline void destroy_context_ldt(struct mm_struct *mm) {} #endif static inline void load_mm_ldt(struct mm_struct *mm) @@ -104,6 +104,17 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) #endif } +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + init_new_context_ldt(tsk, mm); + return 0; +} +static inline void destroy_context(struct mm_struct *mm) +{ + destroy_context_ldt(mm); +} + static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 6acc9dd..6707039 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -103,7 +103,7 @@ static void free_ldt_struct(struct ldt_struct *ldt) * we do not have to muck with descriptors here, that is * done in switch_mm() as needed. */ -int init_new_context(struct task_struct *tsk, struct mm_struct *mm) +int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) { struct ldt_struct *new_ldt; struct mm_struct *old_mm; @@ -144,7 +144,7 @@ out_unlock: * * 64bit: Don't touch the LDT register - we're already in the next thread. */ -void destroy_context(struct mm_struct *mm) +void destroy_context_ldt(struct mm_struct *mm) { free_ldt_struct(mm->context.ldt); mm->context.ldt = NULL; -- cgit v1.1 From b8b9b6ba9dec3f155c7555cb208ba4078e97aedb Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:35 -0800 Subject: x86/fpu: Allow setting of XSAVE state We want to modify the Protection Key rights inside the kernel, so we need to change PKRU's contents. But, if we do a plain 'wrpkru', when we return to userspace we might do an XRSTOR and wipe out the kernel's 'wrpkru'. So, we need to go after PKRU in the xsave buffer. We do this by: 1. 
Ensuring that we have the XSAVE registers (fpregs) in the kernel FPU buffer (fpstate) 2. Looking up the location of a given state in the buffer 3. Filling in the state 4. Ensuring that the hardware knows that state is present there (basically that the 'init optimization' is not in place). 5. Copying the newly-modified state back to the registers if necessary. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: Fenghua Yu Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Quentin Casasnovas Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210235.5A3139BF@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu/internal.h | 2 + arch/x86/kernel/fpu/core.c | 63 ++++++++++++++++++++++++ arch/x86/kernel/fpu/xstate.c | 98 ++++++++++++++++++++++++++++++++++++- 3 files changed, 161 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index a212434..31ac8e6 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -25,6 +25,8 @@ extern void fpu__activate_curr(struct fpu *fpu); extern void fpu__activate_fpstate_read(struct fpu *fpu); extern void fpu__activate_fpstate_write(struct fpu *fpu); +extern void fpu__current_fpstate_write_begin(void); +extern void fpu__current_fpstate_write_end(void); extern void fpu__save(struct fpu *fpu); extern void fpu__restore(struct fpu *fpu); extern int fpu__restore_sig(void __user *buf, int ia32_frame); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 299b58b..dea8e76 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -354,6 +354,69 @@ void fpu__activate_fpstate_write(struct fpu *fpu) } /* + * This function must be called before we write the current + * task's fpstate. + * + * This call gets the current FPU register state and moves + * it in to the 'fpstate'. Preemption is disabled so that + * no writes to the 'fpstate' can occur from context + * switches. + * + * Must be followed by a fpu__current_fpstate_write_end(). + */ +void fpu__current_fpstate_write_begin(void) +{ + struct fpu *fpu = &current->thread.fpu; + + /* + * Ensure that the context-switching code does not write + * over the fpstate while we are doing our update. + */ + preempt_disable(); + + /* + * Move the fpregs in to the fpu's 'fpstate'. + */ + fpu__activate_fpstate_read(fpu); + + /* + * The caller is about to write to 'fpu'. Ensure that no + * CPU thinks that its fpregs match the fpstate. This + * ensures we will not be lazy and skip a XRSTOR in the + * future. + */ + fpu->last_cpu = -1; +} + +/* + * This function must be paired with fpu__current_fpstate_write_begin() + * + * This will ensure that the modified fpstate gets placed back in + * the fpregs if necessary. + * + * Note: This function may be called whether or not an _actual_ + * write to the fpstate occurred. + */ +void fpu__current_fpstate_write_end(void) +{ + struct fpu *fpu = &current->thread.fpu; + + /* + * 'fpu' now has an updated copy of the state, but the + * registers may still be out of date. Update them with + * an XRSTOR if they are active. + */ + if (fpregs_active()) + copy_kernel_to_fpregs(&fpu->state); + + /* + * Our update is done and the fpregs/fpstate are in sync + * if necessary. Context switches can happen again.
+ */ + preempt_enable(); +} + +/* * 'fpu__restore()' is called to copy FPU registers from * the FPU fpstate to the live hw registers and to activate * access to the hardware registers, so that FPU instructions diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index a63ca80..30d144f 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -679,6 +679,19 @@ void fpu__resume_cpu(void) } /* + * Given an xstate feature mask, calculate where in the xsave + * buffer the state is. Callers should ensure that the buffer + * is valid. + * + * Note: does not work for compacted buffers. + */ +void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask) +{ + int feature_nr = fls64(xstate_feature_mask) - 1; + + return (void *)xsave + xstate_comp_offsets[feature_nr]; +} +/* * Given the xsave area and a state inside, this function returns the * address of the state. * @@ -698,7 +711,6 @@ void fpu__resume_cpu(void) */ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature) { - int feature_nr = fls64(xstate_feature) - 1; /* * Do we even *have* xsave state? */ @@ -726,7 +738,7 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature) if (!(xsave->header.xfeatures & xstate_feature)) return NULL; - return (void *)xsave + xstate_comp_offsets[feature_nr]; + return __raw_xsave_addr(xsave, xstate_feature); } EXPORT_SYMBOL_GPL(get_xsave_addr); @@ -761,3 +773,85 @@ const void *get_xsave_field_ptr(int xsave_state) return get_xsave_addr(&fpu->state.xsave, xsave_state); } + + +/* + * Set xfeatures (aka XSTATE_BV) bit for a feature that we want + * to take out of its "init state". This will ensure that an + * XRSTOR actually restores the state. + */ +static void fpu__xfeature_set_non_init(struct xregs_state *xsave, + int xstate_feature_mask) +{ + xsave->header.xfeatures |= xstate_feature_mask; +} + +/* + * This function is safe to call whether the FPU is in use or not. + * + * Note that this only works on the current task. + * + * Inputs: + * @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP, + * XFEATURE_MASK_SSE, etc...) + * @xsave_state_ptr: a pointer to a copy of the state that you would + * like written in to the current task's FPU xsave state. This pointer + * must not be located in the current task's xsave area. + * Output: + * address of the state in the xsave area or NULL if the state + * is not present or is in its 'init state'. + */ +static void fpu__xfeature_set_state(int xstate_feature_mask, + void *xstate_feature_src, size_t len) +{ + struct xregs_state *xsave = &current->thread.fpu.state.xsave; + struct fpu *fpu = &current->thread.fpu; + void *dst; + + if (!boot_cpu_has(X86_FEATURE_XSAVE)) { + WARN_ONCE(1, "%s() attempted with no xsave support", __func__); + return; + } + + /* + * Tell the FPU code that we need the FPU state to be in + * 'fpu' (not in the registers), and that we need it to + * be stable while we write to it. + */ + fpu__current_fpstate_write_begin(); + + /* + * This method *WILL* *NOT* work for compact-format + * buffers. If the 'xstate_feature_mask' is unset in + * xcomp_bv then we may need to move other feature state + * "up" in the buffer. + */ + if (xsave->header.xcomp_bv & xstate_feature_mask) { + WARN_ON_ONCE(1); + goto out; + } + + /* find the location in the xsave buffer of the desired state */ + dst = __raw_xsave_addr(&fpu->state.xsave, xstate_feature_mask); + + /* + * Make sure that the pointer being passed in did not + * come from the xsave buffer itself.
+ */ + WARN_ONCE(xstate_feature_src == dst, "set from xsave buffer itself"); + + /* put the caller-provided data in the location */ + memcpy(dst, xstate_feature_src, len); + + /* + * Mark the xfeature so that the CPU knows there is state + * in the buffer now. + */ + fpu__xfeature_set_non_init(xsave, xstate_feature_mask); +out: + /* + * We are done writing to the 'fpu'. Reenable preeption + * and (possibly) move the fpstate back in to the fpregs. + */ + fpu__current_fpstate_write_end(); +} -- cgit v1.1 From 8459429693395ca9e8d18101300b120ad9171795 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:36 -0800 Subject: x86/mm/pkeys: Allow kernel to modify user pkey rights register The Protection Key Rights for User memory (PKRU) is a 32-bit user-accessible register. It contains two bits for each protection key: one to write-disable (WD) access to memory covered by the key and another to access-disable (AD). Userspace can read/write the register with the RDPKRU and WRPKRU instructions. But, the register is saved and restored with the XSAVE family of instructions, which means we have to treat it like a floating point register. The kernel needs to write to the register if it wants to implement execute-only memory or if it implements a system call to change PKRU. To do this, we need to create a 'pkru_state' buffer, read the old contents in to it, modify it, and then tell the FPU code that there is modified data in there so it can (possibly) move the buffer back in to the registers. This uses the fpu__xfeature_set_state() function that we defined in the previous patch. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210236.0BE13217@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable.h | 5 +-- arch/x86/include/asm/pkeys.h | 3 ++ arch/x86/kernel/fpu/xstate.c | 74 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3cbfae8..1ff49ec 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -921,16 +921,17 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) #define PKRU_AD_BIT 0x1 #define PKRU_WD_BIT 0x2 +#define PKRU_BITS_PER_PKEY 2 static inline bool __pkru_allows_read(u32 pkru, u16 pkey) { - int pkru_pkey_bits = pkey * 2; + int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY; return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits)); } static inline bool __pkru_allows_write(u32 pkru, u16 pkey) { - int pkru_pkey_bits = pkey * 2; + int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY; /* * Access-disable disables writes too so we need to check * both bits here. diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 04243c2..5061aec 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -3,4 +3,7 @@ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 
16 : 1) +extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, + unsigned long init_val); + #endif /*_ASM_X86_PKEYS_H */ diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 30d144f..50813c3 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -5,6 +5,7 @@ */ #include #include +#include #include #include @@ -855,3 +856,76 @@ out: */ fpu__current_fpstate_write_end(); } + +#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) +#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) + +/* + * This will go out and modify the XSAVE buffer so that PKRU is + * set to a particular state for access to 'pkey'. + * + * PKRU state does affect kernel access to user memory. We do + * not modfiy PKRU *itself* here, only the XSAVE state that will + * be restored in to PKRU when we return back to userspace. + */ +int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, + unsigned long init_val) +{ + struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; + struct pkru_state *old_pkru_state; + struct pkru_state new_pkru_state; + int pkey_shift = (pkey * PKRU_BITS_PER_PKEY); + u32 new_pkru_bits = 0; + + if (!validate_pkey(pkey)) + return -EINVAL; + /* + * This check implies XSAVE support. OSPKE only gets + * set if we enable XSAVE and we enable PKU in XCR0. + */ + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return -EINVAL; + + /* Set the bits we need in PKRU */ + if (init_val & PKEY_DISABLE_ACCESS) + new_pkru_bits |= PKRU_AD_BIT; + if (init_val & PKEY_DISABLE_WRITE) + new_pkru_bits |= PKRU_WD_BIT; + + /* Shift the bits in to the correct place in PKRU for pkey. */ + new_pkru_bits <<= pkey_shift; + + /* Locate old copy of the state in the xsave buffer */ + old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU); + + /* + * When state is not in the buffer, it is in the init + * state, set it manually. Otherwise, copy out the old + * state. + */ + if (!old_pkru_state) + new_pkru_state.pkru = 0; + else + new_pkru_state.pkru = old_pkru_state->pkru; + + /* mask off any old bits in place */ + new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift); + /* Set the newly-requested bits */ + new_pkru_state.pkru |= new_pkru_bits; + + /* + * We could theoretically live without zeroing pkru.pad. + * The current XSAVE feature state definition says that + * only bytes 0->3 are used. But we do not want to + * chance leaking kernel stack out to userspace in case a + * memcpy() of the whole xsave buffer was done. + * + * They're in the same cacheline anyway. + */ + new_pkru_state.pad = 0; + + fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, + sizeof(new_pkru_state)); + + return 0; +} -- cgit v1.1 From 878ba03932d757ce4e954db4defec74a0de0435b Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:37 -0800 Subject: x86/mm/pkeys: Create an x86 arch_calc_vm_prot_bits() for VMA flags calc_vm_prot_bits() takes PROT_{READ,WRITE,EXECUTE} bits and turns them in to the vma->vm_flags/VM_* bits. We need to do a similar thing for protection keys. We take a protection key (4 bits) and encode it in to the 4 VM_PKEY_* bits. Note: this code is not new. It was simply a part of the mprotect_pkey() patch in the past. I broke it out for use in the execute-only support. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210237.CFB94AD5@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/uapi/asm/mman.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h index e8562e0..39bca7f 100644 --- a/arch/x86/include/uapi/asm/mman.h +++ b/arch/x86/include/uapi/asm/mman.h @@ -20,6 +20,12 @@ ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) | \ ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) | \ ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0)) + +#define arch_calc_vm_prot_bits(prot, key) ( \ + ((key) & 0x1 ? VM_PKEY_BIT0 : 0) | \ + ((key) & 0x2 ? VM_PKEY_BIT1 : 0) | \ + ((key) & 0x4 ? VM_PKEY_BIT2 : 0) | \ + ((key) & 0x8 ? VM_PKEY_BIT3 : 0)) #endif #include -- cgit v1.1 From 62b5f7d013fc455b8db26cf01e421f4c0d264b92 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:40 -0800 Subject: mm/core, x86/mm/pkeys: Add execute-only protection keys support Protection keys provide new page-based protection in hardware. But, they have an interesting attribute: they only affect data accesses and never affect instruction fetches. That means that if we set up some memory which is set as "access-disabled" via protection keys, we can still execute from it. This patch uses protection keys to set up mappings to do just that. If a user calls: mmap(..., PROT_EXEC); or mprotect(ptr, sz, PROT_EXEC); (note PROT_EXEC-only without PROT_READ/WRITE), the kernel will notice this, and set a special protection key on the memory. It also sets the appropriate bits in the Protection Keys User Rights (PKRU) register so that the memory becomes unreadable and unwritable. I haven't found any userspace that does this today. With this facility in place, we expect userspace to move to use it eventually. Userspace _could_ start doing this today. Any PROT_EXEC calls get converted to PROT_READ inside the kernel, and would transparently be upgraded to "true" PROT_EXEC with this code. IOW, userspace never has to do any PROT_EXEC runtime detection. This feature provides enhanced protection against leaking executable memory contents. This helps thwart attacks which are attempting to find ROP gadgets on the fly. But, the security provided by this approach is not comprehensive. The PKRU register which controls access permissions is a normal user register writable from unprivileged userspace. An attacker who can execute the 'wrpkru' instruction can easily disable the protection provided by this feature. The protection key that is used for execute-only support is permanently dedicated at compile time. This is fine for now because there is currently no API to set a protection key other than this one. Despite there being a constant PKRU value across the entire system, we do not set it unless this feature is in use in a process. That is to preserve the PKRU XSAVE 'init state', which can lead to faster context switches. PKRU *is* a user register and the kernel is modifying it. That means that code doing: pkru = rdpkru() pkru |= 0x100; mmap(..., PROT_EXEC); wrpkru(pkru); could lose the bits in PKRU that enforce execute-only permissions. To avoid this, we suggest avoiding ever calling mmap() or mprotect() when the PKRU value is expected to be unstable. 
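To make the semantics concrete, a hedged userspace sketch (x86-64 assumed; on pkey-capable hardware the final read faults with si_code=SEGV_PKUERR, while on older CPUs the page simply remains readable):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	unsigned char ret_insn = 0xc3;	/* x86 'ret' */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memcpy(p, &ret_insn, 1);

	/*
	 * PROT_EXEC with neither PROT_READ nor PROT_WRITE: the kernel
	 * can now back this with the dedicated execute-only pkey.
	 */
	if (mprotect(p, 4096, PROT_EXEC))
		return 1;

	((void (*)(void))p)();		/* instruction fetch: allowed */
	puts("executed");

	return *(volatile unsigned char *)p;	/* data read: denied */
}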
Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrea Arcangeli Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Chen Gang Cc: Dan Williams Cc: Dave Chinner Cc: Dave Hansen Cc: David Hildenbrand Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Linus Torvalds Cc: Mel Gorman Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Piotr Kwapulinski Cc: Rik van Riel Cc: Stephen Smalley Cc: Vladimir Murzin Cc: Will Deacon Cc: keescook@google.com Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210240.CB4BB5CA@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pkeys.h | 25 +++++++++++ arch/x86/kernel/fpu/xstate.c | 2 - arch/x86/mm/Makefile | 2 + arch/x86/mm/fault.c | 10 +++++ arch/x86/mm/pkeys.c | 101 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 138 insertions(+), 2 deletions(-) create mode 100644 arch/x86/mm/pkeys.c (limited to 'arch') diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 5061aec..7b84565 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -6,4 +6,29 @@ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val); +/* + * Try to dedicate one of the protection keys to be used as an + * execute-only protection key. + */ +#define PKEY_DEDICATED_EXECUTE_ONLY 15 +extern int __execute_only_pkey(struct mm_struct *mm); +static inline int execute_only_pkey(struct mm_struct *mm) +{ + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return 0; + + return __execute_only_pkey(mm); +} + +extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma, + int prot, int pkey); +static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma, + int prot, int pkey) +{ + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return 0; + + return __arch_override_mprotect_pkey(vma, prot, pkey); +} + #endif /*_ASM_X86_PKEYS_H */ diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 50813c3..1b19818 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -877,8 +877,6 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, int pkey_shift = (pkey * PKRU_BITS_PER_PKEY); u32 new_pkru_bits = 0; - if (!validate_pkey(pkey)) - return -EINVAL; /* * This check implies XSAVE support. OSPKE only gets * set if we enable XSAVE and we enable PKU in XCR0. diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index f9d38a4..67cf2e1 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -34,3 +34,5 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o obj-$(CONFIG_X86_INTEL_MPX) += mpx.o +obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o + diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index d81744e..5877b92 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1108,6 +1108,16 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) */ if (error_code & PF_PK) return 1; + + if (!(error_code & PF_INSTR)) { + /* + * Assume all accesses require either read or execute + * permissions. This is not an instruction access, so + * it requires read permissions. 
+ */ + if (!(vma->vm_flags & VM_READ)) + return 1; + } /* * Make sure to check the VMA so that we do not perform * faults just to hit a PF_PK as soon as we fill in a diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c new file mode 100644 index 0000000..e8c4744 --- /dev/null +++ b/arch/x86/mm/pkeys.c @@ -0,0 +1,101 @@ +/* + * Intel Memory Protection Keys management + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include /* mm_struct, vma, etc... */ +#include /* PKEY_* */ +#include + +#include /* boot_cpu_has, ... */ +#include /* vma_pkey() */ +#include /* fpregs_active() */ + +int __execute_only_pkey(struct mm_struct *mm) +{ + int ret; + + /* + * We do not want to go through the relatively costly + * dance to set PKRU if we do not need to. Check it + * first and assume that if the execute-only pkey is + * write-disabled that we do not have to set it + * ourselves. We need preempt off so that nobody + * can make fpregs inactive. + */ + preempt_disable(); + if (fpregs_active() && + !__pkru_allows_read(read_pkru(), PKEY_DEDICATED_EXECUTE_ONLY)) { + preempt_enable(); + return PKEY_DEDICATED_EXECUTE_ONLY; + } + preempt_enable(); + ret = arch_set_user_pkey_access(current, PKEY_DEDICATED_EXECUTE_ONLY, + PKEY_DISABLE_ACCESS); + /* + * If the PKRU-set operation failed somehow, just return + * 0 and effectively disable execute-only support. + */ + if (ret) + return 0; + + return PKEY_DEDICATED_EXECUTE_ONLY; +} + +static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) +{ + /* Do this check first since the vm_flags should be hot */ + if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC) + return false; + if (vma_pkey(vma) != PKEY_DEDICATED_EXECUTE_ONLY) + return false; + + return true; +} + +/* + * This is only called for *plain* mprotect calls. + */ +int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey) +{ + /* + * Is this an mprotect_pkey() call? If so, never + * override the value that came from the user. + */ + if (pkey != -1) + return pkey; + /* + * Look for a protection-key-drive execute-only mapping + * which is now being given permissions that are not + * execute-only. Move it back to the default pkey. + */ + if (vma_is_pkey_exec_only(vma) && + (prot & (PROT_READ|PROT_WRITE))) { + return 0; + } + /* + * The mapping is execute-only. Go try to get the + * execute-only protection key. If we fail to do that, + * fall through as if we do not have execute-only + * support. + */ + if (prot == PROT_EXEC) { + pkey = execute_only_pkey(vma->vm_mm); + if (pkey > 0) + return pkey; + } + /* + * This is a vanilla, non-pkey mprotect (or we failed to + * setup execute-only), inherit the pkey from the VMA we + * are working on. 
From e21555436f196c241503c7c6240272e57783235c Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Tue, 1 Mar 2016 11:41:33 -0800
Subject: x86/mm/pkeys: Fix access_error() denial of writes to write-only VMA

Andrey Wagin reported that a simple test case was broken by:

  62b5f7d013fc ("mm/core, x86/mm/pkeys: Add execute-only protection keys support")

This test case creates an unreadable VMA and my patch assumed that all
writes must be to readable VMAs. The simplest fix for this is to remove
the pkey-related bits in access_error(). For execute-only support, I
believe the existing version is sufficient because the permissions we are
trying to enforce are entirely expressed in vma->vm_flags. We just depend
on pkeys to get *an* exception; it does not matter that PF_PK was set, or
even what state PKRU is in. I will re-add the necessary bits with the full
pkeys implementation that includes the new syscalls.

The three cases that matter are:

1. If a write to an execute-only VMA occurs, we will see PF_WRITE set, but
   !VM_WRITE on the VMA, and return 1. All execute-only VMAs have VM_WRITE
   clear by definition.

2. If a read occurs on a present PTE, we will fall into the "read, present"
   case and return 1.

3. If a read occurs to a non-present PTE, we will miss the "read, not
   present" case, because the execute-only VMA will have VM_EXEC set, and
   we will properly return 0 allowing the PTE to be populated.

Test program:

int main()
{
        int *p;
        p = mmap(NULL, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        p[0] = 1;
        return 0;
}

Reported-by: Andrey Wagin
Signed-off-by: Dave Hansen
Acked-by: Kirill A. Shutemov
Cc: Dave Hansen
Cc: Kirill A. Shutemov
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-mm@kvack.org
Cc: linux-next@vger.kernel.org
Fixes: 62b5f7d013fc ("mm/core, x86/mm/pkeys: Add execute-only protection keys support")
Link: http://lkml.kernel.org/r/20160301194133.65D0110C@viggo.jf.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/fault.c | 18 ------------------
 1 file changed, 18 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5877b92..6138db4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1101,24 +1101,6 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
         /* This is only called for the current mm, so: */
         bool foreign = false;
         /*
-         * Access or read was blocked by protection keys. We do
-         * this check before any others because we do not want
-         * to, for instance, confuse a protection-key-denied
-         * write with one for which we should do a COW.
-         */
-        if (error_code & PF_PK)
-                return 1;
-
-        if (!(error_code & PF_INSTR)) {
-                /*
-                 * Assume all accesses require either read or execute
-                 * permissions. This is not an instruction access, so
-                 * it requires read permissions.
-                 */
-                if (!(vma->vm_flags & VM_READ))
-                        return 1;
-        }
-        /*
          * Make sure to check the VMA so that we do not perform
          * faults just to hit a PF_PK as soon as we fill in a
          * page.
-- cgit v1.1
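[ For reference, a buildable version of the test case above; the includes
  and the error check are illustrative additions, not part of the commit: ]

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        int *p;

        /* A write-only VMA: VM_WRITE set, VM_READ and VM_EXEC clear */
        p = mmap(NULL, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        /* Must succeed: a PF_WRITE fault against a VM_WRITE VMA */
        p[0] = 1;
        return 0;
}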
From 49cd53bf14aeb471c4a2682300dfc05ef2fd09f2 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Tue, 1 Mar 2016 04:54:51 -0800
Subject: mm/pkeys: Fix siginfo ABI breakage caused by new u64 field

Stephen Rothwell reported this linux-next build failure:

  http://lkml.kernel.org/r/20160226164406.065a1ffc@canb.auug.org.au

... caused by the Memory Protection Keys patches from the tip tree
triggering a newly introduced build-time sanity check on an ARM build,
because they changed the ABI of siginfo in an unexpected way.

If u64 has a natural alignment of 8 bytes (which is the case on most
mainstream platforms, with the notable exception of x86-32), then the
leadup to the _sifields union matters:

typedef struct siginfo {
        int si_signo;
        int si_errno;
        int si_code;

        union {
                ...
        } _sifields;
} __ARCH_SI_ATTRIBUTES siginfo_t;

Note how the first 3 fields give us 12 bytes, so _sifields is not
naturally aligned to 8 bytes.

Before the _pkey field addition the largest element of _sifields (on
32-bit platforms) was 32 bits. With the u64 added, the minimum alignment
requirement increased to 8 bytes on those (rare) 32-bit platforms. Thus
GCC padded the space after si_code with 4 extra bytes, and shifted all
_sifields offsets by 4 bytes - breaking the ABI of all of the remaining
fields.

On 64-bit platforms this problem was hidden due to _sifields already
having numerous fields with natural 8-byte alignment (pointers).

To fix this, we replace the u64 with an '__u32'. The __u32 does not
increase the minimum alignment requirement of the union, and it is also
large enough to store the 16-bit pkey we have today on x86.

Reported-by: Stephen Rothwell
Signed-off-by: Dave Hansen
Acked-by: Stephen Rothwell
Cc: Andrew Morton
Cc: Dave Hansen
Cc: Helge Deller
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-next@vger.kernel.org
Fixes: cd0ea35ff551 ("signals, pkeys: Notify userspace about protection key faults")
Link: http://lkml.kernel.org/r/20160301125451.02C7426D@viggo.jf.intel.com
Signed-off-by: Ingo Molnar
---
 arch/ia64/include/uapi/asm/siginfo.h | 2 +-
 arch/mips/include/uapi/asm/siginfo.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h
index 0151cfa..f72bf01 100644
--- a/arch/ia64/include/uapi/asm/siginfo.h
+++ b/arch/ia64/include/uapi/asm/siginfo.h
@@ -70,7 +70,7 @@ typedef struct siginfo {
                                 void __user *_upper;
                         } _addr_bnd;
                         /* used when si_code=SEGV_PKUERR */
-                        u64 _pkey;
+                        __u32 _pkey;
                 };
         } _sigfault;
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index 6f4edf0..cc49dc2 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -93,7 +93,7 @@ typedef struct siginfo {
                                 void __user *_upper;
                         } _addr_bnd;
                         /* used when si_code=SEGV_PKUERR */
-                        u64 _pkey;
+                        __u32 _pkey;
                 };
         } _sigfault;
-- cgit v1.1
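[ Illustration, not part of the commit: the padding effect is easy to
  reproduce in userspace. The structs below are simplified, hypothetical
  stand-ins for siginfo, not the real uapi definitions. Compiled for a
  32-bit ABI that gives u64 a natural alignment of 8 bytes (e.g. ARM EABI),
  the u64 variant prints offset 16 for _sifields, the __u32 variant 12: ]

#include <stdio.h>
#include <stddef.h>

struct siginfo_with_u64 {
        int si_signo;
        int si_errno;
        int si_code;
        union {
                int _pad;
                unsigned long long _pkey;       /* the problematic u64 */
        } _sifields;
};

struct siginfo_with_u32 {
        int si_signo;
        int si_errno;
        int si_code;
        union {
                int _pad;
                unsigned int _pkey;             /* the __u32 fix */
        } _sifields;
};

int main(void)
{
        /* The 4-byte shift after si_code is the ABI-visible breakage */
        printf("u64 pkey:   _sifields at offset %zu\n",
               offsetof(struct siginfo_with_u64, _sifields));
        printf("__u32 pkey: _sifields at offset %zu\n",
               offsetof(struct siginfo_with_u32, _sifields));
        return 0;
}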
From 0d47638f80a02b15869f1fe1fc09e5bf996750fd Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Thu, 10 Mar 2016 14:12:13 -0800
Subject: x86/mm/pkeys: Fix mismerge of protection keys CPUID bits

Kirill Shutemov pointed this out to me. The tip tree currently has commit:

  dfb4a70f2 [x86/cpufeature, x86/mm/pkeys: Add protection keys related CPUID definitions]

which added support for two new CPUID bits: X86_FEATURE_PKU and
X86_FEATURE_OSPKE. But those bits were mis-merged and put in cpufeature.h
instead of cpufeatures.h. This didn't cause any breakage *except* that it
kept the "ospke" and "pku" bits from showing up in cpuinfo. Now cpuinfo
has the two new flags:

        flags   : ... pku ospke

BTW, is it really wise to have cpufeature.h and cpufeatures.h? It seems
like they can only cause confusion and mayhem with tab completion.

Reported-by: Kirill A. Shutemov
Signed-off-by: Dave Hansen
Acked-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/20160310221213.06F9DB53@viggo.jf.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/cpufeature.h  | 4 ----
 arch/x86/include/asm/cpufeatures.h | 4 ++++
 2 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 50e292a..3636ec0 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -94,10 +94,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
          (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
           x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
 
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
-#define X86_FEATURE_PKU         (16*32+ 3) /* Protection Keys for Userspace */
-#define X86_FEATURE_OSPKE       (16*32+ 4) /* OS Protection Keys Enable */
-
 /*
  * This macro is for detection of features which need kernel
  * infrastructure to be used. It may *not* directly test the CPU
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index cbb2c56..89949a2 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -270,6 +270,10 @@
 #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
 #define X86_FEATURE_AVIC        (15*32+13) /* Virtual Interrupt Controller */
 
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_PKU         (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE       (16*32+ 4) /* OS Protection Keys Enable */
+
 /*
  * BUG word(s)
  */
-- cgit v1.1
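[ Illustration, not part of the commit: besides grepping /proc/cpuinfo for
  "pku" and "ospke", userspace can probe the architectural bits these
  defines mirror - CPUID leaf 0x7, subleaf 0, ECX bits 3 and 4 - assuming
  a GCC- or Clang-provided <cpuid.h> on x86: ]

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Structured extended feature flags; validates the max leaf first */
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                return 1;

        /* Same bits as X86_FEATURE_PKU / X86_FEATURE_OSPKE (word 16) */
        printf("pku:   %s\n", (ecx & (1u << 3)) ? "yes" : "no");
        printf("ospke: %s\n", (ecx & (1u << 4)) ? "yes" : "no");
        return 0;
}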