From d4edcf0d56958db0aca0196314ca38a5e730ea92 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:01:56 -0800 Subject: mm/gup: Switch all callers of get_user_pages() to not pass tsk/mm We will soon modify the vanilla get_user_pages() so it can no longer be used on mm/tasks other than 'current/current->mm', which is by far the most common way it is called. For now, we allow the old-style calls, but warn when they are used. (implemented in previous patch) This patch switches all callers of: get_user_pages() get_user_pages_unlocked() get_user_pages_locked() to stop passing tsk/mm so they will no longer see the warnings. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrea Arcangeli Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Kirill A. Shutemov Cc: Linus Torvalds Cc: Naoya Horiguchi Cc: Peter Zijlstra Cc: Rik van Riel Cc: Srikar Dronamraju Cc: Vlastimil Babka Cc: jack@suse.cz Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210156.113E9407@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/gup.c | 2 +- arch/x86/mm/mpx.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 6d5eb59..ce5e454 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -422,7 +422,7 @@ slow_irqon: start += nr << PAGE_SHIFT; pages += nr; - ret = get_user_pages_unlocked(current, mm, start, + ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, write, 0, pages); diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index b2fd67d..84fa4a4 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -546,8 +546,8 @@ static int mpx_resolve_fault(long __user *addr, int write) int nr_pages = 1; int force = 0; - gup_ret = get_user_pages(current, current->mm, (unsigned long)addr, - nr_pages, write, force, NULL, NULL); + gup_ret = get_user_pages((unsigned long)addr, nr_pages, write, + force, NULL, NULL); /* * get_user_pages() returns number of pages gotten. * 0 means we failed to fault in and get anything, -- cgit v1.1 From b3ecd51559ae7a8f40b10443773b9cd0e6a50f5e Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:07 -0800 Subject: x86/mm/pkeys: Add new 'PF_PK' page fault error code bit Note: "PK" is how the Intel SDM refers to this bit, so we also use that nomenclature. This only defines the bit, it does not plumb it anywhere to be handled. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210207.DA7B43E6@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index eef44d9..9f72f9c 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -33,6 +33,7 @@ * bit 2 == 0: kernel-mode access 1: user-mode access * bit 3 == 1: use of reserved bit detected * bit 4 == 1: fault was an instruction fetch + * bit 5 == 1: protection keys block access */ enum x86_pf_error_code { @@ -41,6 +42,7 @@ enum x86_pf_error_code { PF_USER = 1 << 2, PF_RSVD = 1 << 3, PF_INSTR = 1 << 4, + PF_PK = 1 << 5, }; /* @@ -916,6 +918,12 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) if ((error_code & PF_INSTR) && !pte_exec(*pte)) return 0; + /* + * Note: We do not do lazy flushing on protection key + * changes, so no spurious fault will ever set PF_PK. + */ + if ((error_code & PF_PK)) + return 1; return 1; } -- cgit v1.1 From 7b2d0dbac4890c8ca4a8acc57709639fc8b158e9 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:11 -0800 Subject: x86/mm/pkeys: Pass VMA down in to fault signal generation code During a page fault, we look up the VMA to ensure that the fault is in a region with a valid mapping. But, in the top-level page fault code we don't need the VMA for much else. Once we have decided that an access is bad, we are going to send a signal no matter what and do not need the VMA any more. So we do not pass it down in to the signal generation code. But, for protection keys, we need the VMA. It tells us *which* protection key we violated if we get a PF_PK. So, we need to pass the VMA down and fill in siginfo->si_pkey. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210211.AD3B36A3@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 50 ++++++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 22 deletions(-) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9f72f9c..3c51c66 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -171,7 +171,8 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) static void force_sig_info_fault(int si_signo, int si_code, unsigned long address, - struct task_struct *tsk, int fault) + struct task_struct *tsk, struct vm_area_struct *vma, + int fault) { unsigned lsb = 0; siginfo_t info; @@ -656,6 +657,8 @@ no_context(struct pt_regs *regs, unsigned long error_code, struct task_struct *tsk = current; unsigned long flags; int sig; + /* No context means no VMA to pass down */ + struct vm_area_struct *vma = NULL; /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) { @@ -679,7 +682,8 @@ no_context(struct pt_regs *regs, unsigned long error_code, tsk->thread.cr2 = address; /* XXX: hwpoison faults will set the wrong code. 
*/ - force_sig_info_fault(signal, si_code, address, tsk, 0); + force_sig_info_fault(signal, si_code, address, + tsk, vma, 0); } /* @@ -756,7 +760,8 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, static void __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address, int si_code) + unsigned long address, struct vm_area_struct *vma, + int si_code) { struct task_struct *tsk = current; @@ -799,7 +804,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_PF; - force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); + force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0); return; } @@ -812,14 +817,14 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, static noinline void bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address) + unsigned long address, struct vm_area_struct *vma) { - __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); + __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR); } static void __bad_area(struct pt_regs *regs, unsigned long error_code, - unsigned long address, int si_code) + unsigned long address, struct vm_area_struct *vma, int si_code) { struct mm_struct *mm = current->mm; @@ -829,25 +834,25 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, */ up_read(&mm->mmap_sem); - __bad_area_nosemaphore(regs, error_code, address, si_code); + __bad_area_nosemaphore(regs, error_code, address, vma, si_code); } static noinline void bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) { - __bad_area(regs, error_code, address, SEGV_MAPERR); + __bad_area(regs, error_code, address, NULL, SEGV_MAPERR); } static noinline void bad_area_access_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address) + unsigned long address, struct vm_area_struct *vma) { - __bad_area(regs, error_code, address, SEGV_ACCERR); + __bad_area(regs, error_code, address, vma, SEGV_ACCERR); } static void do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, - unsigned int fault) + struct vm_area_struct *vma, unsigned int fault) { struct task_struct *tsk = current; int code = BUS_ADRERR; @@ -874,12 +879,13 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, code = BUS_MCEERR_AR; } #endif - force_sig_info_fault(SIGBUS, code, address, tsk, fault); + force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault); } static noinline void mm_fault_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address, unsigned int fault) + unsigned long address, struct vm_area_struct *vma, + unsigned int fault) { if (fatal_signal_pending(current) && !(error_code & PF_USER)) { no_context(regs, error_code, address, 0, 0); @@ -903,9 +909,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| VM_FAULT_HWPOISON_LARGE)) - do_sigbus(regs, error_code, address, fault); + do_sigbus(regs, error_code, address, vma, fault); else if (fault & VM_FAULT_SIGSEGV) - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, vma); else BUG(); } @@ -1119,7 +1125,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * Don't take the mm semaphore here. 
If we fixup a prefetch * fault we could otherwise deadlock: */ - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, NULL); return; } @@ -1132,7 +1138,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, pgtable_bad(regs, error_code, address); if (unlikely(smap_violation(error_code, regs))) { - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, NULL); return; } @@ -1141,7 +1147,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * in a region with pagefaults disabled then we must not take the fault */ if (unlikely(faulthandler_disabled() || !mm)) { - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, NULL); return; } @@ -1185,7 +1191,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, if (unlikely(!down_read_trylock(&mm->mmap_sem))) { if ((error_code & PF_USER) == 0 && !search_exception_tables(regs->ip)) { - bad_area_nosemaphore(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address, NULL); return; } retry: @@ -1233,7 +1239,7 @@ retry: */ good_area: if (unlikely(access_error(error_code, vma))) { - bad_area_access_error(regs, error_code, address); + bad_area_access_error(regs, error_code, address, vma); return; } @@ -1271,7 +1277,7 @@ good_area: up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { - mm_fault_error(regs, error_code, address, fault); + mm_fault_error(regs, error_code, address, vma, fault); return; } -- cgit v1.1 From 019132ff3daf36c97a4006655dfd00ee42f2b590 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:14 -0800 Subject: x86/mm/pkeys: Fill in pkey field in siginfo This fills in the new siginfo field: si_pkey to indicate to userspace which protection key was set on the PTE that we faulted on. Note though that *ALL* protection key faults have to be generated by a valid, present PTE at some point. But this code does no PTE lookups, which seems odd. The reason is that we take advantage of the way we generate PTEs from VMAs. All PTEs under a VMA share some attributes. For instance, they are _all_ either PROT_READ *OR* PROT_NONE. They also always share a protection key, so we never have to walk the page tables; we just use the VMA. Note that si_pkey is a 64-bit value. The current hardware only supports 4-bit protection keys. We do this because there is _plenty_ of space in _sigfault and it is possible that future processors would support more than 4 bits of protection keys. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210213.ABC488FA@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 3c51c66..6e71dcf 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -15,12 +15,14 @@ #include <linux/context_tracking.h> /* exception_enter(), ... */ #include <linux/uaccess.h> /* faulthandler_disabled() */ +#include <asm/cpufeature.h> /* boot_cpu_has, ... */ #include <asm/traps.h> /* dotraplinkage, ... */ #include <asm/pgalloc.h> /* pgd_*(), ... */ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... 
*/ #include <asm/fixmap.h> /* VSYSCALL_ADDR */ #include <asm/vsyscall.h> /* emulate_vsyscall */ #include <asm/vm86.h> /* struct vm86 */ +#include <asm/mmu_context.h> /* vma_pkey() */ #define CREATE_TRACE_POINTS #include <asm/trace/exceptions.h> @@ -169,6 +171,56 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) return prefetch; } +/* + * A protection key fault means that the PKRU value did not allow + * access to some PTE. Userspace can figure out what PKRU was + * from the XSAVE state, and this function fills out a field in + * siginfo so userspace can discover which protection key was set + * on the PTE. + * + * If we get here, we know that the hardware signaled a PF_PK + * fault and that there was a VMA once we got in the fault + * handler. It does *not* guarantee that the VMA we find here + * was the one that we faulted on. + * + * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); + * 2. T1 : set PKRU to deny access to pkey=4, touches page + * 3. T1 : faults... + * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); + * 5. T1 : enters fault handler, takes mmap_sem, etc... + * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really + * faulted on a pte with its pkey=4. + */ +static void fill_sig_info_pkey(int si_code, siginfo_t *info, + struct vm_area_struct *vma) +{ + /* This is effectively an #ifdef */ + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return; + + /* Fault not from Protection Keys: nothing to do */ + if (si_code != SEGV_PKUERR) + return; + /* + * force_sig_info_fault() is called from a number of + * contexts, some of which have a VMA and some of which + * do not. The PF_PK handling happens after we have a + * valid VMA, so we should never reach this without a + * valid VMA. + */ + if (!vma) { + WARN_ONCE(1, "PKU fault with no VMA passed in"); + info->si_pkey = 0; + return; + } + /* + * si_pkey should be thought of as a strong hint, but not + * absolutely guaranteed to be 100% accurate because of + * the race explained above. + */ + info->si_pkey = vma_pkey(vma); +} + static void force_sig_info_fault(int si_signo, int si_code, unsigned long address, struct task_struct *tsk, struct vm_area_struct *vma, @@ -187,6 +239,8 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, lsb = PAGE_SHIFT; info.si_addr_lsb = lsb; + fill_sig_info_pkey(si_code, &info, vma); + force_sig_info(si_signo, &info, tsk); } @@ -847,7 +901,15 @@ static noinline void bad_area_access_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct vm_area_struct *vma) { - __bad_area(regs, error_code, address, vma, SEGV_ACCERR); + /* + * This OSPKE check is not strictly necessary at runtime. + * But, doing it this way allows compiler optimizations + * if pkeys are compiled out. + */ + if (boot_cpu_has(X86_FEATURE_OSPKE) && (error_code & PF_PK)) + __bad_area(regs, error_code, address, vma, SEGV_PKUERR); + else + __bad_area(regs, error_code, address, vma, SEGV_ACCERR); } static void -- cgit v1.1 From 1874f6895c92d991ccf85edcc55a0d9dd552d71c Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:18 -0800 Subject: x86/mm/gup: Simplify get_user_pages() PTE bit handling The current get_user_pages() code is a wee bit more complicated than it needs to be for pte bit checking. Currently, it establishes a mask of required pte _PAGE_* bits and ensures that the pte it goes after has all those bits. This consolidates the three identical copies of this code. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210218.3A2D4045@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/gup.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index ce5e454..2f0a329 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -75,6 +75,24 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) } /* + * 'pteval' can come from a pte, pmd or pud. We only check + * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the + * same value on all 3 types. + */ +static inline int pte_allows_gup(unsigned long pteval, int write) +{ + unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER; + + if (write) + need_pte_bits |= _PAGE_RW; + + if ((pteval & need_pte_bits) != need_pte_bits) + return 0; + + return 1; +} + +/* * The performance critical leaf functions are made noinline otherwise gcc * inlines everything into a single function which results in too much * register pressure. @@ -83,14 +101,9 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; - unsigned long mask; int nr_start = *nr; pte_t *ptep; - mask = _PAGE_PRESENT|_PAGE_USER; - if (write) - mask |= _PAGE_RW; - ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); @@ -110,7 +123,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, pte_unmap(ptep); return 0; } - } else if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { + } else if (!pte_allows_gup(pte_val(pte), write) || + pte_special(pte)) { pte_unmap(ptep); return 0; } @@ -164,14 +178,10 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { - unsigned long mask; struct page *head, *page; int refs; - mask = _PAGE_PRESENT|_PAGE_USER; - if (write) - mask |= _PAGE_RW; - if ((pmd_flags(pmd) & mask) != mask) + if (!pte_allows_gup(pmd_val(pmd), write)) return 0; VM_BUG_ON(!pfn_valid(pmd_pfn(pmd))); @@ -231,14 +241,10 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, static noinline int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { - unsigned long mask; struct page *head, *page; int refs; - mask = _PAGE_PRESENT|_PAGE_USER; - if (write) - mask |= _PAGE_RW; - if ((pud_flags(pud) & mask) != mask) + if (!pte_allows_gup(pud_val(pud), write)) return 0; /* hugepages are never "special" */ VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL); -- cgit v1.1 From 33a709b25a760b91184bb335cf7d7c32b8123013 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:19 -0800 Subject: mm/gup, x86/mm/pkeys: Check VMAs and PTEs for protection keys Today, for normal faults and page table walks, we check the VMA and/or PTE to ensure that it is compatible with the action. For instance, if we get a write fault on a non-writeable VMA, we SIGSEGV. We try to do the same thing for protection keys. 
Basically, we try to make sure that if a user does this: mprotect(ptr, size, PROT_NONE); *ptr = foo; they see the same effects with protection keys when they do this: mprotect(ptr, size, PROT_READ|PROT_WRITE); set_pkey(ptr, size, 4); wrpkru(0xffffff3f); // access disable pkey 4 *ptr = foo; The state to do that checking is in the VMA, but we also sometimes have to do it on the page tables only, like when doing a get_user_pages_fast() where we have no VMA. We add two functions and expose them to generic code: arch_pte_access_permitted(pte_flags, write) arch_vma_access_permitted(vma, write) These are, of course, backed up in x86 arch code with checks against the PTE or VMA's protection key. But, there are also cases where we do not want to respect protection keys. When we ptrace(), for instance, we do not want to apply the tracer's PKRU permissions to the PTEs from the process being traced. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Alexey Kardashevskiy Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Boaz Harrosh Cc: Borislav Petkov Cc: Brian Gerst Cc: Dan Williams Cc: Dave Hansen Cc: David Gibson Cc: David Hildenbrand Cc: David Vrabel Cc: Denys Vlasenko Cc: Dominik Dingel Cc: Dominik Vogt Cc: Guan Xuetao Cc: H. Peter Anvin Cc: Heiko Carstens Cc: Hugh Dickins Cc: Jason Low Cc: Jerome Marchand Cc: Juergen Gross Cc: Kirill A. Shutemov Cc: Laurent Dufour Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michael Ellerman Cc: Michal Hocko Cc: Mikulas Patocka Cc: Minchan Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Sasha Levin Cc: Shachar Raindel Cc: Stephen Smalley Cc: Toshi Kani Cc: Vlastimil Babka Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Link: http://lkml.kernel.org/r/20160212210219.14D5D715@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 21 ++++++++++++++++++++- arch/x86/mm/gup.c | 5 +++++ 2 files changed, 25 insertions(+), 1 deletion(-) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6e71dcf..319331a 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -897,6 +897,16 @@ bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) __bad_area(regs, error_code, address, NULL, SEGV_MAPERR); } +static inline bool bad_area_access_from_pkeys(unsigned long error_code, + struct vm_area_struct *vma) +{ + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return false; + if (error_code & PF_PK) + return true; + return false; +} + static noinline void bad_area_access_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct vm_area_struct *vma) @@ -906,7 +916,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, * But, doing it this way allows compiler optimizations * if pkeys are compiled out. */ - if (boot_cpu_has(X86_FEATURE_OSPKE) && (error_code & PF_PK)) + if (bad_area_access_from_pkeys(error_code, vma)) __bad_area(regs, error_code, address, vma, SEGV_PKUERR); else __bad_area(regs, error_code, address, vma, SEGV_ACCERR); @@ -1081,6 +1091,15 @@ int show_unhandled_signals = 1; static inline int access_error(unsigned long error_code, struct vm_area_struct *vma) { + /* + * Access or read was blocked by protection keys. 
We do + * this check before any others because we do not want + * to, for instance, confuse a protection-key-denied + * write with one for which we should do a COW. + */ + if (error_code & PF_PK) + return 1; + if (error_code & PF_WRITE) { /* write, present and write, not present: */ if (unlikely(!(vma->vm_flags & VM_WRITE))) diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 2f0a329..bab259e 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -11,6 +11,7 @@ #include #include +#include #include static inline pte_t gup_get_pte(pte_t *ptep) @@ -89,6 +90,10 @@ static inline int pte_allows_gup(unsigned long pteval, int write) if ((pteval & need_pte_bits) != need_pte_bits) return 0; + /* Check memory protection keys permissions. */ + if (!__pkru_allows_pkey(pte_flags_pkey(pteval), write)) + return 0; + return 1; } -- cgit v1.1 From 07f146f53e8de826e4afa3a88ea65bdb13c24959 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:22 -0800 Subject: x86/mm/pkeys: Optimize fault handling in access_error() We might not strictly have to make modifictions to access_error() to check the VMA here. If we do not, we will do this: 1. app sets VMA pkey to K 2. app touches a !present page 3. do_page_fault(), allocates and maps page, sets pte.pkey=K 4. return to userspace 5. touch instruction reexecutes, but triggers PF_PK 6. do PKEY signal What happens with this patch applied: 1. app sets VMA pkey to K 2. app touches a !present page 3. do_page_fault() notices that K is inaccessible 4. do PKEY signal We basically skip the fault that does an allocation. So what this lets us do is protect areas from even being *populated* unless it is accessible according to protection keys. That seems handy to me and makes protection keys work more like an mprotect()'d mapping. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210222.EBB63D8C@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 319331a..68ecdff 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -900,10 +900,16 @@ bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) static inline bool bad_area_access_from_pkeys(unsigned long error_code, struct vm_area_struct *vma) { + /* This code is always called on the current mm */ + bool foreign = false; + if (!boot_cpu_has(X86_FEATURE_OSPKE)) return false; if (error_code & PF_PK) return true; + /* this checks permission keys on the VMA: */ + if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign)) + return true; return false; } @@ -1091,6 +1097,8 @@ int show_unhandled_signals = 1; static inline int access_error(unsigned long error_code, struct vm_area_struct *vma) { + /* This is only called for the current mm, so: */ + bool foreign = false; /* * Access or read was blocked by protection keys. We do * this check before any others because we do not want @@ -1099,6 +1107,13 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) */ if (error_code & PF_PK) return 1; + /* + * Make sure to check the VMA so that we do not perform + * faults just to hit a PF_PK as soon as we fill in a + * page. 
+ */ + if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign)) + return 1; if (error_code & PF_WRITE) { /* write, present and write, not present: */ -- cgit v1.1 From d61172b4b695b821388cdb6088a41d431bcbb93b Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:24 -0800 Subject: mm/core, x86/mm/pkeys: Differentiate instruction fetches As discussed earlier, we attempt to enforce protection keys in software. However, the code checks all faults to ensure that they are not violating protection key permissions. It was assumed that all faults are either write faults where we check PKRU[key].WD (write disable) or read faults where we check the AD (access disable) bit. But, there is a third category of faults for protection keys: instruction faults. Instruction faults never run afoul of protection keys because protection keys do not affect instruction fetches. So, plumb the PF_INSTR bit down in to the arch_vma_access_permitted() function where we do the protection key checks. We also add a new FAULT_FLAG_INSTRUCTION. This is because handle_mm_fault() is not passed the architecture-specific error_code where we keep PF_INSTR, so we need to encode the instruction fetch information in to the arch-generic fault flags. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210224.96928009@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 68ecdff..d81744e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -908,7 +908,8 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code, if (error_code & PF_PK) return true; /* this checks permission keys on the VMA: */ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), + (error_code & PF_INSTR), foreign)) return true; return false; } @@ -1112,7 +1113,8 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) * faults just to hit a PF_PK as soon as we fill in a * page. */ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), + (error_code & PF_INSTR), foreign)) + return 1; if (error_code & PF_WRITE) { @@ -1267,6 +1269,8 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, if (error_code & PF_WRITE) flags |= FAULT_FLAG_WRITE; + if (error_code & PF_INSTR) + flags |= FAULT_FLAG_INSTRUCTION; /* * When running in the kernel we expect faults to occur only to -- cgit v1.1 From 62b5f7d013fc455b8db26cf01e421f4c0d264b92 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 12 Feb 2016 13:02:40 -0800 Subject: mm/core, x86/mm/pkeys: Add execute-only protection keys support Protection keys provide new page-based protection in hardware. But, they have an interesting attribute: they only affect data accesses and never affect instruction fetches. That means that if we set up some memory which is set as "access-disabled" via protection keys, we can still execute from it. This patch uses protection keys to set up mappings to do just that. 
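[Illustrative aside, not part of the patch: a minimal userspace sketch of the execute-only behavior this commit describes, assuming an x86-64 Linux target. The code is staged through a writable mapping and then flipped to PROT_EXEC-only, since an execute-only mapping can never be written directly. On hardware with OSPKE support and this series applied, the final data read should fault; on older hardware it simply succeeds, because legacy x86 page tables cannot express execute-without-read.]

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>

/* SIGSEGV handler: the data read below was blocked; report and exit. */
static void segv_handler(int sig)
{
	static const char msg[] = "data read of exec-only page faulted\n";

	(void)sig;
	write(1, msg, sizeof(msg) - 1);	/* async-signal-safe */
	_exit(0);
}

int main(void)
{
	long sz = sysconf(_SC_PAGESIZE);
	/* x86-64 machine code for: mov $0x2a, %eax; ret */
	unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
	unsigned char *p;
	int (*fn)(void);

	signal(SIGSEGV, segv_handler);

	/* Stage the code through an ordinary writable mapping... */
	p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	memcpy(p, code, sizeof(code));

	/* ...then make it execute-only (no PROT_READ/PROT_WRITE). */
	if (mprotect(p, sz, PROT_EXEC))
		return 1;

	/* Instruction fetches are never blocked by protection keys. */
	fn = (int (*)(void))p;
	printf("executed: %d\n", fn());

	/* Data access: SIGSEGVs on pkey hardware, succeeds elsewhere. */
	printf("read: %d\n", p[0]);
	return 0;
}
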
If a user calls: mmap(..., PROT_EXEC); or mprotect(ptr, sz, PROT_EXEC); (note PROT_EXEC-only without PROT_READ/WRITE), the kernel will notice this, and set a special protection key on the memory. It also sets the appropriate bits in the Protection Keys User Rights (PKRU) register so that the memory becomes unreadable and unwritable. I haven't found any userspace that does this today. With this facility in place, we expect userspace to move to use it eventually. Userspace _could_ start doing this today. Any PROT_EXEC calls get converted to PROT_READ inside the kernel, and would transparently be upgraded to "true" PROT_EXEC with this code. IOW, userspace never has to do any PROT_EXEC runtime detection. This feature provides enhanced protection against leaking executable memory contents. This helps thwart attacks which are attempting to find ROP gadgets on the fly. But, the security provided by this approach is not comprehensive. The PKRU register which controls access permissions is a normal user register writable from unprivileged userspace. An attacker who can execute the 'wrpkru' instruction can easily disable the protection provided by this feature. The protection key that is used for execute-only support is permanently dedicated at compile time. This is fine for now because there is currently no API to set a protection key other than this one. Despite there being a constant PKRU value across the entire system, we do not set it unless this feature is in use in a process. That is to preserve the PKRU XSAVE 'init state', which can lead to faster context switches. PKRU *is* a user register and the kernel is modifying it. That means that code doing: pkru = rdpkru() pkru |= 0x100; mmap(..., PROT_EXEC); wrpkru(pkru); could lose the bits in PKRU that enforce execute-only permissions. To avoid this, we suggest avoiding ever calling mmap() or mprotect() when the PKRU value is expected to be unstable. Signed-off-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Andrea Arcangeli Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Chen Gang Cc: Dan Williams Cc: Dave Chinner Cc: Dave Hansen Cc: David Hildenbrand Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Kees Cook Cc: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Linus Torvalds Cc: Mel Gorman Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Piotr Kwapulinski Cc: Rik van Riel Cc: Stephen Smalley Cc: Vladimir Murzin Cc: Will Deacon Cc: keescook@google.com Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210240.CB4BB5CA@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/Makefile | 2 + arch/x86/mm/fault.c | 10 +++++ arch/x86/mm/pkeys.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+) create mode 100644 arch/x86/mm/pkeys.c (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index f9d38a4..67cf2e1 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -34,3 +34,5 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o obj-$(CONFIG_X86_INTEL_MPX) += mpx.o +obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o + diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index d81744e..5877b92 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1108,6 +1108,16 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) */ if (error_code & PF_PK) return 1; + + if (!(error_code & PF_INSTR)) { + /* + * Assume all accesses require either read or execute + * permissions. This is not an instruction access, so + * it requires read permissions. + */ + if (!(vma->vm_flags & VM_READ)) + return 1; + } /* * Make sure to check the VMA so that we do not perform * faults just to hit a PF_PK as soon as we fill in a diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c new file mode 100644 index 0000000..e8c4744 --- /dev/null +++ b/arch/x86/mm/pkeys.c @@ -0,0 +1,101 @@ +/* + * Intel Memory Protection Keys management + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include <linux/mm_types.h> /* mm_struct, vma, etc... */ +#include <linux/pkeys.h> /* PKEY_* */ +#include <uapi/asm-generic/mman-common.h> + +#include <asm/cpufeature.h> /* boot_cpu_has, ... */ +#include <asm/mmu_context.h> /* vma_pkey() */ +#include <asm/fpu/internal.h> /* fpregs_active() */ +int __execute_only_pkey(struct mm_struct *mm) +{ + int ret; + + /* + * We do not want to go through the relatively costly + * dance to set PKRU if we do not need to. Check it + * first and assume that if the execute-only pkey is + * write-disabled that we do not have to set it + * ourselves. We need preempt off so that nobody + * can make fpregs inactive. + */ + preempt_disable(); + if (fpregs_active() && + !__pkru_allows_read(read_pkru(), PKEY_DEDICATED_EXECUTE_ONLY)) { + preempt_enable(); + return PKEY_DEDICATED_EXECUTE_ONLY; + } + preempt_enable(); + ret = arch_set_user_pkey_access(current, PKEY_DEDICATED_EXECUTE_ONLY, + PKEY_DISABLE_ACCESS); + /* + * If the PKRU-set operation failed somehow, just return + * 0 and effectively disable execute-only support. 
*/ + if (ret) + return 0; + + return PKEY_DEDICATED_EXECUTE_ONLY; +} + +static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) +{ + /* Do this check first since the vm_flags should be hot */ + if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC) + return false; + if (vma_pkey(vma) != PKEY_DEDICATED_EXECUTE_ONLY) + return false; + + return true; +} + +/* + * This is only called for *plain* mprotect calls. + */ +int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey) +{ + /* + * Is this an mprotect_pkey() call? If so, never + * override the value that came from the user. + */ + if (pkey != -1) + return pkey; + /* + * Look for a protection-key-driven execute-only mapping + * which is now being given permissions that are not + * execute-only. Move it back to the default pkey. + */ + if (vma_is_pkey_exec_only(vma) && + (prot & (PROT_READ|PROT_WRITE))) { + return 0; + } + /* + * The mapping is execute-only. Go try to get the + * execute-only protection key. If we fail to do that, + * fall through as if we do not have execute-only + * support. + */ + if (prot == PROT_EXEC) { + pkey = execute_only_pkey(vma->vm_mm); + if (pkey > 0) + return pkey; + } + /* + * This is a vanilla, non-pkey mprotect (or we failed to + * setup execute-only), inherit the pkey from the VMA we + * are working on. + */ + return vma_pkey(vma); +} -- cgit v1.1 From e21555436f196c241503c7c6240272e57783235c Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Tue, 1 Mar 2016 11:41:33 -0800 Subject: x86/mm/pkeys: Fix access_error() denial of writes to write-only VMA Andrey Wagin reported that a simple test case was broken by: 62b5f7d013fc ("mm/core, x86/mm/pkeys: Add execute-only protection keys support") This test case creates an unreadable VMA and my patch assumed that all writes must be to readable VMAs. The simplest fix for this is to remove the pkey-related bits in access_error(). For execute-only support, I believe the existing version is sufficient because the permissions we are trying to enforce are entirely expressed in vma->vm_flags. We just depend on pkeys to get *an* exception, it does not matter that PF_PK was set, or even what state PKRU is in. I will re-add the necessary bits with the full pkeys implementation that includes the new syscalls. The three cases that matter are: 1. If a write to an execute-only VMA occurs, we will see PF_WRITE set, but !VM_WRITE on the VMA, and return 1. All execute-only VMAs have VM_WRITE clear by definition. 2. If a read occurs on a present PTE, we will fall in to the "read, present" case and return 1. 3. If a read occurs to a non-present PTE, we will miss the "read, not present" case, because the execute-only VMA will have VM_EXEC set, and we will properly return 0 allowing the PTE to be populated. Test program: int main() { int *p; p = mmap(NULL, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); p[0] = 1; return 0; } Reported-by: Andrey Wagin Signed-off-by: Dave Hansen Acked-by: Kirill A. Shutemov Cc: Dave Hansen Cc: Kirill A. 
Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Cc: linux-next@vger.kernel.org Fixes: 62b5f7d013fc ("mm/core, x86/mm/pkeys: Add execute-only protection keys support") Link: http://lkml.kernel.org/r/20160301194133.65D0110C@viggo.jf.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'arch/x86/mm') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 5877b92..6138db4 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1101,24 +1101,6 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) /* This is only called for the current mm, so: */ bool foreign = false; /* - * Access or read was blocked by protection keys. We do - * this check before any others because we do not want - * to, for instance, confuse a protection-key-denied - * write with one for which we should do a COW. - */ - if (error_code & PF_PK) - return 1; - - if (!(error_code & PF_INSTR)) { - /* - * Assume all accesses require either read or execute - * permissions. This is not an instruction access, so - * it requires read permissions. - */ - if (!(vma->vm_flags & VM_READ)) - return 1; - } - /* * Make sure to check the VMA so that we do not perform * faults just to hit a PF_PK as soon as we fill in a * page. -- cgit v1.1
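[Illustrative aside: the one-line reproducer quoted in the changelog above, fleshed out with error checking; an untested sketch with unchanged semantics. With this fix applied, the write to a PROT_WRITE-only mapping must succeed whether or not protection-key hardware is present.]

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int *p;

	/* Write-only, never-readable anonymous mapping, as in the test case. */
	p = mmap(NULL, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* First touch faults the page in; access_error() must not deny it. */
	p[0] = 1;

	printf("write to PROT_WRITE-only mapping succeeded\n");
	return 0;
}
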