Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c     | 1078
-rw-r--r--  arch/x86/mm/iomap_32.c  |   58
-rw-r--r--  arch/x86/mm/memtest.c   |  156
-rw-r--r--  arch/x86/mm/numa_32.c   |    2
-rw-r--r--  arch/x86/mm/numa_64.c   |    2
-rw-r--r--  arch/x86/mm/pageattr.c  |   22
-rw-r--r--  arch/x86/mm/pat.c       |   46
7 files changed, 781 insertions(+), 583 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2964417..a03b727 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1,74 +1,79 @@
/*
* Copyright (C) 1995 Linus Torvalds
- * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
+ * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
+ * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
*/
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mmiotrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
+#include <linux/mmiotrace.h>
+#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
-#include <linux/bootmem.h> /* for max_low_pfn */
-#include <linux/vmalloc.h>
-#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/vt_kern.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/module.h>
#include <linux/kdebug.h>
+#include <linux/errno.h>
#include <linux/magic.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mman.h>
+#include <linux/tty.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm-generic/sections.h>
-#include <asm/system.h>
-#include <asm/desc.h>
-#include <asm/segment.h>
-#include <asm/pgalloc.h>
-#include <asm/smp.h>
#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm/segment.h>
+#include <asm/system.h>
#include <asm/proto.h>
-#include <asm-generic/sections.h>
#include <asm/traps.h>
+#include <asm/desc.h>
/*
- * Page fault error code bits
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- * bit 3 == 1 means use of reserved bit detected
- * bit 4 == 1 means fault was an instruction fetch
+ * Page fault error code bits:
+ *
+ * bit 0 == 0: no page found 1: protection fault
+ * bit 1 == 0: read access 1: write access
+ * bit 2 == 0: kernel-mode access 1: user-mode access
+ * bit 3 == 1: use of reserved bit detected
+ * bit 4 == 1: fault was an instruction fetch
*/
-#define PF_PROT (1<<0)
-#define PF_WRITE (1<<1)
-#define PF_USER (1<<2)
-#define PF_RSVD (1<<3)
-#define PF_INSTR (1<<4)
+enum x86_pf_error_code {
+ PF_PROT = 1 << 0,
+ PF_WRITE = 1 << 1,
+ PF_USER = 1 << 2,
+ PF_RSVD = 1 << 3,
+ PF_INSTR = 1 << 4,
+};
+
+/*
+ * Returns 0 if mmiotrace is disabled, or if the fault is not
+ * handled by mmiotrace:
+ */
static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
-#ifdef CONFIG_MMIOTRACE
if (unlikely(is_kmmio_active()))
if (kmmio_handler(regs, addr) == 1)
return -1;
-#endif
return 0;
}
static inline int notify_page_fault(struct pt_regs *regs)
{
-#ifdef CONFIG_KPROBES
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
- if (!user_mode_vm(regs)) {
+ if (kprobes_built_in() && !user_mode_vm(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
@@ -76,29 +81,76 @@ static inline int notify_page_fault(struct pt_regs *regs)
}
return ret;
-#else
- return 0;
-#endif
}
/*
- * X86_32
- * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
- * Check that here and ignore it.
+ * Prefetch quirks:
*
- * X86_64
- * Sometimes the CPU reports invalid exceptions on prefetch.
- * Check that here and ignore it.
+ * 32-bit mode:
*
- * Opcode checker based on code by Richard Brunner
+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+ * Check that here and ignore it.
+ *
+ * 64-bit mode:
+ *
+ * Sometimes the CPU reports invalid exceptions on prefetch.
+ * Check that here and ignore it.
+ *
+ * Opcode checker based on code by Richard Brunner.
*/
-static int is_prefetch(struct pt_regs *regs, unsigned long error_code,
- unsigned long addr)
+static inline int
+check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+ unsigned char opcode, int *prefetch)
{
+ unsigned char instr_hi = opcode & 0xf0;
+ unsigned char instr_lo = opcode & 0x0f;
+
+ switch (instr_hi) {
+ case 0x20:
+ case 0x30:
+ /*
+ * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
+ * In X86_64 long mode, the CPU will signal invalid
+ * opcode if some of these prefixes are present so
+ * X86_64 will never get here anyway
+ */
+ return ((instr_lo & 7) == 0x6);
+#ifdef CONFIG_X86_64
+ case 0x40:
+ /*
+ * In AMD64 long mode 0x40..0x4F are valid REX prefixes
+ * Need to figure out under what instruction mode the
+ * instruction was issued. Could check the LDT for lm,
+ * but for now it's good enough to assume that long
+ * mode only uses well known segments or kernel.
+ */
+ return (!user_mode(regs)) || (regs->cs == __USER_CS);
+#endif
+ case 0x60:
+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
+ return (instr_lo & 0xC) == 0x4;
+ case 0xF0:
+ /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
+ return !instr_lo || (instr_lo>>1) == 1;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+ if (probe_kernel_address(instr, opcode))
+ return 0;
+
+ *prefetch = (instr_lo == 0xF) &&
+ (opcode == 0x0D || opcode == 0x18);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+{
+ unsigned char *max_instr;
unsigned char *instr;
- int scan_more = 1;
int prefetch = 0;
- unsigned char *max_instr;
/*
 * If it was an exec (instruction fetch) fault on NX page, then
@@ -107,106 +159,170 @@ static int is_prefetch(struct pt_regs *regs, unsigned long error_code,
if (error_code & PF_INSTR)
return 0;
- instr = (unsigned char *)convert_ip_to_linear(current, regs);
+ instr = (void *)convert_ip_to_linear(current, regs);
max_instr = instr + 15;
if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
return 0;
- while (scan_more && instr < max_instr) {
+ while (instr < max_instr) {
unsigned char opcode;
- unsigned char instr_hi;
- unsigned char instr_lo;
if (probe_kernel_address(instr, opcode))
break;
- instr_hi = opcode & 0xf0;
- instr_lo = opcode & 0x0f;
instr++;
- switch (instr_hi) {
- case 0x20:
- case 0x30:
- /*
- * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
- * In X86_64 long mode, the CPU will signal invalid
- * opcode if some of these prefixes are present so
- * X86_64 will never get here anyway
- */
- scan_more = ((instr_lo & 7) == 0x6);
- break;
-#ifdef CONFIG_X86_64
- case 0x40:
- /*
- * In AMD64 long mode 0x40..0x4F are valid REX prefixes
- * Need to figure out under what instruction mode the
- * instruction was issued. Could check the LDT for lm,
- * but for now it's good enough to assume that long
- * mode only uses well known segments or kernel.
- */
- scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
- break;
-#endif
- case 0x60:
- /* 0x64 thru 0x67 are valid prefixes in all modes. */
- scan_more = (instr_lo & 0xC) == 0x4;
- break;
- case 0xF0:
- /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
- scan_more = !instr_lo || (instr_lo>>1) == 1;
- break;
- case 0x00:
- /* Prefetch instruction is 0x0F0D or 0x0F18 */
- scan_more = 0;
-
- if (probe_kernel_address(instr, opcode))
- break;
- prefetch = (instr_lo == 0xF) &&
- (opcode == 0x0D || opcode == 0x18);
+ if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
break;
- default:
- scan_more = 0;
- break;
- }
}
return prefetch;
}
-static void force_sig_info_fault(int si_signo, int si_code,
- unsigned long address, struct task_struct *tsk)
+static void
+force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+ struct task_struct *tsk)
{
siginfo_t info;
- info.si_signo = si_signo;
- info.si_errno = 0;
- info.si_code = si_code;
- info.si_addr = (void __user *)address;
+ info.si_signo = si_signo;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void __user *)address;
+
force_sig_info(si_signo, &info, tsk);
}
-#ifdef CONFIG_X86_64
-static int bad_address(void *p)
+DEFINE_SPINLOCK(pgd_lock);
+LIST_HEAD(pgd_list);
+
+#ifdef CONFIG_X86_32
+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
- unsigned long dummy;
- return probe_kernel_address((unsigned long *)p, dummy);
+ unsigned index = pgd_index(address);
+ pgd_t *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd += index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ return NULL;
+
+ /*
+ * set_pgd(pgd, *pgd_k); here would be useless on PAE
+ * and redundant with the set_pmd() on non-PAE. As would
+ * set_pud.
+ */
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ return NULL;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ return NULL;
+
+ if (!pmd_present(*pmd)) {
+ set_pmd(pmd, *pmd_k);
+ arch_flush_lazy_mmu_mode();
+ } else {
+ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+ }
+
+ return pmd_k;
+}
+
+void vmalloc_sync_all(void)
+{
+ unsigned long address;
+
+ if (SHARED_KERNEL_PMD)
+ return;
+
+ for (address = VMALLOC_START & PMD_MASK;
+ address >= TASK_SIZE && address < FIXADDR_TOP;
+ address += PMD_SIZE) {
+
+ unsigned long flags;
+ struct page *page;
+
+ spin_lock_irqsave(&pgd_lock, flags);
+ list_for_each_entry(page, &pgd_list, lru) {
+ if (!vmalloc_sync_one(page_address(page), address))
+ break;
+ }
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ }
+}
+
+/*
+ * 32-bit:
+ *
+ * Handle a fault on the vmalloc or module mapping area
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+ unsigned long pgd_paddr;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ /* Make sure we are in vmalloc area: */
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "current" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_paddr = read_cr3();
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Did it hit the DOS screen memory VA from vm86 mode?
+ */
+static inline void
+check_v8086_mode(struct pt_regs *regs, unsigned long address,
+ struct task_struct *tsk)
+{
+ unsigned long bit;
+
+ if (!v8086_mode(regs))
+ return;
+
+ bit = (address - 0xA0000) >> PAGE_SHIFT;
+ if (bit < 32)
+ tsk->thread.screen_bitmap |= 1 << bit;
}
-#endif
static void dump_pagetable(unsigned long address)
{
-#ifdef CONFIG_X86_32
__typeof__(pte_val(__pte(0))) page;
page = read_cr3();
page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+
#ifdef CONFIG_X86_PAE
printk("*pdpt = %016Lx ", page);
if ((page >> PAGE_SHIFT) < max_low_pfn
&& page & _PAGE_PRESENT) {
page &= PAGE_MASK;
page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
- & (PTRS_PER_PMD - 1)];
+ & (PTRS_PER_PMD - 1)];
printk(KERN_CONT "*pde = %016Lx ", page);
page &= ~_PAGE_NX;
}
@@ -218,19 +334,145 @@ static void dump_pagetable(unsigned long address)
* We must not directly access the pte in the highpte
* case if the page table is located in highmem.
* And let's rather not kmap-atomic the pte, just in case
- * it's allocated already.
+ * it's allocated already:
*/
if ((page >> PAGE_SHIFT) < max_low_pfn
&& (page & _PAGE_PRESENT)
&& !(page & _PAGE_PSE)) {
+
page &= PAGE_MASK;
page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
- & (PTRS_PER_PTE - 1)];
+ & (PTRS_PER_PTE - 1)];
printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
}
printk("\n");
-#else /* CONFIG_X86_64 */
+}
+
+#else /* CONFIG_X86_64: */
+
+void vmalloc_sync_all(void)
+{
+ unsigned long address;
+
+ for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+ address += PGDIR_SIZE) {
+
+ const pgd_t *pgd_ref = pgd_offset_k(address);
+ unsigned long flags;
+ struct page *page;
+
+ if (pgd_none(*pgd_ref))
+ continue;
+
+ spin_lock_irqsave(&pgd_lock, flags);
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ }
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ }
+}
+
+/*
+ * 64-bit:
+ *
+ * Handle a fault on the vmalloc area
+ *
+ * This assumes no large pages in there.
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+ pgd_t *pgd, *pgd_ref;
+ pud_t *pud, *pud_ref;
+ pmd_t *pmd, *pmd_ref;
+ pte_t *pte, *pte_ref;
+
+ /* Make sure we are in vmalloc area: */
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+ /*
+ * Copy kernel mappings over when needed. This can also
+ * happen within a race in page table update. In the latter
+ * case just flush:
+ */
+ pgd = pgd_offset(current->active_mm, address);
+ pgd_ref = pgd_offset_k(address);
+ if (pgd_none(*pgd_ref))
+ return -1;
+
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+ /*
+ * Below here mismatches are bugs because these lower tables
+ * are shared:
+ */
+
+ pud = pud_offset(pgd, address);
+ pud_ref = pud_offset(pgd_ref, address);
+ if (pud_none(*pud_ref))
+ return -1;
+
+ if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+ BUG();
+
+ pmd = pmd_offset(pud, address);
+ pmd_ref = pmd_offset(pud_ref, address);
+ if (pmd_none(*pmd_ref))
+ return -1;
+
+ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+ BUG();
+
+ pte_ref = pte_offset_kernel(pmd_ref, address);
+ if (!pte_present(*pte_ref))
+ return -1;
+
+ pte = pte_offset_kernel(pmd, address);
+
+ /*
+ * Don't use pte_page here, because the mappings can point
+ * outside mem_map, and the NUMA hash lookup cannot handle
+ * that:
+ */
+ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
+ BUG();
+
+ return 0;
+}
+
+static const char errata93_warning[] =
+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
+KERN_ERR "******* Please consider a BIOS update.\n"
+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
+
+/*
+ * No vm86 mode in 64-bit mode:
+ */
+static inline void
+check_v8086_mode(struct pt_regs *regs, unsigned long address,
+ struct task_struct *tsk)
+{
+}
+
+static int bad_address(void *p)
+{
+ unsigned long dummy;
+
+ return probe_kernel_address((unsigned long *)p, dummy);
+}
+
+static void dump_pagetable(unsigned long address)
+{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
@@ -239,102 +481,77 @@ static void dump_pagetable(unsigned long address)
pgd = (pgd_t *)read_cr3();
pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
+
pgd += pgd_index(address);
- if (bad_address(pgd)) goto bad;
+ if (bad_address(pgd))
+ goto bad;
+
printk("PGD %lx ", pgd_val(*pgd));
- if (!pgd_present(*pgd)) goto ret;
+
+ if (!pgd_present(*pgd))
+ goto out;
pud = pud_offset(pgd, address);
- if (bad_address(pud)) goto bad;
+ if (bad_address(pud))
+ goto bad;
+
printk("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud) || pud_large(*pud))
- goto ret;
+ goto out;
pmd = pmd_offset(pud, address);
- if (bad_address(pmd)) goto bad;
+ if (bad_address(pmd))
+ goto bad;
+
printk("PMD %lx ", pmd_val(*pmd));
- if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
+ if (!pmd_present(*pmd) || pmd_large(*pmd))
+ goto out;
pte = pte_offset_kernel(pmd, address);
- if (bad_address(pte)) goto bad;
+ if (bad_address(pte))
+ goto bad;
+
printk("PTE %lx", pte_val(*pte));
-ret:
+out:
printk("\n");
return;
bad:
printk("BAD\n");
-#endif
}
-#ifdef CONFIG_X86_32
-static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
-{
- unsigned index = pgd_index(address);
- pgd_t *pgd_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
-
- pgd += index;
- pgd_k = init_mm.pgd + index;
-
- if (!pgd_present(*pgd_k))
- return NULL;
+#endif /* CONFIG_X86_64 */
- /*
- * set_pgd(pgd, *pgd_k); here would be useless on PAE
- * and redundant with the set_pmd() on non-PAE. As would
- * set_pud.
- */
-
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
- if (!pud_present(*pud_k))
- return NULL;
-
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- return NULL;
- if (!pmd_present(*pmd)) {
- set_pmd(pmd, *pmd_k);
- arch_flush_lazy_mmu_mode();
- } else
- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
- return pmd_k;
-}
-#endif
-
-#ifdef CONFIG_X86_64
-static const char errata93_warning[] =
-KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
-KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
-KERN_ERR "******* Please consider a BIOS update.\n"
-KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
-#endif
-
-/* Workaround for K8 erratum #93 & buggy BIOS.
- BIOS SMM functions are required to use a specific workaround
- to avoid corruption of the 64bit RIP register on C stepping K8.
- A lot of BIOS that didn't get tested properly miss this.
- The OS sees this as a page fault with the upper 32bits of RIP cleared.
- Try to work around it here.
- Note we only handle faults in kernel here.
- Does nothing for X86_32
+/*
+ * Workaround for K8 erratum #93 & buggy BIOS.
+ *
+ * BIOS SMM functions are required to use a specific workaround
+ * to avoid corruption of the 64bit RIP register on C stepping K8.
+ *
+ * A lot of BIOS that didn't get tested properly miss this.
+ *
+ * The OS sees this as a page fault with the upper 32bits of RIP cleared.
+ * Try to work around it here.
+ *
+ * Note we only handle faults in kernel here.
+ * Does nothing on 32-bit.
*/
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
- static int warned;
+ static int once;
+
if (address != regs->ip)
return 0;
+
if ((address >> 32) != 0)
return 0;
+
address |= 0xffffffffUL << 32;
if ((address >= (u64)_stext && address <= (u64)_etext) ||
(address >= MODULES_VADDR && address <= MODULES_END)) {
- if (!warned) {
+ if (!once) {
printk(errata93_warning);
- warned = 1;
+ once = 1;
}
regs->ip = address;
return 1;
@@ -344,16 +561,17 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
}
/*
- * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal
- * addresses >4GB. We catch this in the page fault handler because these
- * addresses are not reachable. Just detect this case and return. Any code
+ * Work around K8 erratum #100: K8 in compat mode occasionally jumps
+ * to illegal addresses >4GB.
+ *
+ * We catch this in the page fault handler because these addresses
+ * are not reachable. Just detect this case and return. Any code
* segment in LDT is compatibility mode.
*/
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
- (address >> 32))
+ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
return 1;
#endif
return 0;
@@ -363,8 +581,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
unsigned long nr;
+
/*
- * Pentium F0 0F C7 C8 bug workaround.
+ * Pentium F0 0F C7 C8 bug workaround:
*/
if (boot_cpu_data.f00f_bug) {
nr = (address - idt_descr.address) >> 3;
@@ -378,80 +597,87 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
return 0;
}
-static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
+static const char nx_warning[] = KERN_CRIT
+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
+
+static void
+show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
-#ifdef CONFIG_X86_32
if (!oops_may_print())
return;
-#endif
-#ifdef CONFIG_X86_PAE
if (error_code & PF_INSTR) {
unsigned int level;
+
pte_t *pte = lookup_address(address, &level);
if (pte && pte_present(*pte) && !pte_exec(*pte))
- printk(KERN_CRIT "kernel tried to execute "
- "NX-protected page - exploit attempt? "
- "(uid: %d)\n", current_uid());
+ printk(nx_warning, current_uid());
}
-#endif
printk(KERN_ALERT "BUG: unable to handle kernel ");
if (address < PAGE_SIZE)
printk(KERN_CONT "NULL pointer dereference");
else
printk(KERN_CONT "paging request");
+
printk(KERN_CONT " at %p\n", (void *) address);
printk(KERN_ALERT "IP:");
printk_address(regs->ip, 1);
+
dump_pagetable(address);
}
-#ifdef CONFIG_X86_64
-static noinline void pgtable_bad(struct pt_regs *regs,
- unsigned long error_code, unsigned long address)
+static noinline void
+pgtable_bad(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
- unsigned long flags = oops_begin();
- int sig = SIGKILL;
- struct task_struct *tsk = current;
+ struct task_struct *tsk;
+ unsigned long flags;
+ int sig;
+
+ flags = oops_begin();
+ tsk = current;
+ sig = SIGKILL;
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
tsk->comm, address);
dump_pagetable(address);
- tsk->thread.cr2 = address;
- tsk->thread.trap_no = 14;
- tsk->thread.error_code = error_code;
+
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
+
if (__die("Bad pagetable", regs, error_code))
sig = 0;
+
oops_end(flags, regs, sig);
}
-#endif
-static noinline void no_context(struct pt_regs *regs,
- unsigned long error_code, unsigned long address)
+static noinline void
+no_context(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
struct task_struct *tsk = current;
unsigned long *stackend;
-
-#ifdef CONFIG_X86_64
unsigned long flags;
int sig;
-#endif
- /* Are we prepared to handle this kernel fault? */
+ /* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
/*
- * X86_32
- * Valid to do another page fault here, because if this fault
- * had been triggered by is_prefetch fixup_exception would have
- * handled it.
+ * 32-bit:
+ *
+ * Valid to do another page fault here, because if this fault
+ * had been triggered by is_prefetch fixup_exception would have
+ * handled it.
+ *
+ * 64-bit:
*
- * X86_64
- * Hall of shame of CPU/BIOS bugs.
+ * Hall of shame of CPU/BIOS bugs.
*/
if (is_prefetch(regs, error_code, address))
return;
@@ -461,54 +687,70 @@ static noinline void no_context(struct pt_regs *regs,
/*
* Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
+ * terminate things with extreme prejudice:
*/
-#ifdef CONFIG_X86_32
- bust_spinlocks(1);
-#else
flags = oops_begin();
-#endif
show_fault_oops(regs, error_code, address);
- stackend = end_of_stack(tsk);
+ stackend = end_of_stack(tsk);
if (*stackend != STACK_END_MAGIC)
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
- tsk->thread.cr2 = address;
- tsk->thread.trap_no = 14;
- tsk->thread.error_code = error_code;
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
-#ifdef CONFIG_X86_32
- die("Oops", regs, error_code);
- bust_spinlocks(0);
- do_exit(SIGKILL);
-#else
sig = SIGKILL;
if (__die("Oops", regs, error_code))
sig = 0;
+
/* Executive summary in case the body of the oops scrolled away */
printk(KERN_EMERG "CR2: %016lx\n", address);
+
oops_end(flags, regs, sig);
-#endif
}
-static void __bad_area_nosemaphore(struct pt_regs *regs,
- unsigned long error_code, unsigned long address,
- int si_code)
+/*
+ * Print out info about fatal segfaults, if the show_unhandled_signals
+ * sysctl is set:
+ */
+static inline void
+show_signal_msg(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, struct task_struct *tsk)
+{
+ if (!unhandled_signal(tsk, SIGSEGV))
+ return;
+
+ if (!printk_ratelimit())
+ return;
+
+ printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+ tsk->comm, task_pid_nr(tsk), address,
+ (void *)regs->ip, (void *)regs->sp, error_code);
+
+ print_vma_addr(KERN_CONT " in ", regs->ip);
+
+ printk(KERN_CONT "\n");
+}
+
+static void
+__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, int si_code)
{
struct task_struct *tsk = current;
/* User mode accesses just cause a SIGSEGV */
if (error_code & PF_USER) {
/*
- * It's possible to have interrupts off here.
+ * It's possible to have interrupts off here:
*/
local_irq_enable();
/*
* Valid to do another page fault here because this one came
- * from user space.
+ * from user space:
*/
if (is_prefetch(regs, error_code, address))
return;
@@ -516,22 +758,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs,
if (is_errata100(regs, address))
return;
- if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
- printk_ratelimit()) {
- printk(
- "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
- task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
- tsk->comm, task_pid_nr(tsk), address,
- (void *) regs->ip, (void *) regs->sp, error_code);
- print_vma_addr(" in ", regs->ip);
- printk("\n");
- }
+ if (unlikely(show_unhandled_signals))
+ show_signal_msg(regs, error_code, address, tsk);
+
+ /* Kernel addresses are always protection faults: */
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.trap_no = 14;
- tsk->thread.cr2 = address;
- /* Kernel addresses are always protection faults */
- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
- tsk->thread.trap_no = 14;
force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+
return;
}
@@ -541,15 +777,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs,
no_context(regs, error_code, address);
}
-static noinline void bad_area_nosemaphore(struct pt_regs *regs,
- unsigned long error_code, unsigned long address)
+static noinline void
+bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}
-static void __bad_area(struct pt_regs *regs,
- unsigned long error_code, unsigned long address,
- int si_code)
+static void
+__bad_area(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, int si_code)
{
struct mm_struct *mm = current->mm;
@@ -562,67 +799,75 @@ static void __bad_area(struct pt_regs *regs,
__bad_area_nosemaphore(regs, error_code, address, si_code);
}
-static noinline void bad_area(struct pt_regs *regs,
- unsigned long error_code, unsigned long address)
+static noinline void
+bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
__bad_area(regs, error_code, address, SEGV_MAPERR);
}
-static noinline void bad_area_access_error(struct pt_regs *regs,
- unsigned long error_code, unsigned long address)
+static noinline void
+bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
__bad_area(regs, error_code, address, SEGV_ACCERR);
}
/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
-static void out_of_memory(struct pt_regs *regs,
- unsigned long error_code, unsigned long address)
+static void
+out_of_memory(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
/*
 * We ran out of memory, call the OOM killer, and return to userspace
- * (which will retry the fault, or kill us if we got oom-killed).
+ * (which will retry the fault, or kill us if we got oom-killed):
*/
up_read(&current->mm->mmap_sem);
+
pagefault_out_of_memory();
}
-static void do_sigbus(struct pt_regs *regs,
- unsigned long error_code, unsigned long address)
+static void
+do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
up_read(&mm->mmap_sem);
- /* Kernel mode? Handle exceptions or die */
+ /* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER))
no_context(regs, error_code, address);
-#ifdef CONFIG_X86_32
- /* User space => ok to do another page fault */
+
+ /* User-space => ok to do another page fault: */
if (is_prefetch(regs, error_code, address))
return;
-#endif
- tsk->thread.cr2 = address;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 14;
+
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+
force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
-static noinline void mm_fault_error(struct pt_regs *regs,
- unsigned long error_code, unsigned long address, unsigned int fault)
+static noinline void
+mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, unsigned int fault)
{
- if (fault & VM_FAULT_OOM)
+ if (fault & VM_FAULT_OOM) {
out_of_memory(regs, error_code, address);
- else if (fault & VM_FAULT_SIGBUS)
- do_sigbus(regs, error_code, address);
- else
- BUG();
+ } else {
+ if (fault & VM_FAULT_SIGBUS)
+ do_sigbus(regs, error_code, address);
+ else
+ BUG();
+ }
}
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
if ((error_code & PF_WRITE) && !pte_write(*pte))
return 0;
+
if ((error_code & PF_INSTR) && !pte_exec(*pte))
return 0;
@@ -630,21 +875,25 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
}
/*
- * Handle a spurious fault caused by a stale TLB entry. This allows
- * us to lazily refresh the TLB when increasing the permissions of a
- * kernel page (RO -> RW or NX -> X). Doing it eagerly is very
- * expensive since that implies doing a full cross-processor TLB
- * flush, even if no stale TLB entries exist on other processors.
+ * Handle a spurious fault caused by a stale TLB entry.
+ *
+ * This allows us to lazily refresh the TLB when increasing the
+ * permissions of a kernel page (RO -> RW or NX -> X). Doing it
+ * eagerly is very expensive since that implies doing a full
+ * cross-processor TLB flush, even if no stale TLB entries exist
+ * on other processors.
+ *
* There are no security implications to leaving a stale TLB when
* increasing the permissions on a page.
*/
-static noinline int spurious_fault(unsigned long error_code,
- unsigned long address)
+static noinline int
+spurious_fault(unsigned long error_code, unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
+ int ret;
/* Reserved-bit violation or user access to kernel space? */
if (error_code & (PF_USER | PF_RSVD))
@@ -672,123 +921,46 @@ static noinline int spurious_fault(unsigned long error_code,
if (!pte_present(*pte))
return 0;
- return spurious_fault_check(error_code, pte);
-}
-
-/*
- * X86_32
- * Handle a fault on the vmalloc or module mapping area
- *
- * X86_64
- * Handle a fault on the vmalloc area
- *
- * This assumes no large pages in there.
- */
-static noinline int vmalloc_fault(unsigned long address)
-{
-#ifdef CONFIG_X86_32
- unsigned long pgd_paddr;
- pmd_t *pmd_k;
- pte_t *pte_k;
-
- /* Make sure we are in vmalloc area */
- if (!(address >= VMALLOC_START && address < VMALLOC_END))
- return -1;
+ ret = spurious_fault_check(error_code, pte);
+ if (!ret)
+ return 0;
/*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- *
- * Do _not_ use "current" here. We might be inside
- * an interrupt in the middle of a task switch..
+ * Make sure we have permissions in PMD.
+ * If not, then there's a bug in the page tables:
*/
- pgd_paddr = read_cr3();
- pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
- if (!pmd_k)
- return -1;
- pte_k = pte_offset_kernel(pmd_k, address);
- if (!pte_present(*pte_k))
- return -1;
- return 0;
-#else
- pgd_t *pgd, *pgd_ref;
- pud_t *pud, *pud_ref;
- pmd_t *pmd, *pmd_ref;
- pte_t *pte, *pte_ref;
+ ret = spurious_fault_check(error_code, (pte_t *) pmd);
+ WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
- /* Make sure we are in vmalloc area */
- if (!(address >= VMALLOC_START && address < VMALLOC_END))
- return -1;
-
- /* Copy kernel mappings over when needed. This can also
- happen within a race in page table update. In the later
- case just flush. */
-
- pgd = pgd_offset(current->active_mm, address);
- pgd_ref = pgd_offset_k(address);
- if (pgd_none(*pgd_ref))
- return -1;
- if (pgd_none(*pgd))
- set_pgd(pgd, *pgd_ref);
- else
- BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-
- /* Below here mismatches are bugs because these lower tables
- are shared */
-
- pud = pud_offset(pgd, address);
- pud_ref = pud_offset(pgd_ref, address);
- if (pud_none(*pud_ref))
- return -1;
- if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
- BUG();
- pmd = pmd_offset(pud, address);
- pmd_ref = pmd_offset(pud_ref, address);
- if (pmd_none(*pmd_ref))
- return -1;
- if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
- BUG();
- pte_ref = pte_offset_kernel(pmd_ref, address);
- if (!pte_present(*pte_ref))
- return -1;
- pte = pte_offset_kernel(pmd, address);
- /* Don't use pte_page here, because the mappings can point
- outside mem_map, and the NUMA hash lookup cannot handle
- that. */
- if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
- BUG();
- return 0;
-#endif
+ return ret;
}
int show_unhandled_signals = 1;
-static inline int access_error(unsigned long error_code, int write,
- struct vm_area_struct *vma)
+static inline int
+access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
if (write) {
- /* write, present and write, not present */
+ /* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
- } else if (unlikely(error_code & PF_PROT)) {
- /* read, present */
- return 1;
- } else {
- /* read, not present */
- if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
- return 1;
+ return 0;
}
+ /* read, present: */
+ if (unlikely(error_code & PF_PROT))
+ return 1;
+
+ /* read, not present: */
+ if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+ return 1;
+
return 0;
}
static int fault_in_kernel_space(unsigned long address)
{
-#ifdef CONFIG_X86_32
- return address >= TASK_SIZE;
-#else /* !CONFIG_X86_32 */
- return address >= TASK_SIZE64;
-#endif /* CONFIG_X86_32 */
+ return address >= TASK_SIZE_MAX;
}
/*
@@ -796,23 +968,22 @@ static int fault_in_kernel_space(unsigned long address)
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
-#ifdef CONFIG_X86_64
-asmlinkage
-#endif
-void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
+dotraplinkage void __kprobes
+do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
- unsigned long address;
+ struct vm_area_struct *vma;
struct task_struct *tsk;
+ unsigned long address;
struct mm_struct *mm;
- struct vm_area_struct *vma;
int write;
int fault;
tsk = current;
mm = tsk->mm;
+
prefetchw(&mm->mmap_sem);
- /* get the address */
+ /* Get the faulting address: */
address = read_cr2();
if (unlikely(kmmio_fault(regs, address)))
@@ -836,22 +1007,23 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
vmalloc_fault(address) >= 0)
return;
- /* Can handle a stale RO->RW TLB */
+ /* Can handle a stale RO->RW TLB: */
if (spurious_fault(error_code, address))
return;
- /* kprobes don't want to hook the spurious faults. */
+ /* kprobes don't want to hook the spurious faults: */
if (notify_page_fault(regs))
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
- * fault we could otherwise deadlock.
+ * fault we could otherwise deadlock:
*/
bad_area_nosemaphore(regs, error_code, address);
+
return;
}
- /* kprobes don't want to hook the spurious faults. */
+ /* kprobes don't want to hook the spurious faults: */
if (unlikely(notify_page_fault(regs)))
return;
/*
@@ -859,22 +1031,22 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
* vmalloc fault has been handled.
*
* User-mode registers count as a user access even for any
- * potential system fault or CPU buglet.
+ * potential system fault or CPU buglet:
*/
if (user_mode_vm(regs)) {
local_irq_enable();
error_code |= PF_USER;
- } else if (regs->flags & X86_EFLAGS_IF)
- local_irq_enable();
+ } else {
+ if (regs->flags & X86_EFLAGS_IF)
+ local_irq_enable();
+ }
-#ifdef CONFIG_X86_64
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
-#endif
/*
- * If we're in an interrupt, have no user context or are running in an
- * atomic region then we must not take the fault.
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
*/
if (unlikely(in_atomic() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
@@ -883,19 +1055,19 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
/*
* When running in the kernel we expect faults to occur only to
- * addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
- * we will deadlock attempting to validate the fault against the
- * address space. Luckily the kernel only validly references user
- * space from well defined areas of code, which are listed in the
- * exceptions table.
+ * addresses in user space. All other faults represent errors in
+ * the kernel and should generate an OOPS. Unfortunately, in the
+ * case of an erroneous fault occurring in a code path which already
+ * holds mmap_sem we will deadlock attempting to validate the fault
+ * against the address space. Luckily the kernel only validly
+ * references user space from well defined areas of code, which are
+ * listed in the exceptions table.
*
* As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibility of a deadlock.
- * Attempt to lock the address space, if we cannot we then validate the
- * source. If this is invalid we can skip the address space check,
- * thus avoiding the deadlock.
+ * the source reference check when there is a possibility of a
+ * deadlock. Attempt to lock the address space, if we cannot we then
+ * validate the source. If this is invalid we can skip the address
+ * space check, thus avoiding the deadlock:
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if ((error_code & PF_USER) == 0 &&
@@ -906,8 +1078,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
down_read(&mm->mmap_sem);
} else {
/*
- * The above down_read_trylock() might have succeeded in which
- * case we'll have missed the might_sleep() from down_read().
+ * The above down_read_trylock() might have succeeded in
+ * which case we'll have missed the might_sleep() from
+ * down_read():
*/
might_sleep();
}
@@ -927,7 +1100,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
/*
* Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
- * and pusha to work. ("enter $65535,$31" pushes
+ * and pusha to work. ("enter $65535, $31" pushes
* 32 pointers and then decrements %sp by 65535.)
*/
if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
@@ -946,6 +1119,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
*/
good_area:
write = error_code & PF_WRITE;
+
if (unlikely(access_error(error_code, write, vma))) {
bad_area_access_error(regs, error_code, address);
return;
@@ -954,75 +1128,21 @@ good_area:
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
- * the fault.
+ * the fault:
*/
fault = handle_mm_fault(mm, vma, address, write);
+
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, fault);
return;
}
+
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
-#ifdef CONFIG_X86_32
- /*
- * Did it hit the DOS screen memory VA from vm86 mode?
- */
- if (v8086_mode(regs)) {
- unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
- if (bit < 32)
- tsk->thread.screen_bitmap |= 1 << bit;
- }
-#endif
- up_read(&mm->mmap_sem);
-}
-
-DEFINE_SPINLOCK(pgd_lock);
-LIST_HEAD(pgd_list);
+ check_v8086_mode(regs, address, tsk);
-void vmalloc_sync_all(void)
-{
- unsigned long address;
-
-#ifdef CONFIG_X86_32
- if (SHARED_KERNEL_PMD)
- return;
-
- for (address = VMALLOC_START & PMD_MASK;
- address >= TASK_SIZE && address < FIXADDR_TOP;
- address += PMD_SIZE) {
- unsigned long flags;
- struct page *page;
-
- spin_lock_irqsave(&pgd_lock, flags);
- list_for_each_entry(page, &pgd_list, lru) {
- if (!vmalloc_sync_one(page_address(page),
- address))
- break;
- }
- spin_unlock_irqrestore(&pgd_lock, flags);
- }
-#else /* CONFIG_X86_64 */
- for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
- address += PGDIR_SIZE) {
- const pgd_t *pgd_ref = pgd_offset_k(address);
- unsigned long flags;
- struct page *page;
-
- if (pgd_none(*pgd_ref))
- continue;
- spin_lock_irqsave(&pgd_lock, flags);
- list_for_each_entry(page, &pgd_list, lru) {
- pgd_t *pgd;
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
- if (pgd_none(*pgd))
- set_pgd(pgd, *pgd_ref);
- else
- BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
- }
- spin_unlock_irqrestore(&pgd_lock, flags);
- }
-#endif
+ up_read(&mm->mmap_sem);
}
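
For reference, the PF_* bits that this patch turns into enum x86_pf_error_code combine as follows. Below is a minimal user-space sketch of how the handler reads them; the decode_fault() helper and the sample values are illustrative only, not part of the patch:

#include <stdio.h>

/* Mirrors the enum introduced above in arch/x86/mm/fault.c: */
enum x86_pf_error_code {
	PF_PROT		= 1 << 0,	/* 0: no page found	1: protection fault */
	PF_WRITE	= 1 << 1,	/* 0: read access	1: write access */
	PF_USER		= 1 << 2,	/* 0: kernel-mode	1: user-mode access */
	PF_RSVD		= 1 << 3,	/* 1: reserved bit detected */
	PF_INSTR	= 1 << 4,	/* 1: instruction fetch */
};

/* Hypothetical helper: spell out an error code the way do_page_fault() sees it. */
static void decode_fault(unsigned long error_code)
{
	printf("%s, %s, %s mode%s%s\n",
	       error_code & PF_PROT  ? "protection fault" : "page not present",
	       error_code & PF_WRITE ? "write access" : "read access",
	       error_code & PF_USER  ? "user" : "kernel",
	       error_code & PF_RSVD  ? ", reserved bit set" : "",
	       error_code & PF_INSTR ? ", instruction fetch" : "");
}

int main(void)
{
	decode_fault(PF_USER | PF_WRITE);	/* user write to a not-present page */
	decode_fault(PF_PROT | PF_INSTR);	/* kernel exec of an NX-protected page */
	return 0;
}

Moving the definitions into an enum gives the bits a named type for documentation purposes without changing the generated code.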
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ca53224..d5e2842 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -20,6 +20,64 @@
#include <asm/pat.h>
#include <linux/module.h>
+#ifdef CONFIG_X86_PAE
+static int
+is_io_mapping_possible(resource_size_t base, unsigned long size)
+{
+ return 1;
+}
+#else
+static int
+is_io_mapping_possible(resource_size_t base, unsigned long size)
+{
+ /* There is no way to map addresses above 1 << 32 without PAE */
+ if (base + size > 0x100000000ULL)
+ return 0;
+
+ return 1;
+}
+#endif
+
+int
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot)
+{
+ unsigned long ret_flag;
+
+ if (!is_io_mapping_possible(base, size))
+ goto out_err;
+
+ if (!pat_enabled) {
+ *prot = pgprot_noncached(PAGE_KERNEL);
+ return 0;
+ }
+
+ if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag))
+ goto out_err;
+
+ if (ret_flag == _PAGE_CACHE_WB)
+ goto out_free;
+
+ if (kernel_map_sync_memtype(base, size, ret_flag))
+ goto out_free;
+
+ *prot = __pgprot(__PAGE_KERNEL | ret_flag);
+ return 0;
+
+out_free:
+ free_memtype(base, base + size);
+out_err:
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(reserve_io_memtype_wc);
+
+void
+free_io_memtype(u64 base, unsigned long size)
+{
+ if (pat_enabled)
+ free_memtype(base, base + size);
+}
+EXPORT_SYMBOL_GPL(free_io_memtype);
+
/* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
void *
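
The non-PAE variant of is_io_mapping_possible() added above rejects ranges that cross the 4 GiB physical boundary, since a 32-bit kernel without PAE only has 32-bit physical addresses. A stand-alone sketch of that check (64-bit arithmetic is used throughout to sidestep overflow; the values in main() are made up for illustration):

#include <stdio.h>
#include <stdint.h>

static int is_io_mapping_possible(uint64_t base, uint64_t size)
{
	/* There is no way to map addresses above 1 << 32 without PAE: */
	if (base + size > 0x100000000ULL)
		return 0;
	return 1;
}

int main(void)
{
	/* Ends exactly at the 4 GiB boundary: mappable. */
	printf("%d\n", is_io_mapping_possible(0xffff0000ULL, 0x10000));
	/* Crosses the boundary: rejected. */
	printf("%d\n", is_io_mapping_possible(0xffff0000ULL, 0x20000));
	return 0;
}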
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 9cab18b..0bcd788 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -9,44 +9,44 @@
#include <asm/e820.h>
-static void __init memtest(unsigned long start_phys, unsigned long size,
- unsigned pattern)
+static u64 patterns[] __initdata = {
+ 0,
+ 0xffffffffffffffffULL,
+ 0x5555555555555555ULL,
+ 0xaaaaaaaaaaaaaaaaULL,
+ 0x1111111111111111ULL,
+ 0x2222222222222222ULL,
+ 0x4444444444444444ULL,
+ 0x8888888888888888ULL,
+ 0x3333333333333333ULL,
+ 0x6666666666666666ULL,
+ 0x9999999999999999ULL,
+ 0xccccccccccccccccULL,
+ 0x7777777777777777ULL,
+ 0xbbbbbbbbbbbbbbbbULL,
+ 0xddddddddddddddddULL,
+ 0xeeeeeeeeeeeeeeeeULL,
+ 0x7a6c7258554e494cULL, /* yeah ;-) */
+};
+
+static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
{
- unsigned long i;
- unsigned long *start;
- unsigned long start_bad;
- unsigned long last_bad;
- unsigned long val;
- unsigned long start_phys_aligned;
- unsigned long count;
- unsigned long incr;
-
- switch (pattern) {
- case 0:
- val = 0UL;
- break;
- case 1:
- val = -1UL;
- break;
- case 2:
-#ifdef CONFIG_X86_64
- val = 0x5555555555555555UL;
-#else
- val = 0x55555555UL;
-#endif
- break;
- case 3:
-#ifdef CONFIG_X86_64
- val = 0xaaaaaaaaaaaaaaaaUL;
-#else
- val = 0xaaaaaaaaUL;
-#endif
- break;
- default:
- return;
- }
+ printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n",
+ (unsigned long long) pattern,
+ (unsigned long long) start_bad,
+ (unsigned long long) end_bad);
+ reserve_early(start_bad, end_bad, "BAD RAM");
+}
- incr = sizeof(unsigned long);
+static void __init memtest(u64 pattern, u64 start_phys, u64 size)
+{
+ u64 i, count;
+ u64 *start;
+ u64 start_bad, last_bad;
+ u64 start_phys_aligned;
+ size_t incr;
+
+ incr = sizeof(pattern);
start_phys_aligned = ALIGN(start_phys, incr);
count = (size - (start_phys_aligned - start_phys))/incr;
start = __va(start_phys_aligned);
@@ -54,25 +54,42 @@ static void __init memtest(unsigned long start_phys, unsigned long size,
last_bad = 0;
for (i = 0; i < count; i++)
- start[i] = val;
+ start[i] = pattern;
for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
- if (*start != val) {
- if (start_phys_aligned == last_bad + incr) {
- last_bad += incr;
- } else {
- if (start_bad) {
- printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved",
- val, start_bad, last_bad + incr);
- reserve_early(start_bad, last_bad + incr, "BAD RAM");
- }
- start_bad = last_bad = start_phys_aligned;
- }
+ if (*start == pattern)
+ continue;
+ if (start_phys_aligned == last_bad + incr) {
+ last_bad += incr;
+ continue;
}
+ if (start_bad)
+ reserve_bad_mem(pattern, start_bad, last_bad + incr);
+ start_bad = last_bad = start_phys_aligned;
}
- if (start_bad) {
- printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved",
- val, start_bad, last_bad + incr);
- reserve_early(start_bad, last_bad + incr, "BAD RAM");
+ if (start_bad)
+ reserve_bad_mem(pattern, start_bad, last_bad + incr);
+}
+
+static void __init do_one_pass(u64 pattern, u64 start, u64 end)
+{
+ u64 size = 0;
+
+ while (start < end) {
+ start = find_e820_area_size(start, &size, 1);
+
+ /* done ? */
+ if (start >= end)
+ break;
+ if (start + size > end)
+ size = end - start;
+
+ printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
+ (unsigned long long) start,
+ (unsigned long long) start + size,
+ (unsigned long long) cpu_to_be64(pattern));
+ memtest(pattern, start, size);
+
+ start += size;
}
}
@@ -90,33 +107,22 @@ early_param("memtest", parse_memtest);
void __init early_memtest(unsigned long start, unsigned long end)
{
- u64 t_start, t_size;
- unsigned pattern;
+ unsigned int i;
+ unsigned int idx = 0;
if (!memtest_pattern)
return;
- printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
- for (pattern = 0; pattern < memtest_pattern; pattern++) {
- t_start = start;
- t_size = 0;
- while (t_start < end) {
- t_start = find_e820_area_size(t_start, &t_size, 1);
-
- /* done ? */
- if (t_start >= end)
- break;
- if (t_start + t_size > end)
- t_size = end - t_start;
-
- printk(KERN_CONT "\n %010llx - %010llx pattern %d",
- (unsigned long long)t_start,
- (unsigned long long)t_start + t_size, pattern);
-
- memtest(t_start, t_size, pattern);
+ printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
+ for (i = 0; i < memtest_pattern; i++) {
+ idx = i % ARRAY_SIZE(patterns);
+ do_one_pass(patterns[idx], start, end);
+ }
- t_start += t_size;
- }
+ if (idx > 0) {
+ printk(KERN_INFO "early_memtest: wipe out "
+ "test pattern from memory\n");
+ /* additional test with pattern 0 will do this */
+ do_one_pass(0, start, end);
}
- printk(KERN_CONT "\n");
}
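
The rewritten verify loop above replaces the nested if/else chain with early continues while keeping the same behavior: write a pattern over the range, read it back, and coalesce adjacent failing words into a single reserved range. A stand-alone sketch of that logic, with reserve_bad_mem() reduced to a printout and a few deliberately flipped words standing in for faulty RAM:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for reserve_early(): just report the coalesced bad range. */
static void reserve_bad_mem(uint64_t pattern, uint64_t start_bad, uint64_t end_bad)
{
	printf("  %016llx bad mem addr %010llx - %010llx reserved\n",
	       (unsigned long long)pattern,
	       (unsigned long long)start_bad,
	       (unsigned long long)end_bad);
}

static void memtest_sketch(uint64_t pattern, uint64_t *start, size_t count,
			   uint64_t phys)
{
	const uint64_t incr = sizeof(pattern);
	uint64_t start_bad = 0, last_bad = 0;
	size_t i;

	for (i = 0; i < count; i++)
		start[i] = pattern;

	/* Illustration only: pretend three adjacent words are stuck cells. */
	start[10] ^= 1;
	start[11] ^= 1;
	start[12] ^= 1;

	for (i = 0; i < count; i++, phys += incr) {
		if (start[i] == pattern)
			continue;
		if (phys == last_bad + incr) {
			last_bad += incr;
			continue;
		}
		if (start_bad)
			reserve_bad_mem(pattern, start_bad, last_bad + incr);
		start_bad = last_bad = phys;
	}
	if (start_bad)
		reserve_bad_mem(pattern, start_bad, last_bad + incr);
}

int main(void)
{
	uint64_t buf[64];

	/* Reports one coalesced range covering the three bad words. */
	memtest_sketch(0x5555555555555555ULL, buf, 64, 0x100000);
	return 0;
}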
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index d1f7439..3957cd6 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -194,7 +194,7 @@ void *alloc_remap(int nid, unsigned long size)
size = ALIGN(size, L1_CACHE_BYTES);
if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
- return 0;
+ return NULL;
node_remap_alloc_vaddr[nid] += size;
memset(allocation, 0, size);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index deb1c1a..64c9cf0 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -166,7 +166,7 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
return shift;
}
-int early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
{
return phys_to_nid(pfn << PAGE_SHIFT);
}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8ca0d85..8253bc9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -482,6 +482,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
pbase = (pte_t *)page_address(base);
paravirt_alloc_pte(&init_mm, page_to_pfn(base));
ref_prot = pte_pgprot(pte_clrhuge(*kpte));
+ /*
+ * If we ever want to utilize the PAT bit, we need to
+ * update this function to make sure it's converted from
+ * bit 12 to bit 7 when we cross from the 2MB level to
+ * the 4K level:
+ */
+ WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
#ifdef CONFIG_X86_64
if (level == PG_LEVEL_1G) {
@@ -508,18 +515,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
#endif
/*
- * Install the new, split up pagetable. Important details here:
- *
- * On Intel the NX bit of all levels must be cleared to make a
- * page executable. See section 4.13.2 of Intel 64 and IA-32
- * Architectures Software Developer's Manual).
+ * Install the new, split up pagetable.
*
- * Mark the entry present. The current mapping might be
- * set to not present, which we preserved above.
+ * We use the standard kernel pagetable protections for the new
+ * pagetable protections, the actual ptes set above control the
+ * primary protection behavior:
*/
- ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
- pgprot_val(ref_prot) |= _PAGE_PRESENT;
- __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+ __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
base = NULL;
out_unlock:
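
The WARN_ON_ONCE() added above documents a real pitfall: the PAT selector sits in a different bit position depending on the page size, so a 2MB-level protection value cannot be copied verbatim into 4K PTEs once the kernel starts using the PAT bit. The PMD installed over the new page table now carries the fixed _KERNPG_TABLE protections instead of a value derived from the old huge entry; that is safe because the leaf PTEs set just above restrict the effective permissions. A small sketch of the relevant bit positions (flag values as defined in arch/x86/include/asm/pgtable_types.h; the program only prints them):

#include <stdio.h>

#define _PAGE_PRESENT	(1ULL << 0)
#define _PAGE_RW	(1ULL << 1)
#define _PAGE_ACCESSED	(1ULL << 5)
#define _PAGE_DIRTY	(1ULL << 6)
#define _PAGE_PAT	(1ULL << 7)	/* PAT selector in a 4K PTE */
#define _PAGE_PAT_LARGE	(1ULL << 12)	/* PAT selector in a 2MB/1GB entry */

/* Protections now installed for the split-up page table's PMD entry: */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

int main(void)
{
	printf("_KERNPG_TABLE   = %#llx\n", _KERNPG_TABLE);
	printf("_PAGE_PAT       = %#llx (bit 7, 4K level)\n", _PAGE_PAT);
	printf("_PAGE_PAT_LARGE = %#llx (bit 12, 2MB level)\n", _PAGE_PAT_LARGE);
	return 0;
}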
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 05f9aef..fdfedb6 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -634,6 +634,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
}
/*
+ * Change the memory type for the physical address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+{
+ unsigned long id_sz;
+
+ if (!pat_enabled || base >= __pa(high_memory))
+ return 0;
+
+ id_sz = (__pa(high_memory) < base + size) ?
+ __pa(high_memory) - base :
+ size;
+
+ if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+ printk(KERN_INFO
+ "%s:%d ioremap_change_attr failed %s "
+ "for %Lx-%Lx\n",
+ current->comm, current->pid,
+ cattr_name(flags),
+ base, (unsigned long long)(base + size));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
* Internal interface to reserve a range of physical memory with prot.
* Reserved non RAM regions only and after successful reserve_memtype,
* this func also keeps identity mapping (if any) in sync with this new prot.
@@ -642,7 +669,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
int strict_prot)
{
int is_ram = 0;
- int id_sz, ret;
+ int ret;
unsigned long flags;
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
@@ -679,23 +706,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
flags);
}
- /* Need to keep identity mapping in sync */
- if (paddr >= __pa(high_memory))
- return 0;
-
- id_sz = (__pa(high_memory) < paddr + size) ?
- __pa(high_memory) - paddr :
- size;
-
- if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+ if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
free_memtype(paddr, paddr + size);
- printk(KERN_ERR
- "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
- "for %Lx-%Lx\n",
- current->comm, current->pid,
- cattr_name(flags),
- (unsigned long long)paddr,
- (unsigned long long)(paddr + size));
return -EINVAL;
}
return 0;
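
kernel_map_sync_memtype() factors out the identity-mapping synchronization that reserve_pfn_range() used to do inline, so that iomap_32.c can reuse it. Only the part of a range that lies below high_memory has a kernel identity mapping, hence the clamping of id_sz before ioremap_change_attr() is called. A stand-alone sketch of just that clamp (the 896 MB lowmem boundary in main() is a typical 32-bit figure, chosen only for illustration):

#include <stdio.h>
#include <stdint.h>

static uint64_t identity_sync_size(uint64_t pa_high_memory, uint64_t base,
				   uint64_t size)
{
	if (base >= pa_high_memory)
		return 0;	/* nothing in the range is identity-mapped */

	/* Clamp to the identity-mapped prefix, as kernel_map_sync_memtype() does: */
	return (pa_high_memory < base + size) ? pa_high_memory - base : size;
}

int main(void)
{
	uint64_t high = 896ULL << 20;	/* hypothetical __pa(high_memory) */

	/* A 16 MB range straddling the end of lowmem: only 8 MB is synced. */
	printf("%llu\n", (unsigned long long)
	       identity_sync_size(high, high - (8ULL << 20), 16ULL << 20));
	return 0;
}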