From cc6150321903ca4c3bc9d53b0cdafb05d77d64d0 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Wed, 12 Mar 2008 03:53:28 +0100
Subject: x86: account overlapped mappings in max_pfn_mapped

When end_pfn is not aligned to 2MB (or 1GB) then the kernel might
map more memory than end_pfn. Account this in max_pfn_mapped.

Signed-off-by: Andi Kleen
Cc: andreas.herrmann3@amd.com
Cc: mingo@elte.hu
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/setup_64.c |    2 +-
 arch/x86/mm/init_64.c      |   34 +++++++++++++++++++++++-----------
 include/asm-x86/page_64.h  |    3 +++
 include/asm-x86/proto.h    |    2 --
 4 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 413b8fc..3d76dbd 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -347,7 +347,7 @@ void __init setup_arch(char **cmdline_p)
 
 	check_efer();
 
-	init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT));
+	max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT));
 
 	if (efi_enabled)
 		efi_init();
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ef9e9cf..1076097 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -313,7 +313,7 @@ __meminit void early_iounmap(void *addr, unsigned long size)
 	__flush_tlb_all();
 }
 
-static void __meminit
+static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
 	int i = pmd_index(address);
@@ -335,21 +335,26 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 		set_pte((pte_t *)pmd,
 			pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 	}
+	return address;
 }
 
-static void __meminit
+static unsigned long __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
 	pmd_t *pmd = pmd_offset(pud, 0);
+	unsigned long last_map_addr;
+
 	spin_lock(&init_mm.page_table_lock);
-	phys_pmd_init(pmd, address, end);
+	last_map_addr = phys_pmd_init(pmd, address, end);
 	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
+	return last_map_addr;
 }
 
-static void __meminit
+static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
+	unsigned long last_map_addr = end;
 	int i = pud_index(addr);
 
 	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
@@ -368,13 +373,14 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 
 		if (pud_val(*pud)) {
 			if (!pud_large(*pud))
-				phys_pmd_update(pud, addr, end);
+				last_map_addr = phys_pmd_update(pud, addr, end);
 			continue;
 		}
 
 		if (direct_gbpages) {
 			set_pte((pte_t *)pud, pfn_pte(addr >> PAGE_SHIFT,
 						      PAGE_KERNEL_LARGE));
+			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 			continue;
 		}
 
@@ -382,12 +388,14 @@
 
 		spin_lock(&init_mm.page_table_lock);
 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-		phys_pmd_init(pmd, addr, end);
+		last_map_addr = phys_pmd_init(pmd, addr, end);
 		spin_unlock(&init_mm.page_table_lock);
 
 		unmap_low_page(pmd);
 	}
 	__flush_tlb_all();
+
+	return last_map_addr >> PAGE_SHIFT;
 }
 
 static void __init find_early_table_space(unsigned long end)
@@ -542,9 +550,9 @@ static void __init early_memtest(unsigned long start, unsigned long end)
  * This runs before bootmem is initialized and gets pages directly from
  * the physical memory. To access them they are temporarily mapped.
  */
-void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 {
-	unsigned long next;
+	unsigned long next, last_map_addr = end;
 	unsigned long start_phys = start, end_phys = end;
 
 	printk(KERN_INFO "init_memory_mapping\n");
@@ -577,7 +585,7 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 		next = start + PGDIR_SIZE;
 		if (next > end)
 			next = end;
-		phys_pud_init(pud, __pa(start), __pa(next));
+		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
 		if (!after_bootmem)
 			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
 		unmap_low_page(pud);
@@ -593,6 +601,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 
 	if (!after_bootmem)
 		early_memtest(start_phys, end_phys);
+
+	return last_map_addr;
 }
 
 #ifndef CONFIG_NUMA
@@ -632,11 +642,13 @@ int arch_add_memory(int nid, u64 start, u64 size)
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
-	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	init_memory_mapping(start, start + size-1);
+	last_mapped_pfn = init_memory_mapping(start, start + size-1);
+	if (last_mapped_pfn > max_pfn_mapped)
+		max_pfn_mapped = last_mapped_pfn;
 
 	ret = __add_pages(zone, start_pfn, nr_pages);
 	WARN_ON(1);
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index 54d5db6..6ea7285 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -80,6 +80,9 @@ typedef struct { pteval_t pte; } pte_t;
 
 #define vmemmap ((struct page *)VMEMMAP_START)
 
+extern unsigned long init_memory_mapping(unsigned long start,
+					 unsigned long end);
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 9da46af..1e17bcc 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -7,8 +7,6 @@
 
 extern void early_idt_handler(void);
 
-extern void init_memory_mapping(unsigned long start, unsigned long end);
-
 extern void system_call(void);
 extern void syscall_init(void);
 
--
cgit v1.1
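
A minimal standalone sketch of the overlap described in the changelog: when the end of memory
is not aligned to a 2MB large page, the last PMD-level entry still maps a full 2MB, so the last
mapped pfn lands past end_pfn; the patch has init_memory_mapping() report that last mapped pfn
so max_pfn_mapped can account for it. The constants and the sample end address below are
illustrative only, not taken from the kernel headers.

/* Illustrative only: how a non-2MB-aligned end rounds up to the next PMD boundary. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21                      /* 2MB large pages */
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE - 1))

int main(void)
{
        unsigned long end = 0x7ff80000UL;       /* sample end address, not 2MB aligned */
        /* the last 2MB mapping covers up to the next PMD boundary */
        unsigned long last_map_addr = (end + PMD_SIZE - 1) & PMD_MASK;

        printf("end_pfn:         %#lx\n", end >> PAGE_SHIFT);
        printf("last mapped pfn: %#lx\n", last_map_addr >> PAGE_SHIFT);
        return 0;
}

With this sample address the last mapped pfn (0x80000) exceeds end_pfn (0x7ff80), which is the
quantity the patch folds into max_pfn_mapped.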