author     Kumar Gala <galak@kernel.crashing.org>   2008-01-09 11:27:23 -0600
committer  Kumar Gala <galak@kernel.crashing.org>   2008-01-23 19:29:08 -0600
commit     f98eeb4eb1c52de89dcefeb538029bcecc6dd42d (patch)
tree       da91da9e329d35360ece38eb7f9fbcbc740cec63
parent     52920df4aa9dd25836b8ed4dc0b177ea14c09e53 (diff)
download   op-kernel-dev-f98eeb4eb1c52de89dcefeb538029bcecc6dd42d.zip
           op-kernel-dev-f98eeb4eb1c52de89dcefeb538029bcecc6dd42d.tar.gz
[POWERPC] Fix handling of memreserve if the range lands in highmem
There were several issues if a memreserve range existed and happened to be
in highmem:

* The bootmem allocator is only aware of lowmem, so calling reserve_bootmem
  with a highmem address would cause a BUG_ON.
* All highmem pages were provided to the buddy allocator.

Added an lmb_is_reserved() API that we now use to determine whether a highmem
page should stay PageReserved or be provided to the buddy allocator.

Also, we previously reported the number of reserved pages incorrectly, since
all highmem pages are initially marked reserved and we clear the PageReserved
flag as we "free" up the highmem pages.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
-rw-r--r--   arch/powerpc/mm/lmb.c       13
-rw-r--r--   arch/powerpc/mm/mem.c       21
-rw-r--r--   include/asm-powerpc/lmb.h    1
3 files changed, 33 insertions, 2 deletions
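
Before the patch itself, a minimal user-space sketch (not kernel code) of the
check the commit adds as lmb_is_reserved(): an address counts as reserved when
it falls inside any inclusive [base, base + size - 1] range in the
reserved-region table. The struct layout, the sample ranges, and the
is_reserved() name below are illustrative stand-ins, not the real lmb API.

#include <stdio.h>

struct region {
	unsigned long base;
	unsigned long size;
};

/* Made-up reservations; the real table lives in lmb.reserved.region[]. */
static const struct region reserved[] = {
	{ 0x00000000UL, 0x00400000UL },	/* hypothetical kernel image */
	{ 0x2ff00000UL, 0x00200000UL },	/* hypothetical initrd near the top of lowmem */
};

/* Same containment test as the patch: inclusive on both ends. */
static int is_reserved(unsigned long addr)
{
	unsigned long i;

	for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		unsigned long upper = reserved[i].base + reserved[i].size - 1;

		if (addr >= reserved[i].base && addr <= upper)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* 0x2ff80000 lies inside the second made-up range; 0x10000000 does not. */
	printf("%d %d\n", is_reserved(0x2ff80000UL), is_reserved(0x10000000UL));
	return 0;
}

In the patch, the same decision is applied per highmem page in mem_init(),
with the address computed as pfn << PAGE_SHIFT.
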
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 8f4d2dc..4ce23bc 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -342,3 +342,16 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 		}
 	}
 }
+
+int __init lmb_is_reserved(unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long upper = lmb.reserved.region[i].base +
+			lmb.reserved.region[i].size - 1;
+		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
+			return 1;
+	}
+	return 0;
+}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5402fb6b..e812244 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -213,15 +213,30 @@ void __init do_init_bootmem(void)
 	 */
 #ifdef CONFIG_HIGHMEM
 	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
+
+	/* reserve the sections we're already using */
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long addr = lmb.reserved.region[i].base +
+			lmb_size_bytes(&lmb.reserved, i) - 1;
+		if (addr < total_lowmem)
+			reserve_bootmem(lmb.reserved.region[i].base,
+					lmb_size_bytes(&lmb.reserved, i));
+		else if (lmb.reserved.region[i].base < total_lowmem) {
+			unsigned long adjusted_size = total_lowmem -
+				lmb.reserved.region[i].base;
+			reserve_bootmem(lmb.reserved.region[i].base,
+					adjusted_size);
+		}
+	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);
-#endif
 	/* reserve the sections we're already using */
 	for (i = 0; i < lmb.reserved.cnt; i++)
 		reserve_bootmem(lmb.reserved.region[i].base,
 				lmb_size_bytes(&lmb.reserved, i));
+#endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
@@ -334,11 +349,13 @@ void __init mem_init(void)
 		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 			struct page *page = pfn_to_page(pfn);
-
+			if (lmb_is_reserved(pfn << PAGE_SHIFT))
+				continue;
 			ClearPageReserved(page);
 			init_page_count(page);
 			__free_page(page);
 			totalhigh_pages++;
+			reservedpages--;
 		}
 		totalram_pages += totalhigh_pages;
 		printk(KERN_DEBUG "High memory: %luk\n",
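
The HIGHMEM branch added in do_init_bootmem() above clips a reservation that
straddles the lowmem boundary, so only the lowmem portion is passed to
reserve_bootmem() and ranges entirely in highmem are skipped. A small
standalone sketch of that arithmetic, using made-up values for total_lowmem,
base and size:

#include <stdio.h>

int main(void)
{
	/* All values are made-up examples, not taken from the patch. */
	unsigned long total_lowmem = 0x30000000UL;	/* 768 MB of lowmem */
	unsigned long base = 0x2ff00000UL;		/* reservation start, still in lowmem */
	unsigned long size = 0x00200000UL;		/* 2 MB, crossing into highmem */
	unsigned long end = base + size - 1;

	if (end < total_lowmem) {
		/* whole range is lowmem: reserve it as-is */
		printf("reserve %#lx..%#lx\n", base, end);
	} else if (base < total_lowmem) {
		/* straddles the boundary: only reserve the lowmem part */
		unsigned long adjusted_size = total_lowmem - base;
		printf("reserve %#lx..%#lx (clipped to %#lx bytes)\n",
		       base, total_lowmem - 1, adjusted_size);
	} else {
		/* entirely in highmem: bootmem never sees it */
		printf("skip\n");
	}
	return 0;
}

With these made-up numbers the range is clipped to 0x100000 bytes (1 MB),
which is what the adjusted_size computation in the hunk above produces.
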
diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h
index b5f9f4c..5d1dc48 100644
--- a/include/asm-powerpc/lmb.h
+++ b/include/asm-powerpc/lmb.h
@@ -51,6 +51,7 @@ extern unsigned long __init __lmb_alloc_base(unsigned long size,
 extern unsigned long __init lmb_phys_mem_size(void);
 extern unsigned long __init lmb_end_of_DRAM(void);
 extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
+extern int __init lmb_is_reserved(unsigned long addr);
 extern void lmb_dump_all(void);