author    | Matt Fleming <matt@console-pimps.org> | 2009-07-03 16:16:54 +0900
committer | Paul Mundt <lethal@linux-sh.org>      | 2009-07-03 16:16:54 +0900
commit    | c601a51af10f714292f42eab45fa8c9154dc1414
tree      | 3b3aeba10f7883578ac8dfd765804594eea8e9f1
parent    | 47220f623c3216ca276bad517ed208ea2ffceaa2
download  | op-kernel-dev-c601a51af10f714292f42eab45fa8c9154dc1414.zip, op-kernel-dev-c601a51af10f714292f42eab45fa8c9154dc1414.tar.gz
sh: Use bootmem ontop of lmb
Rework the bootmem allocator to use the lmb framework.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
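
For orientation, the sketch below condenses the boot-memory flow this patch introduces, using the names that appear in the diff further down: setup_arch() now calls lmb_init() before setup_memory(), which registers RAM with lmb, reserves the ranges already in use, and only then seeds the bootmem allocator from the lmb view. It is a reading aid distilled from the patch, not a drop-in replacement for arch/sh/kernel/setup.c; the surrounding kernel context (headers, min_low_pfn/max_low_pfn, __MEMORY_START, etc.) is assumed to be as in that file.

```c
/*
 * Condensed sketch of the flow after this patch (non-NUMA case).
 * All symbols are taken from the diff below.
 */
static void __init setup_memory(void)
{
	unsigned long start_pfn = PFN_UP(__pa(_end)); /* first PFN above the kernel image */
	u64 base = min_low_pfn << PAGE_SHIFT;
	u64 size = (max_low_pfn << PAGE_SHIFT) - base;

	/* 1. Describe all of low memory to lmb. */
	lmb_add(base, size);

	/* 2. Carve out what is already in use: the kernel image and
	 *    everything below CONFIG_ZERO_PAGE_OFFSET. */
	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	lmb_analyze();
	lmb_dump_all();

	/* 3. Hand the lmb view over to bootmem: the bootmem bitmap is
	 *    allocated with lmb_alloc(), active regions come from
	 *    lmb.memory, and each lmb.reserved region is replayed via
	 *    reserve_bootmem(). */
	setup_bootmem_allocator(start_pfn);
}
```

The point of this ordering is that the bootmem bitmap can now be placed wherever lmb_alloc() finds room, instead of being assumed to sit immediately after the kernel image, and every region reserved through lmb is automatically propagated into bootmem.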
-rw-r--r-- | arch/sh/Kconfig           |  1
-rw-r--r-- | arch/sh/include/asm/lmb.h |  6
-rw-r--r-- | arch/sh/kernel/setup.c    | 71
3 files changed, 59 insertions, 19 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index e2bdd7b..0fb99b0 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -10,6 +10,7 @@ config SUPERH
 	select EMBEDDED
 	select HAVE_CLK
 	select HAVE_IDE
+	select HAVE_LMB
 	select HAVE_OPROFILE
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IOREMAP_PROT if MMU
diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h
new file mode 100644
index 0000000..9b437f6
--- /dev/null
+++ b/arch/sh/include/asm/lmb.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH_LMB_H
+#define __ASM_SH_LMB_H
+
+#define LMB_REAL_LIMIT	0
+
+#endif /* __ASM_SH_LMB_H */
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index dd38338..ceb409b 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -30,6 +30,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
+#include <linux/lmb.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/page.h>
@@ -233,39 +234,45 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 void __init setup_bootmem_allocator(unsigned long free_pfn)
 {
 	unsigned long bootmap_size;
+	unsigned long bootmap_pages, bootmem_paddr;
+	u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
+	int i;
+
+	bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+	bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
 	/*
 	 * Find a proper area for the bootmem bitmap. After this
 	 * bootstrap step all allocations (until the page allocator
 	 * is intact) must be done via bootmem_alloc().
 	 */
-	bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
+	bootmap_size = init_bootmem_node(NODE_DATA(0),
+					 bootmem_paddr >> PAGE_SHIFT,
 					 min_low_pfn, max_low_pfn);
 
-	__add_active_range(0, min_low_pfn, max_low_pfn);
-	register_bootmem_low_pages();
-
-	node_set_online(0);
+	/* Add active regions with valid PFNs. */
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long start_pfn, end_pfn;
+		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		__add_active_range(0, start_pfn, end_pfn);
+	}
 
 	/*
-	 * Reserve the kernel text and
-	 * Reserve the bootmem bitmap. We do this in two steps (first step
-	 * was init_bootmem()), because this catches the (definitely buggy)
-	 * case of us accidentally initializing the bootmem allocator with
-	 * an invalid RAM area.
+	 * Add all physical memory to the bootmem map and mark each
+	 * area as present.
 	 */
-	reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-			(PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
-			(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
-			BOOTMEM_DEFAULT);
+	register_bootmem_low_pages();
 
-	/*
-	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
-	 */
-	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
+	/* Reserve the sections we're already using. */
+	for (i = 0; i < lmb.reserved.cnt; i++)
+		reserve_bootmem(lmb.reserved.region[i].base,
+				lmb_size_bytes(&lmb.reserved, i),
 				BOOTMEM_DEFAULT);
 
+	node_set_online(0);
+
 	sparse_memory_present_with_active_regions(0);
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -296,12 +303,37 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
 static void __init setup_memory(void)
 {
 	unsigned long start_pfn;
+	u64 base = min_low_pfn << PAGE_SHIFT;
+	u64 size = (max_low_pfn << PAGE_SHIFT) - base;
 
 	/*
 	 * Partially used pages are not usable - thus
 	 * we are rounding upwards:
 	 */
 	start_pfn = PFN_UP(__pa(_end));
+
+	lmb_add(base, size);
+
+	/*
+	 * Reserve the kernel text and
+	 * Reserve the bootmem bitmap. We do this in two steps (first step
+	 * was init_bootmem()), because this catches the (definitely buggy)
+	 * case of us accidentally initializing the bootmem allocator with
+	 * an invalid RAM area.
+	 */
+	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+	lmb_analyze();
+	lmb_dump_all();
+
 	setup_bootmem_allocator(start_pfn);
 }
 #else
@@ -402,6 +434,7 @@ void __init setup_arch(char **cmdline_p)
 	nodes_clear(node_online_map);
 
 	/* Setup bootmem with available RAM */
+	lmb_init();
 	setup_memory();
 	sparse_init();