author    | Matt Tolentino <metolent@snoqualmie.dp.intel.com> | 2005-06-23 00:08:06 -0700
committer | Linus Torvalds <torvalds@ppc970.osdl.org>         | 2005-06-23 09:45:06 -0700
commit    | 2b97690f4cd960779fb351b7cd9974390afabb36 (patch)
tree      | 26a94d00bc67f3036cea966ba7435f8e1efd3779
parent    | 1035faf1b19efb83d5626985240f52cd149dd39b (diff)
[PATCH] reorganize x86-64 NUMA and DISCONTIGMEM config options
In order to use the alternative sparsemem implementation for NUMA kernels,
we need to reorganize the config options. This patch effectively abstracts
out the CONFIG_DISCONTIGMEM options to CONFIG_NUMA in most cases. Thus,
the discontigmem implementation may be employed as always, but the
sparsemem implementation may be used as an alternative.
Signed-off-by: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
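
The pattern of the patch: code that only needs to know "this is a NUMA layout" now tests CONFIG_NUMA, code that genuinely depends on a single flat mem_map tests CONFIG_FLATMEM, and only the per-node bookkeeping keeps testing CONFIG_DISCONTIGMEM. Below is a minimal, self-contained userspace sketch of that split -- it is not kernel code, and the CONFIG_* switches are plain compile-time defines you pass yourself (e.g. cc -DCONFIG_NUMA sketch.c).

/*
 * Illustrative userspace sketch only -- not kernel code. It mimics the
 * config split the patch introduces: NUMA-generic code keys off
 * CONFIG_NUMA, while code that truly needs the single global mem_map[]
 * keys off CONFIG_FLATMEM, so a NUMA build can be backed by either
 * discontigmem or sparsemem.
 */
#include <stdio.h>

static void init_memory_model(void)
{
#ifdef CONFIG_NUMA
	/* Any NUMA layout: per-node setup, regardless of the backing model. */
	puts("numa_initmem_init(): per-node bootmem setup");
#else
	/* Single node: one contiguous bootmem region. */
	puts("contig_initmem_init(): single bootmem region");
#endif

#ifdef CONFIG_FLATMEM
	/* Only a flat model has one global mem_map[] to sanity-check. */
	puts("checking global mem_map[]");
#endif
}

int main(void)
{
	init_memory_model();
	return 0;
}

Building the sketch with different -D combinations shows the point of the reorganization: defining CONFIG_NUMA no longer implies any particular memory model.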
-rw-r--r-- | arch/x86_64/kernel/head64.c   |  2
-rw-r--r-- | arch/x86_64/kernel/setup.c    |  6
-rw-r--r-- | arch/x86_64/mm/Makefile       |  2
-rw-r--r-- | arch/x86_64/mm/init.c         |  9
-rw-r--r-- | arch/x86_64/mm/ioremap.c      |  2
-rw-r--r-- | include/asm-x86_64/io.h       |  5
-rw-r--r-- | include/asm-x86_64/mmzone.h   | 15
-rw-r--r-- | include/asm-x86_64/page.h     |  4
-rw-r--r-- | include/asm-x86_64/topology.h |  4
9 files changed, 25 insertions, 24 deletions
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index 0f8c78d..cf6ab14 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -94,7 +94,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	s = strstr(saved_command_line, "earlyprintk=");
 	if (s != NULL)
 		setup_early_printk(s);
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	s = strstr(saved_command_line, "numa=");
 	if (s != NULL)
 		numa_setup(s+5);
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 125a0bb..dd8419b 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -348,7 +348,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
 		if (!memcmp(from, "mem=", 4))
 			parse_memopt(from+4, &from);
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 		if (!memcmp(from, "numa=", 5))
 			numa_setup(from+5);
 #endif
@@ -377,7 +377,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
 	*cmdline_p = command_line;
 }
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NUMA
 static void __init contig_initmem_init(void)
 {
 	unsigned long bootmap_size, bootmap;
@@ -554,7 +554,7 @@ void __init setup_arch(char **cmdline_p)
 	acpi_numa_init();
 #endif
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	numa_initmem_init(0, end_pfn);
 #else
 	contig_initmem_init();
diff --git a/arch/x86_64/mm/Makefile b/arch/x86_64/mm/Makefile
index 66c354a..1d232a8 100644
--- a/arch/x86_64/mm/Makefile
+++ b/arch/x86_64/mm/Makefile
@@ -4,7 +4,7 @@
 
 obj-y	 := init.o fault.o ioremap.o extable.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
+obj-$(CONFIG_NUMA) += numa.o
 obj-$(CONFIG_K8_NUMA) += k8topology.o
 obj-$(CONFIG_ACPI_NUMA) += srat.o
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index dbe53b4..72e4b36 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -318,7 +318,7 @@ void zap_low_mappings(void)
 	flush_tlb_all();
 }
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NUMA
 void __init paging_init(void)
 {
 	{
@@ -427,13 +427,16 @@ void __init mem_init(void)
 
 	reservedpages = 0;
 
 	/* this will put all low memory onto the freelists */
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	totalram_pages += numa_free_all_bootmem();
 	tmp = 0;
 	/* should count reserved pages here for all nodes */
 #else
+
+#ifdef CONFIG_FLATMEM
 	max_mapnr = end_pfn;
 	if (!mem_map) BUG();
+#endif
 
 	totalram_pages += free_all_bootmem();
@@ -515,7 +518,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 {
 	/* Should check here against the e820 map to avoid double free */
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	int nid = phys_to_nid(phys);
 	reserve_bootmem_node(NODE_DATA(nid), phys, len);
 #else
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 58aac23..6972df4 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -178,7 +178,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 9420270..37fc3f1 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -124,12 +124,7 @@ extern inline void * phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/mmzone.h>
 #define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#else
-#define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
-#endif
 
 #include <asm-generic/iomap.h>
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index ca4fc3f..7684137 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -6,7 +6,7 @@
 
 #include <linux/config.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 
 #define VIRTUAL_BUG_ON(x)
 
@@ -30,17 +30,16 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 	return nid;
 }
 
-#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
-
-#define kvaddr_to_nid(kaddr)	phys_to_nid(__pa(kaddr))
 #define NODE_DATA(nid)		(node_data[nid])
 
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)	(NODE_DATA(nid)->node_start_pfn + \
 				 NODE_DATA(nid)->node_spanned_pages)
 
-#define local_mapnr(kvaddr) \
-	( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
+#ifdef CONFIG_DISCONTIGMEM
+
+#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
+#define kvaddr_to_nid(kaddr)	phys_to_nid(__pa(kaddr))
 
 /* AK: this currently doesn't deal with invalid addresses. We'll see
    if the 2.5 kernel doesn't pass them
@@ -57,4 +56,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 	({ u8 nid__ = pfn_to_nid(pfn); \
 	   nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) <= node_end_pfn(nid__); }))
 #endif
+
+#define local_mapnr(kvaddr) \
+	( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
+#endif
 #endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 9ce338c..60130f4 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -119,7 +119,9 @@ extern __inline__ int get_order(unsigned long size)
 	__pa(v); })
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 
-#ifndef CONFIG_DISCONTIGMEM
+#define __boot_va(x)		__va(x)
+#define __boot_pa(x)		__pa(x)
+#ifdef CONFIG_FLATMEM
 #define pfn_to_page(pfn)	(mem_map + (pfn))
 #define page_to_pfn(page)	((unsigned long)((page) - mem_map))
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 67f24e0..da21573 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -3,7 +3,7 @@
 
 #include <linux/config.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 
 #include <asm/mpspec.h>
 #include <asm/bitops.h>
@@ -37,7 +37,6 @@ static inline cpumask_t __pcibus_to_cpumask(int bus)
 }
 #define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus->number)
 
-#ifdef CONFIG_NUMA
 /* sched_domains SD_NODE_INIT for x86_64 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.span			= CPU_MASK_NONE,	\
@@ -59,7 +58,6 @@ static inline cpumask_t __pcibus_to_cpumask(int bus)
 	.balance_interval	= 1,			\
 	.nr_balance_failed	= 0,			\
 }
-#endif
 
 #endif
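
A side effect visible in the include/asm-x86_64/io.h hunk: once page_to_phys() is written in terms of page_to_pfn(), it no longer cares which memory model provides the pfn/page mapping, so the DISCONTIGMEM special case can be dropped. The following is a rough, hypothetical userspace illustration of that layering; all demo_* names and PAGE_SHIFT_DEMO are invented for this sketch and are not kernel identifiers.

/*
 * Hypothetical userspace illustration only. The model-specific part is
 * confined to demo_page_to_pfn(); demo_page_to_phys() stays the same no
 * matter how the pfn is obtained. Build with or without
 * -DCONFIG_FLATMEM_DEMO to switch the pfn lookup.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT_DEMO 12

struct demo_page { int dummy; };

/* A tiny "flat" map: page N of physical memory is demo_mem_map[N]. */
static struct demo_page demo_mem_map[16];

#ifdef CONFIG_FLATMEM_DEMO
/* Flat model: simple pointer arithmetic against one global array. */
#define demo_page_to_pfn(page)	((unsigned long)((page) - demo_mem_map))
#else
/* Any other model would supply its own lookup; here we fake one. */
static unsigned long demo_page_to_pfn(const struct demo_page *page)
{
	return (unsigned long)(page - demo_mem_map);	/* stand-in lookup */
}
#endif

/* Model-independent: always goes through the pfn. */
#define demo_page_to_phys(page) \
	((uint64_t)demo_page_to_pfn(page) << PAGE_SHIFT_DEMO)

int main(void)
{
	printf("phys of page 3 = %#llx\n",
	       (unsigned long long)demo_page_to_phys(&demo_mem_map[3]));
	return 0;
}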