author | Keith Mannthey <kmannth@us.ibm.com> | 2006-09-30 23:27:05 -0700
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-10-01 00:39:18 -0700
commit | 71efa8fdc55e70ec6687c897a30759f0a2c2ad7e (patch)
tree | 93975fd0431f856f7285ad90a13cc4ea73f740d3 /arch/x86_64/mm
parent | ec69acbb1191df671ff8e07c8e146619a5c53f70 (diff)
[PATCH] hot-add-mem x86_64: Enable SPARSEMEM in srat.c
Enable x86_64 srat.c to share code between the reserve-based and sparsemem-based add-memory paths. Both paths need the hot-add area node locality information (nodes_add). This patch refactors the code path to allow this.
Signed-off-by: Keith Mannthey <kmannth@us.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
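
For illustration, here is a minimal standalone sketch (plain userspace C, not the kernel code itself) of the pattern this patch moves srat.c to: the shared SRAT parsing path keys off two small helpers, save_add_info() and update_end_of_memory(), and CONFIG_MEMORY_HOTPLUG_RESERVE selects which variants are built. The helper names mirror the diff below; the constants, the stub bodies, and the main() driver are illustrative assumptions only, not the actual implementation.

/*
 * Hypothetical, self-contained illustration of the config-selected helper
 * pattern. Compile with or without -DCONFIG_MEMORY_HOTPLUG_RESERVE to see
 * the two behaviors the patch separates.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long end_pfn = 0x100000;   /* pretend end of boot memory (4 GB) */
static int hotadd_percent = 10;            /* pretend the user asked for a 10% reservation */
static int found_add_area;

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/* Reserve path: only honor hot-add ranges when a reservation was requested,
 * and grow end_pfn so the reserved range is covered at boot. */
static int save_add_info(void) { return hotadd_percent > 0; }
static int update_end_of_memory(unsigned long end)
{
        found_add_area = 1;
        if ((end >> PAGE_SHIFT) > end_pfn)
                end_pfn = end >> PAGE_SHIFT;
        return 1;
}
#else
/* SPARSEMEM path (assuming CONFIG_MEMORY_HOTPLUG_SPARSE=y, as in the diff's
 * nested #ifdef): keep the node locality of hot-add ranges, but never
 * extend end_pfn at boot. */
static int save_add_info(void) { return 1; }
static int update_end_of_memory(unsigned long end) { (void)end; return 0; }
#endif

int main(void)
{
        /* End address of a made-up hot-pluggable SRAT range (9 GB). */
        unsigned long end = 0x240000000UL;

        if (!save_add_info()) {
                puts("hot-pluggable range ignored");
                return 0;
        }
        /* ...the common path would record the range in nodes_add[node] here... */
        printf("update_end_of_memory() = %d, end_pfn = %#lx, found_add_area = %d\n",
               update_end_of_memory(end), end_pfn, found_add_area);
        return 0;
}

Built with -DCONFIG_MEMORY_HOTPLUG_RESERVE the sample grows end_pfn to cover the range, as the reserve path does; built without it the range is only acknowledged (return value 0), leaving the actual memory to be added later through the sparsemem add-memory path while still preserving the nodes_add locality bookkeeping.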
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r-- | arch/x86_64/mm/srat.c | 51
1 file changed, 29 insertions, 22 deletions
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index f8c04d6..19396a2 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -23,12 +23,6 @@

 int acpi_numa __initdata;

-#if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
-        defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \
-                && !defined(CONFIG_MEMORY_HOTPLUG)
-#define RESERVE_HOTADD 1
-#endif
-
 static struct acpi_table_slit *acpi_slit;

 static nodemask_t nodes_parsed __initdata;
@@ -36,9 +30,6 @@ static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
 static int found_add_area __initdata;
 int hotadd_percent __initdata = 0;
-#ifndef RESERVE_HOTADD
-#define hotadd_percent 0        /* Ignore all settings */
-#endif

 /* Too small nodes confuse the VM badly. Usually they result
    from BIOS bugs. */
@@ -160,7 +151,7 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
                pxm, pa->apic_id, node);
 }

-#ifdef RESERVE_HOTADD
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 /*
  * Protect against too large hotadd areas that would fill up memory.
  */
@@ -203,15 +194,37 @@ static int hotadd_enough_memory(struct bootnode *nd)
         return 1;
 }

+static int update_end_of_memory(unsigned long end)
+{
+        found_add_area = 1;
+        if ((end >> PAGE_SHIFT) > end_pfn)
+                end_pfn = end >> PAGE_SHIFT;
+        return 1;
+}
+
+static inline int save_add_info(void)
+{
+        return hotadd_percent > 0;
+}
+#else
+int update_end_of_memory(unsigned long end) {return 0;}
+static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+static inline int save_add_info(void) {return 1;}
+#else
+static inline int save_add_info(void) {return 0;}
+#endif
+#endif
 /*
- * It is fine to add this area to the nodes data it will be used later
+ * Update nodes_add and decide if to include add are in the zone.
+ * Both SPARSE and RESERVE need nodes_add infomation.
  * This code supports one contigious hot add area per node.
  */
 static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 {
         unsigned long s_pfn = start >> PAGE_SHIFT;
         unsigned long e_pfn = end >> PAGE_SHIFT;
-        int changed = 0;
+        int ret = 0, changed = 0;
         struct bootnode *nd = &nodes_add[node];

         /* I had some trouble with strange memory hotadd regions breaking
@@ -240,7 +253,6 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)

         /* Looks good */

-        found_add_area = 1;
         if (nd->start == nd->end) {
                 nd->start = start;
                 nd->end = end;
@@ -258,14 +270,12 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
                         printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
         }

-        if ((nd->end >> PAGE_SHIFT) > end_pfn)
-                end_pfn = nd->end >> PAGE_SHIFT;
+        ret = update_end_of_memory(nd->end);

         if (changed)
                 printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
-        return 0;
+        return ret;
 }
-#endif

 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 void __init
@@ -284,7 +294,7 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
         }
         if (ma->flags.enabled == 0)
                 return;
-        if (ma->flags.hot_pluggable && hotadd_percent == 0)
+        if (ma->flags.hot_pluggable && !save_add_info())
                 return;
         start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
         end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
@@ -327,15 +337,13 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
         push_node_boundaries(node, nd->start >> PAGE_SHIFT,
                         nd->end >> PAGE_SHIFT);

-#ifdef RESERVE_HOTADD
-        if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) {
+        if (ma->flags.hot_pluggable && !reserve_hotadd(node, start, end) < 0) {
                 /* Ignore hotadd region. Undo damage */
                 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
                 *nd = oldnode;
                 if ((nd->start | nd->end) == 0)
                         node_clear(node, nodes_parsed);
         }
-#endif
 }

 /* Sanity check to catch more bad SRATs (they are amazingly common).
@@ -351,7 +359,6 @@ static int nodes_cover_memory(void)
                 unsigned long e = nodes[i].end >> PAGE_SHIFT;
                 pxmram += e - s;
                 pxmram -= absent_pages_in_range(s, e);
-                pxmram -= nodes_add[i].end - nodes_add[i].start;
                 if ((long)pxmram < 0)
                         pxmram = 0;
         }