author    Andi Kleen <ak@suse.de>                  2005-08-26 18:34:10 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>   2005-08-26 19:37:12 -0700
commit    485761bd6a72d33b3d4fa884927b2b0d983b701e (patch)
tree      c75562513489f62c8dcfd41acd467bca3d3202cc /arch
parent    bebf4688e9dbbfdd421736685d607bced91a3c91 (diff)
download  op-kernel-dev-485761bd6a72d33b3d4fa884927b2b0d983b701e.zip
          op-kernel-dev-485761bd6a72d33b3d4fa884927b2b0d983b701e.tar.gz
[PATCH] x86_64: Tell VM about holes in nodes
Some nodes can have large holes on x86-64. This fixes problems with the VM
allowing too many dirty pages, because it overestimates the amount of
available RAM in a node. In extreme cases you can end up with all RAM filled
with dirty pages, which can lead to deadlocks and other nasty behaviour.

This patch just tells the VM about the known holes from e820. Reserved memory
(like the kernel text or mem_map) is still not taken into account, but that
should leave only a few percent of error now.

A small detail is that the flat setup now uses the NUMA free_area_init_node()
too, because it offers more flexibility.

(akpm: lotsa thanks to Martin for working this problem out)

Cc: Martin Bligh <mbligh@mbligh.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
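[Editorial illustration] The hole accounting this patch introduces can be reproduced in a small user-space sketch. This is not the kernel's e820_hole_size() itself, though it follows the same clipping logic; the memory map, PAGE_SHIFT value, and main() driver are assumptions for demonstration. Each usable-RAM region is clipped to the queried pfn range on page boundaries, and whatever is left uncovered counts as hole:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct region { unsigned long addr, size; };	/* a usable-RAM map entry */

/* Pages in [start_pfn, end_pfn) not covered by any RAM region. */
static unsigned long hole_pages(const struct region *map, int n,
				unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long start = start_pfn << PAGE_SHIFT;
	unsigned long end = end_pfn << PAGE_SHIFT;
	unsigned long ram = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* Clip each region to the query range, on page boundaries;
		 * partial pages round inward, so they count as hole. */
		unsigned long addr = (map[i].addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
		unsigned long last = (map[i].addr + map[i].size) & ~(PAGE_SIZE - 1);
		if (addr < start)
			addr = start;
		if (last > end)
			last = end;
		if (last > addr)
			ram += last - addr;
	}
	return ((end - start) - ram) >> PAGE_SHIFT;
}

int main(void)
{
	/* 0-640K usable, 1M-3G usable: the classic legacy hole at 640K-1M. */
	struct region map[] = {
		{ 0x0,      0xa0000 },
		{ 0x100000, 0xbff00000 },
	};

	printf("%lu pages missing\n",
	       hole_pages(map, 2, 0, (16UL << 20) >> PAGE_SHIFT));
	return 0;
}

With the 384KB legacy hole between 640K and 1M, the sketch reports 96 missing pages in the first 16MB, which is the kind of figure e820_hole_size() feeds into the per-zone hole counts in the diff below.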
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86_64/kernel/e820.c  | 34
-rw-r--r--  arch/x86_64/mm/init.c      | 16
-rw-r--r--  arch/x86_64/mm/numa.c      |  8
3 files changed, 53 insertions(+), 5 deletions(-)
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 6ded3a5..b548dea 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -185,6 +185,40 @@ unsigned long __init e820_end_of_ram(void)
 }
 
 /*
+ * Compute how much memory is missing in a range.
+ * Unlike the other functions in this file the arguments are in page numbers.
+ */
+unsigned long __init
+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned long ram = 0;
+	unsigned long start = start_pfn << PAGE_SHIFT;
+	unsigned long end = end_pfn << PAGE_SHIFT;
+	int i;
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		unsigned long last, addr;
+
+		if (ei->type != E820_RAM ||
+		    ei->addr+ei->size <= start ||
+		    ei->addr >= end)
+			continue;
+
+		addr = round_up(ei->addr, PAGE_SIZE);
+		if (addr < start)
+			addr = start;
+
+		last = round_down(ei->addr + ei->size, PAGE_SIZE);
+		if (last >= end)
+			last = end;
+
+		if (last > addr)
+			ram += last - addr;
+	}
+	return ((end - start) - ram) >> PAGE_SHIFT;
+}
+
+/*
  * Mark e820 reserved areas as busy for the resource manager.
  */
void __init e820_reserve_resources(void)
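[Editorial note] On the rounding in the hunk above: round_up() pulls a region's start up to the next page boundary and round_down() pushes its end down, so partial pages at either edge of an e820 entry are counted as hole rather than RAM, which errs on the safe side for the dirty-page limits this patch targets. For power-of-two alignments the helpers behave like the following illustrative macros (an assumption-level sketch, not the kernel's own definitions):

#include <assert.h>

/* Sketch of round_up/round_down semantics for power-of-two y. */
#define round_up(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y) (((x) / (y)) * (y))

int main(void)
{
	/* A RAM entry starting at 0x9fc00 contributes nothing below 0xa0000 ... */
	assert(round_up(0x9fc00UL, 0x1000UL) == 0xa0000UL);
	/* ... and one ending at 0x9ffff contributes nothing above 0x9f000. */
	assert(round_down(0x9ffffUL, 0x1000UL) == 0x9f000UL);
	return 0;
}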
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 72e4b36..aa4a518 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -322,18 +322,26 @@ void zap_low_mappings(void)
 void __init paging_init(void)
 {
 	{
-		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+		unsigned long zones_size[MAX_NR_ZONES];
+		unsigned long holes[MAX_NR_ZONES];
 		unsigned int max_dma;
 
+		memset(zones_size, 0, sizeof(zones_size));
+		memset(holes, 0, sizeof(holes));
+
 		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-		if (end_pfn < max_dma)
+		if (end_pfn < max_dma) {
 			zones_size[ZONE_DMA] = end_pfn;
-		else {
+			holes[ZONE_DMA] = e820_hole_size(0, end_pfn);
+		} else {
 			zones_size[ZONE_DMA] = max_dma;
+			holes[ZONE_DMA] = e820_hole_size(0, max_dma);
 			zones_size[ZONE_NORMAL] = end_pfn - max_dma;
+			holes[ZONE_NORMAL] = e820_hole_size(max_dma, end_pfn);
 		}
-		free_area_init(zones_size);
+		free_area_init_node(0, NODE_DATA(0), zones_size,
+			__pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
 	}
 	return;
 }
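[Editorial illustration] To make the flat-mode zone split above concrete, here is a standalone sketch with assumed numbers: a 4GB machine with 4K pages, the 16MB ISA DMA boundary, and a 96-page hole below 1MB are all illustrative assumptions, and the literal hole values stand in for calls to e820_hole_size():

#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

int main(void)
{
	unsigned long end_pfn = 0x100000;		/* 4GB of 4K pages */
	unsigned long max_dma = (16UL << 20) >> 12;	/* pfn of the 16MB line */
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	unsigned long holes[MAX_NR_ZONES] = { 0 };

	/* Same branch structure as the patched paging_init(). */
	if (end_pfn < max_dma) {
		zones_size[ZONE_DMA] = end_pfn;
		holes[ZONE_DMA] = 96;	/* stand-in for e820_hole_size(0, end_pfn) */
	} else {
		zones_size[ZONE_DMA] = max_dma;
		holes[ZONE_DMA] = 96;	/* stand-in for e820_hole_size(0, max_dma) */
		zones_size[ZONE_NORMAL] = end_pfn - max_dma;
		holes[ZONE_NORMAL] = 0;	/* stand-in for e820_hole_size(max_dma, end_pfn) */
	}

	printf("DMA: %lu pages, %lu in holes; NORMAL: %lu pages, %lu in holes\n",
	       zones_size[ZONE_DMA], holes[ZONE_DMA],
	       zones_size[ZONE_NORMAL], holes[ZONE_NORMAL]);
	return 0;
}

free_area_init_node() then records each zone's size minus its holes as the memory that is actually present, instead of the raw pfn span.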
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 70cb290..6a156f5 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -126,9 +126,11 @@ void __init setup_node_zones(int nodeid)
 {
 	unsigned long start_pfn, end_pfn;
 	unsigned long zones[MAX_NR_ZONES];
+	unsigned long holes[MAX_NR_ZONES];
 	unsigned long dma_end_pfn;
 
 	memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES);
+	memset(holes, 0, sizeof(unsigned long) * MAX_NR_ZONES);
 
 	start_pfn = node_start_pfn(nodeid);
 	end_pfn = node_end_pfn(nodeid);
@@ -139,13 +141,17 @@ void __init setup_node_zones(int nodeid)
 	dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	if (start_pfn < dma_end_pfn) {
 		zones[ZONE_DMA] = dma_end_pfn - start_pfn;
+		holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn);
 		zones[ZONE_NORMAL] = end_pfn - dma_end_pfn;
+		holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn);
+
 	} else {
 		zones[ZONE_NORMAL] = end_pfn - start_pfn;
+		holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn);
 	}
 
 	free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
-			    start_pfn, NULL);
+			    start_pfn, holes);
 }
 
 void __init numa_init_array(void)
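[Editorial note] Why passing holes here matters: free_area_init_node() subtracts the zholes_size array from each zone's pfn span when computing the pages that actually exist, and it is that smaller figure the VM's dirty-page accounting should scale against, per the commit message. A minimal sketch of the bookkeeping follows; the struct and field names echo the 2.6-era zone fields but this is an illustration, not the mm/page_alloc.c source:

#include <stdio.h>

struct zone_sketch {
	unsigned long spanned_pages;	/* pfn range, holes included */
	unsigned long present_pages;	/* pages that really exist */
};

static void init_zone(struct zone_sketch *z,
		      unsigned long size, unsigned long holes)
{
	z->spanned_pages = size;
	z->present_pages = size - holes;
}

int main(void)
{
	struct zone_sketch normal;

	/* Assumed example: a node zone spanning 1M pages with a 64K-page hole. */
	init_zone(&normal, 1UL << 20, 1UL << 16);

	/* Before this patch the flat and NUMA paths effectively reported the
	 * full span; with holes reported, limits derive from present pages. */
	printf("spanned=%lu present=%lu\n",
	       normal.spanned_pages, normal.present_pages);
	return 0;
}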