author     Steven Whitehouse <swhiteho@redhat.com>   2006-05-25 12:40:08 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-05-25 12:40:08 -0400
commit     c6a756795d5ba0637aae8da89dd11bb7e3a1ee74 (patch)
tree       1c19f951f2604dbb6b867a6dcdf94d20c204cc5c /mm
parent     382066da251132f768380f4852ed5afb72d88f80 (diff)
parent     a8bd60705aa17a998516837d9c1e503ad4cbd7fc (diff)
Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  30
-rw-r--r--  mm/slab.c        19
-rw-r--r--  mm/sparse.c       9
3 files changed, 39 insertions, 19 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c99..253a450 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
#include <linux/mempolicy.h>
#include <asm/tlbflush.h>
+#include <asm/div64.h>
#include "internal.h"
/*
@@ -950,7 +951,7 @@ restart:
goto got_pg;
do {
- if (cpuset_zone_allowed(*z, gfp_mask))
+ if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
wakeup_kswapd(*z, order);
} while (*(++z));
@@ -969,7 +970,8 @@ restart:
alloc_flags |= ALLOC_HARDER;
if (gfp_mask & __GFP_HIGH)
alloc_flags |= ALLOC_HIGH;
- alloc_flags |= ALLOC_CPUSET;
+ if (wait)
+ alloc_flags |= ALLOC_CPUSET;
/*
* Go through the zonelist again. Let __GFP_HIGH and allocations
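
These two hunks adjust how cpuset limits are applied on the allocator slow path: kswapd is only woken for zones that pass the stricter __GFP_HARDWALL check, and ALLOC_CPUSET is now set only for allocations that may sleep, so atomic requests are not failed merely because a zone falls outside the caller's cpuset. A minimal sketch of the resulting logic, with the surrounding __alloc_pages() code elided; wait is derived from __GFP_WAIT earlier in that function:

	do {
		/* wake kswapd only for zones allowed by the task's own cpuset */
		if (cpuset_zone_allowed(*z, gfp_mask | __GFP_HARDWALL))
			wakeup_kswapd(*z, order);
	} while (*(++z));

	/* ... alloc_flags set up as in the context lines above ... */
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)				/* may the caller sleep? */
		alloc_flags |= ALLOC_CPUSET;	/* atomic requests skip the cpuset check */
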
@@ -2123,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
/* ia64 gets its own node_mem_map, before this, without bootmem */
if (!pgdat->node_mem_map) {
- unsigned long size;
+ unsigned long size, start, end;
struct page *map;
- size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+ /*
+ * The zone's endpoints aren't required to be MAX_ORDER
+ * aligned but the node_mem_map endpoints must be in order
+ * for the buddy allocator to function correctly.
+ */
+ start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+ end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+ end = ALIGN(end, MAX_ORDER_NR_PAGES);
+ size = (end - start) * sizeof(struct page);
map = alloc_remap(pgdat->node_id, size);
if (!map)
map = alloc_bootmem_node(pgdat, size);
- pgdat->node_mem_map = map;
+ pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
#ifdef CONFIG_FLATMEM
/*
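
The arithmetic above rounds the node's pfn range out to MAX_ORDER boundaries so the buddy allocator always finds a valid struct page for a potential buddy, then offsets node_mem_map back to the real first pfn. A small standalone illustration with made-up numbers (assuming the common MAX_ORDER_NR_PAGES of 1024; not kernel code):

	#include <stdio.h>

	#define MAX_ORDER_NR_PAGES 1024UL		/* 1 << (MAX_ORDER - 1), MAX_ORDER == 11 */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long node_start_pfn = 5000, node_spanned_pages = 20000;
		unsigned long start, end;

		start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);	/* 4096: rounded down */
		end = ALIGN(node_start_pfn + node_spanned_pages,
			    MAX_ORDER_NR_PAGES);			/* 25600: rounded up */

		printf("map covers pfns %lu..%lu, %lu struct pages\n",
		       start, end - 1, end - start);			/* 4096..25599, 21504 */
		printf("node_mem_map = map + %lu\n",
		       node_start_pfn - start);				/* map + 904 */
		return 0;
	}
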
@@ -2566,9 +2576,11 @@ void setup_per_zone_pages_min(void)
}
for_each_zone(zone) {
- unsigned long tmp;
+ u64 tmp;
+
spin_lock_irqsave(&zone->lru_lock, flags);
- tmp = (pages_min * zone->present_pages) / lowmem_pages;
+ tmp = (u64)pages_min * zone->present_pages;
+ do_div(tmp, lowmem_pages);
if (is_highmem(zone)) {
/*
* __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2607,8 @@ void setup_per_zone_pages_min(void)
zone->pages_min = tmp;
}
- zone->pages_low = zone->pages_min + tmp / 4;
- zone->pages_high = zone->pages_min + tmp / 2;
+ zone->pages_low = zone->pages_min + (tmp >> 2);
+ zone->pages_high = zone->pages_min + (tmp >> 1);
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
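
The watermark hunks switch the per-zone calculation to 64-bit intermediate math: on 32-bit machines with enough memory, pages_min * zone->present_pages can exceed what an unsigned long holds, and do_div() (hence the new <asm/div64.h> include) divides the u64 in place by a 32-bit divisor. A standalone sketch of the overflow, using made-up zone sizes:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* hypothetical 32-bit box: ~16 GB highmem zone, ~860 MB of lowmem */
		uint32_t pages_min = 4096, present_pages = 4u << 20, lowmem_pages = 220000;

		uint32_t truncated = pages_min * present_pages / lowmem_pages;	/* product is 2^34, wraps to 0 */
		uint64_t tmp = (uint64_t)pages_min * present_pages;		/* kernel: u64 tmp */

		tmp /= lowmem_pages;		/* kernel: do_div(tmp, lowmem_pages) */
		printf("32-bit math: %u pages, 64-bit math: %llu pages\n",
		       truncated, (unsigned long long)tmp);			/* 0 vs 78090 */
		return 0;
	}
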
diff --git a/mm/slab.c b/mm/slab.c
index c32af7e..d31a06b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -700,6 +700,14 @@ static enum {
FULL
} g_cpucache_up;
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+ return g_cpucache_up == FULL;
+}
+
static DEFINE_PER_CPU(struct work_struct, reap_work);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -2192,11 +2200,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
- if (l3) {
+ if (l3 && l3->alien)
+ drain_alien_cache(cachep, l3->alien);
+ }
+
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (l3)
drain_array(cachep, l3, l3->shared, 1, node);
- if (l3->alien)
- drain_alien_cache(cachep, l3->alien);
- }
}
}
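
Putting the added lines and the retained context together, drain_cpu_caches() now walks the online nodes twice instead of handling both caches in one pass. A sketch of the resulting loop structure (variable declarations above the hunk are unchanged and omitted):

	check_irq_on();
	/* first pass: flush every node's alien (remote-node) object caches */
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (l3 && l3->alien)
			drain_alien_cache(cachep, l3->alien);
	}

	/* second pass: drain each node's shared array */
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (l3)
			drain_array(cachep, l3, l3->shared, 1, node);
	}
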
diff --git a/mm/sparse.c b/mm/sparse.c
index d7c32de..100040c 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,7 @@ static struct mem_section *sparse_index_alloc(int nid)
unsigned long array_size = SECTIONS_PER_ROOT *
sizeof(struct mem_section);
- if (system_state == SYSTEM_RUNNING)
+ if (slab_is_available())
section = kmalloc_node(array_size, GFP_KERNEL, nid);
else
section = alloc_bootmem_node(NODE_DATA(nid), array_size);
@@ -87,11 +87,8 @@ int __section_nr(struct mem_section* ms)
unsigned long root_nr;
struct mem_section* root;
- for (root_nr = 0;
- root_nr < NR_MEM_SECTIONS;
- root_nr += SECTIONS_PER_ROOT) {
- root = __nr_to_section(root_nr);
-
+ for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
+ root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
if (!root)
continue;
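
The first sparse.c hunk replaces the system_state test with the slab_is_available() helper introduced in the slab.c change above: SYSTEM_RUNNING is only set late in boot, well after the slab allocator is initialised, so the old test steered callers in that window to the bootmem allocator even though kmalloc was already usable. The second hunk makes __section_nr() iterate the NR_SECTION_ROOTS root slots directly rather than striding through section numbers. The allocator choice as it reads after the patch (taken from the hunk; the rest of sparse_index_alloc() is unchanged):

	/* use kmalloc_node() as soon as the slab allocator is really up,
	 * rather than waiting for system_state to become SYSTEM_RUNNING */
	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);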