Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     |  14
-rw-r--r--  mm/madvise.c     |  21
-rw-r--r--  mm/memory.c      |  17
-rw-r--r--  mm/mempolicy.c   | 189
-rw-r--r--  mm/nommu.c       |   2
-rw-r--r--  mm/oom_kill.c    | 124
-rw-r--r--  mm/page_alloc.c  |  62
-rw-r--r--  mm/rmap.c        |  51
-rw-r--r--  mm/shmem.c       |  89
-rw-r--r--  mm/slab.c        | 823
-rw-r--r--  mm/slob.c        |   2
-rw-r--r--  mm/swap.c        |  32
-rw-r--r--  mm/swap_state.c  |   1
-rw-r--r--  mm/swapfile.c    |  16
-rw-r--r--  mm/vmscan.c      | 441
15 files changed, 1345 insertions, 539 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b21d78c..5087077 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -85,7 +85,7 @@ void free_huge_page(struct page *page)
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
- page[1].mapping = NULL;
+ page[1].lru.next = NULL; /* reset dtor */
spin_lock(&hugetlb_lock);
enqueue_huge_page(page);
@@ -105,9 +105,9 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
}
spin_unlock(&hugetlb_lock);
set_page_count(page, 1);
- page[1].mapping = (void *)free_huge_page;
+ page[1].lru.next = (void *)free_huge_page; /* set dtor */
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
- clear_highpage(&page[i]);
+ clear_user_highpage(&page[i], addr);
return page;
}
@@ -391,12 +391,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
if (!new_page) {
page_cache_release(old_page);
-
- /* Logically this is OOM, not a SIGBUS, but an OOM
- * could cause the kernel to go killing other
- * processes which won't help the hugepage situation
- * at all (?) */
- return VM_FAULT_SIGBUS;
+ return VM_FAULT_OOM;
}
spin_unlock(&mm->page_table_lock);
@@ -444,6 +439,7 @@ retry:
page = alloc_huge_page(vma, address);
if (!page) {
hugetlb_put_quota(mapping);
+ ret = VM_FAULT_OOM;
goto out;
}
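
Both this hugetlb.c change and the mm/page_alloc.c hunks further down store a per-page destructor in the otherwise unused lru field of the first tail page. Below is a toy sketch of that convention, with made-up types rather than the kernel's struct page, and assuming the release path simply casts lru.next back to a function pointer (free_compound_page() later in this diff does the analogous cast for the order kept in lru.prev):

/* Toy illustration of the "dtor in page[1].lru" convention. The struct
 * definitions are stand-ins for this sketch only, not the kernel's. */
#include <stdio.h>

struct list_head { void *next, *prev; };
struct page { struct list_head lru; };

typedef void (*page_dtor_t)(struct page *);

static void free_huge_page(struct page *head)
{
	printf("free_huge_page(%p)\n", (void *)head);
}

static void prep(struct page *head, unsigned long order, page_dtor_t dtor)
{
	head[1].lru.next = (void *)dtor;	/* set dtor */
	head[1].lru.prev = (void *)order;	/* remember the order */
}

static void release(struct page *head)
{
	page_dtor_t dtor = (page_dtor_t)head[1].lru.next;
	unsigned long order = (unsigned long)head[1].lru.prev;

	printf("order %lu\n", order);
	dtor(head);
	head[1].lru.next = NULL;		/* reset dtor */
}

int main(void)
{
	struct page pages[4];

	prep(pages, 2, free_huge_page);
	release(pages);
	return 0;
}

The /* set dtor */ and /* reset dtor */ comments in the hunk above correspond to the prep() and release() steps of this sketch.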
diff --git a/mm/madvise.c b/mm/madvise.c
index ae0ae3e..af3d573 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -22,16 +22,23 @@ static long madvise_behavior(struct vm_area_struct * vma,
struct mm_struct * mm = vma->vm_mm;
int error = 0;
pgoff_t pgoff;
- int new_flags = vma->vm_flags & ~VM_READHINTMASK;
+ int new_flags = vma->vm_flags;
switch (behavior) {
+ case MADV_NORMAL:
+ new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
+ break;
case MADV_SEQUENTIAL:
- new_flags |= VM_SEQ_READ;
+ new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
break;
case MADV_RANDOM:
- new_flags |= VM_RAND_READ;
+ new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
break;
- default:
+ case MADV_DONTFORK:
+ new_flags |= VM_DONTCOPY;
+ break;
+ case MADV_DOFORK:
+ new_flags &= ~VM_DONTCOPY;
break;
}
@@ -177,6 +184,12 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
long error;
switch (behavior) {
+ case MADV_DOFORK:
+ if (vma->vm_flags & VM_IO) {
+ error = -EINVAL;
+ break;
+ }
+ case MADV_DONTFORK:
case MADV_NORMAL:
case MADV_SEQUENTIAL:
case MADV_RANDOM:
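
The reworked madvise_behavior() makes MADV_NORMAL/MADV_SEQUENTIAL/MADV_RANDOM mutually exclusive and adds MADV_DONTFORK/MADV_DOFORK, refusing MADV_DOFORK on VM_IO mappings. A hedged userspace sketch of the new hints follows; the constants may need a libc that already knows them, and the fallback values below match the x86 kernel header of this era:

/* Mark a buffer MADV_DONTFORK so fork()ed children do not inherit the
 * mapping, then undo it with MADV_DOFORK. Illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MADV_DONTFORK
#define MADV_DONTFORK	10
#define MADV_DOFORK	11
#endif

int main(void)
{
	size_t len = 1 << 20;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return EXIT_FAILURE;

	/* e.g. a buffer handed to a device: keep it out of children */
	if (madvise(buf, len, MADV_DONTFORK) < 0)
		perror("madvise(MADV_DONTFORK)");

	/* ... fork() workers here; they will not see 'buf' ... */

	if (madvise(buf, len, MADV_DOFORK) < 0)
		perror("madvise(MADV_DOFORK)");

	munmap(buf, len);
	return 0;
}

(The VM_IO check above keeps device-backed mappings from being switched back to copy-on-fork.)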
diff --git a/mm/memory.c b/mm/memory.c
index 7a11ddd..9abc600 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -82,6 +82,16 @@ EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
EXPORT_SYMBOL(vmalloc_earlyreserve);
+int randomize_va_space __read_mostly = 1;
+
+static int __init disable_randmaps(char *s)
+{
+ randomize_va_space = 0;
+ return 0;
+}
+__setup("norandmaps", disable_randmaps);
+
+
/*
* If a p?d_bad entry is found while walking page tables, report
* the error, before resetting entry to p?d_none. Usually (but
@@ -1871,6 +1881,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
entry = pte_to_swp_entry(orig_pte);
+again:
page = lookup_swap_cache(entry);
if (!page) {
swapin_readahead(entry, address, vma);
@@ -1894,6 +1905,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
mark_page_accessed(page);
lock_page(page);
+ if (!PageSwapCache(page)) {
+ /* Page migration has occurred */
+ unlock_page(page);
+ page_cache_release(page);
+ goto again;
+ }
/*
* Back out if somebody else already faulted in this pte.
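
Alongside the do_swap_page() retry for pages that migration pulled out of the swap cache, memory.c gains the randomize_va_space flag and its norandmaps boot switch. A usage sketch: the boot parameter comes straight from the __setup() above, while the sysctl path is an assumption about the companion interface outside this mm/ diff:

	# disable VA-space randomization for the whole boot
	linux ... norandmaps

	# or, assuming the matching sysctl is wired up elsewhere
	echo 0 > /proc/sys/kernel/randomize_va_space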
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7379018..880831b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -95,6 +95,9 @@
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
+/* The number of pages to migrate per call to migrate_pages() */
+#define MIGRATE_CHUNK_SIZE 256
+
static kmem_cache_t *policy_cache;
static kmem_cache_t *sn_cache;
@@ -129,19 +132,29 @@ static int mpol_check_policy(int mode, nodemask_t *nodes)
}
return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
}
+
/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
struct zonelist *zl;
- int num, max, nd;
+ int num, max, nd, k;
max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
- zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
+ zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
if (!zl)
return NULL;
num = 0;
- for_each_node_mask(nd, *nodes)
- zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone];
+ /* First put in the highest zones from all nodes, then all the next
+ lower zones etc. Avoid empty zones because the memory allocator
+ doesn't like them. If you implement node hot removal you
+ have to fix that. */
+ for (k = policy_zone; k >= 0; k--) {
+ for_each_node_mask(nd, *nodes) {
+ struct zone *z = &NODE_DATA(nd)->node_zones[k];
+ if (z->present_pages > 0)
+ zl->zones[num++] = z;
+ }
+ }
zl->zones[num] = NULL;
return zl;
}
@@ -543,24 +556,91 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
}
}
-static int swap_pages(struct list_head *pagelist)
+/*
+ * Migrate the list 'pagelist' of pages to a certain destination.
+ *
+ * Specify destination with either non-NULL vma or dest_node >= 0
+ * Return the number of pages not migrated or error code
+ */
+static int migrate_pages_to(struct list_head *pagelist,
+ struct vm_area_struct *vma, int dest)
{
+ LIST_HEAD(newlist);
LIST_HEAD(moved);
LIST_HEAD(failed);
- int n;
+ int err = 0;
+ int nr_pages;
+ struct page *page;
+ struct list_head *p;
- n = migrate_pages(pagelist, NULL, &moved, &failed);
- putback_lru_pages(&failed);
- putback_lru_pages(&moved);
+redo:
+ nr_pages = 0;
+ list_for_each(p, pagelist) {
+ if (vma)
+ page = alloc_page_vma(GFP_HIGHUSER, vma, vma->vm_start);
+ else
+ page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
- return n;
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
+ list_add(&page->lru, &newlist);
+ nr_pages++;
+ if (nr_pages > MIGRATE_CHUNK_SIZE)
+ break;
+ }
+ err = migrate_pages(pagelist, &newlist, &moved, &failed);
+
+ putback_lru_pages(&moved); /* Call release pages instead ?? */
+
+ if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
+ goto redo;
+out:
+ /* Return leftover allocated pages */
+ while (!list_empty(&newlist)) {
+ page = list_entry(newlist.next, struct page, lru);
+ list_del(&page->lru);
+ __free_page(page);
+ }
+ list_splice(&failed, pagelist);
+ if (err < 0)
+ return err;
+
+ /* Calculate number of leftover pages */
+ nr_pages = 0;
+ list_for_each(p, pagelist)
+ nr_pages++;
+ return nr_pages;
}
/*
- * For now migrate_pages simply swaps out the pages from nodes that are in
- * the source set but not in the target set. In the future, we would
- * want a function that moves pages between the two nodesets in such
- * a way as to preserve the physical layout as much as possible.
+ * Migrate pages from one node to a target node.
+ * Returns error or the number of pages not migrated.
+ */
+int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
+{
+ nodemask_t nmask;
+ LIST_HEAD(pagelist);
+ int err = 0;
+
+ nodes_clear(nmask);
+ node_set(source, nmask);
+
+ check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
+ flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+
+ if (!list_empty(&pagelist)) {
+ err = migrate_pages_to(&pagelist, NULL, dest);
+ if (!list_empty(&pagelist))
+ putback_lru_pages(&pagelist);
+ }
+ return err;
+}
+
+/*
+ * Move pages between the two nodesets so as to preserve the physical
+ * layout as much as possible.
*
* Returns the number of page that could not be moved.
*/
@@ -568,22 +648,76 @@ int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
LIST_HEAD(pagelist);
- int count = 0;
- nodemask_t nodes;
+ int busy = 0;
+ int err = 0;
+ nodemask_t tmp;
- nodes_andnot(nodes, *from_nodes, *to_nodes);
+ down_read(&mm->mmap_sem);
- down_read(&mm->mmap_sem);
- check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes,
- flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+/*
+ * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
+ * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
+ * bit in 'tmp', and return that <source, dest> pair for migration.
+ * The pair of nodemasks 'to' and 'from' define the map.
+ *
+ * If no pair of bits is found that way, fallback to picking some
+ * pair of 'source' and 'dest' bits that are not the same. If the
+ * 'source' and 'dest' bits are the same, this represents a node
+ * that will be migrating to itself, so no pages need move.
+ *
+ * If no bits are left in 'tmp', or if all remaining bits left
+ * in 'tmp' correspond to the same bit in 'to', return false
+ * (nothing left to migrate).
+ *
+ * This lets us pick a pair of nodes to migrate between, such that
+ * if possible the dest node is not already occupied by some other
+ * source node, minimizing the risk of overloading the memory on a
+ * node that would happen if we migrated incoming memory to a node
+ * before migrating outgoing memory source that same node.
+ *
+ * A single scan of tmp is sufficient. As we go, we remember the
+ * most recent <s, d> pair that moved (s != d). If we find a pair
+ * that not only moved, but what's better, moved to an empty slot
+ * (d is not set in tmp), then we break out then, with that pair.
+ * Otherwise when we finish scanning from_tmp, we at least have the
+ * most recent <s, d> pair that moved. If we get all the way through
+ * the scan of tmp without finding any node that moved, much less
+ * moved to an empty node, then there is nothing left worth migrating.
+ */
- if (!list_empty(&pagelist)) {
- count = swap_pages(&pagelist);
- putback_lru_pages(&pagelist);
+ tmp = *from_nodes;
+ while (!nodes_empty(tmp)) {
+ int s,d;
+ int source = -1;
+ int dest = 0;
+
+ for_each_node_mask(s, tmp) {
+ d = node_remap(s, *from_nodes, *to_nodes);
+ if (s == d)
+ continue;
+
+ source = s; /* Node moved. Memorize */
+ dest = d;
+
+ /* dest not in remaining from nodes? */
+ if (!node_isset(dest, tmp))
+ break;
+ }
+ if (source == -1)
+ break;
+
+ node_clear(source, tmp);
+ err = migrate_to_node(mm, source, dest, flags);
+ if (err > 0)
+ busy += err;
+ if (err < 0)
+ break;
}
up_read(&mm->mmap_sem);
- return count;
+ if (err < 0)
+ return err;
+ return busy;
}
long do_mbind(unsigned long start, unsigned long len,
@@ -643,8 +777,9 @@ long do_mbind(unsigned long start, unsigned long len,
int nr_failed = 0;
err = mbind_range(vma, start, end, new);
+
if (!list_empty(&pagelist))
- nr_failed = swap_pages(&pagelist);
+ nr_failed = migrate_pages_to(&pagelist, vma, -1);
if (!err && nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
@@ -673,6 +808,8 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
nodes_clear(*nodes);
if (maxnode == 0 || !nmask)
return 0;
+ if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
+ return -EINVAL;
nlongs = BITS_TO_LONGS(maxnode);
if ((maxnode % BITS_PER_LONG) == 0)
@@ -1034,6 +1171,7 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
return interleave_nodes(pol);
}
+#ifdef CONFIG_HUGETLBFS
/* Return a zonelist suitable for a huge page allocation. */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
{
@@ -1047,6 +1185,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
}
return zonelist_policy(GFP_HIGHUSER, pol);
}
+#endif
/* Allocate a page in interleaved policy.
Own path because it needs to do special accounting. */
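
The long comment in do_migrate_pages() above spells out how <source, dest> node pairs are picked. Below is a self-contained toy in C: nodes are bits in an unsigned long, and remap() is a stand-in whose semantics (the n-th set bit of 'from' maps to the n-th set bit of 'to', wrapping) are an assumption for illustration, not a quote of node_remap():

/* Toy model of the <source, dest> pair selection in do_migrate_pages(). */
#include <stdio.h>

static int remap(int s, unsigned long from, unsigned long to)
{
	int n = 0, i, seen = 0;

	for (i = 0; i < s; i++)			/* position of s within 'from' */
		if (from & (1UL << i))
			n++;
	for (i = 0; i < 64; i++)		/* n-th set bit of 'to', wrapping */
		if (to & (1UL << i) && seen++ == n % __builtin_popcountl(to))
			return i;
	return s;
}

int main(void)
{
	unsigned long from = 0x3;		/* nodes 0,1 */
	unsigned long to   = 0x6;		/* nodes 1,2 */
	unsigned long tmp  = from;

	while (tmp) {
		int s, d, source = -1, dest = 0;

		for (s = 0; s < 64; s++) {
			if (!(tmp & (1UL << s)))
				continue;
			d = remap(s, from, to);
			if (s == d)
				continue;
			source = s;		/* node moved; memorize */
			dest = d;
			if (!(tmp & (1UL << dest)))	/* empty slot: best pick */
				break;
		}
		if (source == -1)
			break;
		tmp &= ~(1UL << source);
		printf("migrate node %d -> node %d\n", source, dest);
	}
	return 0;
}

For from={0,1}, to={1,2} it prints node 1 -> 2 before node 0 -> 1, i.e. a node is drained before other nodes' pages are migrated onto it, which is the property the comment argues for.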
diff --git a/mm/nommu.c b/mm/nommu.c
index c10262d..99d2102 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -57,6 +57,8 @@ EXPORT_SYMBOL(vmalloc);
EXPORT_SYMBOL(vfree);
EXPORT_SYMBOL(vmalloc_to_page);
EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmap);
+EXPORT_SYMBOL(vunmap);
/*
* Handle all mappings that got truncated by a "truncate()"
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 14bd4ec..8123fad 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -58,15 +58,17 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
/*
* Processes which fork a lot of child processes are likely
- * a good choice. We add the vmsize of the children if they
+ * a good choice. We add half the vmsize of the children if they
* have an own mm. This prevents forking servers to flood the
- * machine with an endless amount of children
+ * machine with an endless amount of children. In case a single
+ * child is eating the vast majority of memory, adding only half
+ * to the parents will make the child our kill candidate of choice.
*/
list_for_each(tsk, &p->children) {
struct task_struct *chld;
chld = list_entry(tsk, struct task_struct, sibling);
if (chld->mm != p->mm && chld->mm)
- points += chld->mm->total_vm;
+ points += chld->mm->total_vm/2 + 1;
}
/*
@@ -131,17 +133,47 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
}
/*
+ * Types of limitations to the nodes from which allocations may occur
+ */
+#define CONSTRAINT_NONE 1
+#define CONSTRAINT_MEMORY_POLICY 2
+#define CONSTRAINT_CPUSET 3
+
+/*
+ * Determine the type of allocation constraint.
+ */
+static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
+{
+#ifdef CONFIG_NUMA
+ struct zone **z;
+ nodemask_t nodes = node_online_map;
+
+ for (z = zonelist->zones; *z; z++)
+ if (cpuset_zone_allowed(*z, gfp_mask))
+ node_clear((*z)->zone_pgdat->node_id,
+ nodes);
+ else
+ return CONSTRAINT_CPUSET;
+
+ if (!nodes_empty(nodes))
+ return CONSTRAINT_MEMORY_POLICY;
+#endif
+
+ return CONSTRAINT_NONE;
+}
+
+/*
* Simple selection loop. We chose the process with the highest
* number of 'points'. We expect the caller will lock the tasklist.
*
* (not docbooked, we don't want this one cluttering up the manual)
*/
-static struct task_struct * select_bad_process(void)
+static struct task_struct *select_bad_process(unsigned long *ppoints)
{
- unsigned long maxpoints = 0;
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
struct timespec uptime;
+ *ppoints = 0;
do_posix_clock_monotonic_gettime(&uptime);
do_each_thread(g, p) {
@@ -169,9 +201,9 @@ static struct task_struct * select_bad_process(void)
return p;
points = badness(p, uptime.tv_sec);
- if (points > maxpoints || !chosen) {
+ if (points > *ppoints || !chosen) {
chosen = p;
- maxpoints = points;
+ *ppoints = points;
}
} while_each_thread(g, p);
return chosen;
@@ -182,7 +214,7 @@ static struct task_struct * select_bad_process(void)
* CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
* we select a process with CAP_SYS_RAW_IO set).
*/
-static void __oom_kill_task(task_t *p)
+static void __oom_kill_task(task_t *p, const char *message)
{
if (p->pid == 1) {
WARN_ON(1);
@@ -198,8 +230,8 @@ static void __oom_kill_task(task_t *p)
return;
}
task_unlock(p);
- printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n",
- p->pid, p->comm);
+ printk(KERN_ERR "%s: Killed process %d (%s).\n",
+ message, p->pid, p->comm);
/*
* We give our sacrificial lamb high priority and access to
@@ -212,7 +244,7 @@ static void __oom_kill_task(task_t *p)
force_sig(SIGKILL, p);
}
-static struct mm_struct *oom_kill_task(task_t *p)
+static struct mm_struct *oom_kill_task(task_t *p, const char *message)
{
struct mm_struct *mm = get_task_mm(p);
task_t * g, * q;
@@ -224,35 +256,38 @@ static struct mm_struct *oom_kill_task(task_t *p)
return NULL;
}
- __oom_kill_task(p);
+ __oom_kill_task(p, message);
/*
* kill all processes that share the ->mm (i.e. all threads),
* but are in a different thread group
*/
do_each_thread(g, q)
if (q->mm == mm && q->tgid != p->tgid)
- __oom_kill_task(q);
+ __oom_kill_task(q, message);
while_each_thread(g, q);
return mm;
}
-static struct mm_struct *oom_kill_process(struct task_struct *p)
+static struct mm_struct *oom_kill_process(struct task_struct *p,
+ unsigned long points, const char *message)
{
struct mm_struct *mm;
struct task_struct *c;
struct list_head *tsk;
+ printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li and "
+ "children.\n", p->pid, p->comm, points);
/* Try to kill a child first */
list_for_each(tsk, &p->children) {
c = list_entry(tsk, struct task_struct, sibling);
if (c->mm == p->mm)
continue;
- mm = oom_kill_task(c);
+ mm = oom_kill_task(c, message);
if (mm)
return mm;
}
- return oom_kill_task(p);
+ return oom_kill_task(p, message);
}
/**
@@ -263,38 +298,63 @@ static struct mm_struct *oom_kill_process(struct task_struct *p)
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(gfp_t gfp_mask, int order)
+void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
struct mm_struct *mm = NULL;
- task_t * p;
+ task_t *p;
+ unsigned long points;
if (printk_ratelimit()) {
printk("oom-killer: gfp_mask=0x%x, order=%d\n",
gfp_mask, order);
+ dump_stack();
show_mem();
}
cpuset_lock();
read_lock(&tasklist_lock);
+
+ /*
+ * Check if there were limitations on the allocation (only relevant for
+ * NUMA) that may require different handling.
+ */
+ switch (constrained_alloc(zonelist, gfp_mask)) {
+ case CONSTRAINT_MEMORY_POLICY:
+ mm = oom_kill_process(current, points,
+ "No available memory (MPOL_BIND)");
+ break;
+
+ case CONSTRAINT_CPUSET:
+ mm = oom_kill_process(current, points,
+ "No available memory in cpuset");
+ break;
+
+ case CONSTRAINT_NONE:
retry:
- p = select_bad_process();
+ /*
+ * Rambo mode: Shoot down a process and hope it solves whatever
+ * issues we may have.
+ */
+ p = select_bad_process(&points);
- if (PTR_ERR(p) == -1UL)
- goto out;
+ if (PTR_ERR(p) == -1UL)
+ goto out;
- /* Found nothing?!?! Either we hang forever, or we panic. */
- if (!p) {
- read_unlock(&tasklist_lock);
- cpuset_unlock();
- panic("Out of memory and no killable processes...\n");
- }
+ /* Found nothing?!?! Either we hang forever, or we panic. */
+ if (!p) {
+ read_unlock(&tasklist_lock);
+ cpuset_unlock();
+ panic("Out of memory and no killable processes...\n");
+ }
- mm = oom_kill_process(p);
- if (!mm)
- goto retry;
+ mm = oom_kill_process(p, points, "Out of memory");
+ if (!mm)
+ goto retry;
+
+ break;
+ }
- out:
- read_unlock(&tasklist_lock);
+out:
cpuset_unlock();
if (mm)
mmput(mm);
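
The badness() hunk above now adds only half of each child's vmsize to the parent. A toy calculation with invented numbers shows why that flips the choice toward a single memory-hogging child (the real badness() applies further adjustments):

#include <stdio.h>

int main(void)
{
	unsigned long parent_vm = 10 * 1024;	/* pages */
	unsigned long child_vm = 200 * 1024;

	unsigned long old_parent = parent_vm + child_vm;		/* 215040 */
	unsigned long new_parent = parent_vm + child_vm / 2 + 1;	/* 112641 */

	printf("child: %lu, parent(old): %lu, parent(new): %lu\n",
	       child_vm, old_parent, new_parent);
	/* old: the parent always outscored its huge child; new: the child
	 * itself carries the highest score, and oom_kill_process() still
	 * tries children first. */
	return 0;
}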
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index df54e2f..791690d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,6 +56,7 @@ long nr_swap_pages;
int percpu_pagelist_fraction;
static void fastcall free_hot_cold_page(struct page *page, int cold);
+static void __free_pages_ok(struct page *page, unsigned int order);
/*
* results with 256, 32 in the lowmem_reserve sysctl:
@@ -169,20 +170,23 @@ static void bad_page(struct page *page)
* All pages have PG_compound set. All pages have their ->private pointing at
* the head page (even the head page has this).
*
- * The first tail page's ->mapping, if non-zero, holds the address of the
- * compound page's put_page() function.
- *
- * The order of the allocation is stored in the first tail page's ->index
- * This is only for debug at present. This usage means that zero-order pages
- * may not be compound.
+ * The first tail page's ->lru.next holds the address of the compound page's
+ * put_page() function. Its ->lru.prev holds the order of allocation.
+ * This usage means that zero-order pages may not be compound.
*/
+
+static void free_compound_page(struct page *page)
+{
+ __free_pages_ok(page, (unsigned long)page[1].lru.prev);
+}
+
static void prep_compound_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
- page[1].mapping = NULL;
- page[1].index = order;
+ page[1].lru.next = (void *)free_compound_page; /* set dtor */
+ page[1].lru.prev = (void *)order;
for (i = 0; i < nr_pages; i++) {
struct page *p = page + i;
@@ -196,7 +200,7 @@ static void destroy_compound_page(struct page *page, unsigned long order)
int i;
int nr_pages = 1 << order;
- if (unlikely(page[1].index != order))
+ if (unlikely((unsigned long)page[1].lru.prev != order))
bad_page(page);
for (i = 0; i < nr_pages; i++) {
@@ -1011,7 +1015,7 @@ rebalance:
if (page)
goto got_pg;
- out_of_memory(gfp_mask, order);
+ out_of_memory(zonelist, gfp_mask, order);
goto restart;
}
@@ -1213,18 +1217,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
int cpu = 0;
- memset(ret, 0, sizeof(*ret));
+ memset(ret, 0, nr * sizeof(unsigned long));
cpus_and(*cpumask, *cpumask, cpu_online_map);
cpu = first_cpu(*cpumask);
while (cpu < NR_CPUS) {
unsigned long *in, *out, off;
+ if (!cpu_isset(cpu, *cpumask))
+ continue;
+
in = (unsigned long *)&per_cpu(page_states, cpu);
cpu = next_cpu(cpu, *cpumask);
- if (cpu < NR_CPUS)
+ if (likely(cpu < NR_CPUS))
prefetch(&per_cpu(page_states, cpu));
out = (unsigned long *)ret;
@@ -1534,29 +1541,29 @@ static int __initdata node_load[MAX_NUMNODES];
*/
static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
{
- int i, n, val;
+ int n, val;
int min_val = INT_MAX;
int best_node = -1;
- for_each_online_node(i) {
- cpumask_t tmp;
+ /* Use the local node if we haven't already */
+ if (!node_isset(node, *used_node_mask)) {
+ node_set(node, *used_node_mask);
+ return node;
+ }
- /* Start from local node */
- n = (node+i) % num_online_nodes();
+ for_each_online_node(n) {
+ cpumask_t tmp;
/* Don't want a node to appear more than once */
if (node_isset(n, *used_node_mask))
continue;
- /* Use the local node if we haven't already */
- if (!node_isset(node, *used_node_mask)) {
- best_node = node;
- break;
- }
-
/* Use the distance array to find the distance */
val = node_distance(node, n);
+ /* Penalize nodes under us ("prefer the next node") */
+ val += (n < node);
+
/* Give preference to headless and unused nodes */
tmp = node_to_cpumask(n);
if (!cpus_empty(tmp))
@@ -1799,7 +1806,7 @@ void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif
-static int __meminit zone_batchsize(struct zone *zone)
+static int __cpuinit zone_batchsize(struct zone *zone)
{
int batch;
@@ -1886,14 +1893,13 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
* not check if the processor is online before following the pageset pointer.
* Other parts of the kernel may not check if the zone is available.
*/
-static struct per_cpu_pageset
- boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
/*
* Dynamically allocate memory for the
* per cpu pageset array in struct zone.
*/
-static int __meminit process_zones(int cpu)
+static int __cpuinit process_zones(int cpu)
{
struct zone *zone, *dzone;
@@ -1934,7 +1940,7 @@ static inline void free_zone_pagesets(int cpu)
}
}
-static int __meminit pageset_cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/mm/rmap.c b/mm/rmap.c
index d85a99d..df2c41c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -52,6 +52,7 @@
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
+#include <linux/module.h>
#include <asm/tlbflush.h>
@@ -205,6 +206,36 @@ out:
return anon_vma;
}
+#ifdef CONFIG_MIGRATION
+/*
+ * Remove an anonymous page from swap replacing the swap pte's
+ * through real pte's pointing to valid pages and then releasing
+ * the page from the swap cache.
+ *
+ * Must hold page lock on page.
+ */
+void remove_from_swap(struct page *page)
+{
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+
+ if (!PageAnon(page) || !PageSwapCache(page))
+ return;
+
+ anon_vma = page_lock_anon_vma(page);
+ if (!anon_vma)
+ return;
+
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+ remove_vma_swap(vma, page);
+
+ spin_unlock(&anon_vma->lock);
+
+ delete_from_swap_cache(page);
+}
+EXPORT_SYMBOL(remove_from_swap);
+#endif
+
/*
* At what user virtual address is page expected in vma?
*/
@@ -541,7 +572,8 @@ void page_remove_rmap(struct page *page)
* Subfunctions of try_to_unmap: try_to_unmap_one called
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ int ignore_refs)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -564,7 +596,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
* skipped over this mm) then we should reactivate it.
*/
if ((vma->vm_flags & VM_LOCKED) ||
- ptep_clear_flush_young(vma, address, pte)) {
+ (ptep_clear_flush_young(vma, address, pte)
+ && !ignore_refs)) {
ret = SWAP_FAIL;
goto out_unmap;
}
@@ -698,7 +731,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
pte_unmap_unlock(pte - 1, ptl);
}
-static int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, int ignore_refs)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
@@ -709,7 +742,7 @@ static int try_to_unmap_anon(struct page *page)
return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- ret = try_to_unmap_one(page, vma);
+ ret = try_to_unmap_one(page, vma, ignore_refs);
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
@@ -726,7 +759,7 @@ static int try_to_unmap_anon(struct page *page)
*
* This function is only called from try_to_unmap for object-based pages.
*/
-static int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, int ignore_refs)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -740,7 +773,7 @@ static int try_to_unmap_file(struct page *page)
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- ret = try_to_unmap_one(page, vma);
+ ret = try_to_unmap_one(page, vma, ignore_refs);
if (ret == SWAP_FAIL || !page_mapped(page))
goto out;
}
@@ -825,16 +858,16 @@ out:
* SWAP_AGAIN - we missed a mapping, try again later
* SWAP_FAIL - the page is unswappable
*/
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, int ignore_refs)
{
int ret;
BUG_ON(!PageLocked(page));
if (PageAnon(page))
- ret = try_to_unmap_anon(page);
+ ret = try_to_unmap_anon(page, ignore_refs);
else
- ret = try_to_unmap_file(page);
+ ret = try_to_unmap_file(page, ignore_refs);
if (!page_mapped(page))
ret = SWAP_SUCCESS;
diff --git a/mm/shmem.c b/mm/shmem.c
index ce501bc..7c455fb 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -45,6 +45,7 @@
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
+#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
@@ -874,6 +875,51 @@ redirty:
}
#ifdef CONFIG_NUMA
+static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+{
+ char *nodelist = strchr(value, ':');
+ int err = 1;
+
+ if (nodelist) {
+ /* NUL-terminate policy string */
+ *nodelist++ = '\0';
+ if (nodelist_parse(nodelist, *policy_nodes))
+ goto out;
+ }
+ if (!strcmp(value, "default")) {
+ *policy = MPOL_DEFAULT;
+ /* Don't allow a nodelist */
+ if (!nodelist)
+ err = 0;
+ } else if (!strcmp(value, "prefer")) {
+ *policy = MPOL_PREFERRED;
+ /* Insist on a nodelist of one node only */
+ if (nodelist) {
+ char *rest = nodelist;
+ while (isdigit(*rest))
+ rest++;
+ if (!*rest)
+ err = 0;
+ }
+ } else if (!strcmp(value, "bind")) {
+ *policy = MPOL_BIND;
+ /* Insist on a nodelist */
+ if (nodelist)
+ err = 0;
+ } else if (!strcmp(value, "interleave")) {
+ *policy = MPOL_INTERLEAVE;
+ /* Default to nodes online if no nodelist */
+ if (!nodelist)
+ *policy_nodes = node_online_map;
+ err = 0;
+ }
+out:
+ /* Restore string for error message */
+ if (nodelist)
+ *--nodelist = ':';
+ return err;
+}
+
static struct page *shmem_swapin_async(struct shared_policy *p,
swp_entry_t entry, unsigned long idx)
{
@@ -926,6 +972,11 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
return page;
}
#else
+static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+{
+ return 1;
+}
+
static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
@@ -1028,6 +1079,14 @@ repeat:
page_cache_release(swappage);
goto repeat;
}
+ if (!PageSwapCache(swappage)) {
+ /* Page migration has occurred */
+ shmem_swp_unmap(entry);
+ spin_unlock(&info->lock);
+ unlock_page(swappage);
+ page_cache_release(swappage);
+ goto repeat;
+ }
if (PageWriteback(swappage)) {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
@@ -1851,7 +1910,23 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid,
{
char *this_char, *value, *rest;
- while ((this_char = strsep(&options, ",")) != NULL) {
+ while (options != NULL) {
+ this_char = options;
+ for (;;) {
+ /*
+ * NUL-terminate this option: unfortunately,
+ * mount options form a comma-separated list,
+ * but mpol's nodelist may also contain commas.
+ */
+ options = strchr(options, ',');
+ if (options == NULL)
+ break;
+ options++;
+ if (!isdigit(*options)) {
+ options[-1] = '\0';
+ break;
+ }
+ }
if (!*this_char)
continue;
if ((value = strchr(this_char,'=')) != NULL) {
@@ -1902,18 +1977,8 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid,
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"mpol")) {
- if (!strcmp(value,"default"))
- *policy = MPOL_DEFAULT;
- else if (!strcmp(value,"preferred"))
- *policy = MPOL_PREFERRED;
- else if (!strcmp(value,"bind"))
- *policy = MPOL_BIND;
- else if (!strcmp(value,"interleave"))
- *policy = MPOL_INTERLEAVE;
- else
+ if (shmem_parse_mpol(value,policy,policy_nodes))
goto bad_val;
- } else if (!strcmp(this_char,"mpol_nodelist")) {
- nodelist_parse(value, *policy_nodes);
} else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n",
this_char);
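
shmem_parse_mpol() above expects mpol=<policy>:<nodelist>, and the reworked option splitter only breaks on commas that are not followed by a digit, so a range list such as 0,2-3 survives inside the nodelist. A usage sketch via mount(2); the target path and node numbers are made up, and NUMA-enabled tmpfs is assumed:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "mpol=interleave:0,2-3": the comma before '2' is kept because a
	 * digit follows it, while the comma after "size=64m" still splits
	 * the option list. */
	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
		  "size=64m,mpol=interleave:0,2-3") < 0)
		perror("mount");
	return 0;
}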
diff --git a/mm/slab.c b/mm/slab.c
index 6f8495e..add05d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -55,7 +55,7 @@
*
* SMP synchronization:
* constructors and destructors are called without any locking.
- * Several members in kmem_cache_t and struct slab never change, they
+ * Several members in struct kmem_cache and struct slab never change, they
* are accessed without any locking.
* The per-cpu arrays are never accessed from the wrong cpu, no locking,
* and local interrupts are disabled so slab code is preempt-safe.
@@ -244,7 +244,7 @@ struct slab {
*/
struct slab_rcu {
struct rcu_head head;
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
void *addr;
};
@@ -294,6 +294,7 @@ struct kmem_list3 {
unsigned long next_reap;
int free_touched;
unsigned int free_limit;
+ unsigned int colour_next; /* Per-node cache coloring */
spinlock_t list_lock;
struct array_cache *shared; /* shared per node */
struct array_cache **alien; /* on other nodes */
@@ -316,6 +317,8 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
*/
static __always_inline int index_of(const size_t size)
{
+ extern void __bad_size(void);
+
if (__builtin_constant_p(size)) {
int i = 0;
@@ -326,25 +329,23 @@ static __always_inline int index_of(const size_t size)
i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
- {
- extern void __bad_size(void);
- __bad_size();
- }
+ __bad_size();
} else
- BUG();
+ __bad_size();
return 0;
}
#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))
-static inline void kmem_list3_init(struct kmem_list3 *parent)
+static void kmem_list3_init(struct kmem_list3 *parent)
{
INIT_LIST_HEAD(&parent->slabs_full);
INIT_LIST_HEAD(&parent->slabs_partial);
INIT_LIST_HEAD(&parent->slabs_free);
parent->shared = NULL;
parent->alien = NULL;
+ parent->colour_next = 0;
spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
@@ -364,7 +365,7 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
} while (0)
/*
- * kmem_cache_t
+ * struct kmem_cache
*
* manages a cache.
*/
@@ -375,7 +376,7 @@ struct kmem_cache {
unsigned int batchcount;
unsigned int limit;
unsigned int shared;
- unsigned int objsize;
+ unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
struct kmem_list3 *nodelists[MAX_NUMNODES];
unsigned int flags; /* constant flags */
@@ -391,16 +392,15 @@ struct kmem_cache {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
- unsigned int colour_next; /* cache colouring */
- kmem_cache_t *slabp_cache;
+ struct kmem_cache *slabp_cache;
unsigned int slab_size;
unsigned int dflags; /* dynamic flags */
/* constructor func */
- void (*ctor) (void *, kmem_cache_t *, unsigned long);
+ void (*ctor) (void *, struct kmem_cache *, unsigned long);
/* de-constructor func */
- void (*dtor) (void *, kmem_cache_t *, unsigned long);
+ void (*dtor) (void *, struct kmem_cache *, unsigned long);
/* 4) cache creation/removal */
const char *name;
@@ -423,8 +423,14 @@ struct kmem_cache {
atomic_t freemiss;
#endif
#if DEBUG
- int dbghead;
- int reallen;
+ /*
+ * If debugging is enabled, then the allocator can add additional
+ * fields and/or padding to every object. buffer_size contains the total
+ * object size including these internal fields, the following two
+ * variables contain the offset to the user object and its size.
+ */
+ int obj_offset;
+ int obj_size;
#endif
};
@@ -495,50 +501,50 @@ struct kmem_cache {
/* memory layout of objects:
* 0 : objp
- * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
+ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
* the end of an object is aligned with the end of the real
* allocation. Catches writes behind the end of the allocation.
- * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
+ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
* redzone word.
- * cachep->dbghead: The real object.
- * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ * cachep->obj_offset: The real object.
+ * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
*/
-static int obj_dbghead(kmem_cache_t *cachep)
+static int obj_offset(struct kmem_cache *cachep)
{
- return cachep->dbghead;
+ return cachep->obj_offset;
}
-static int obj_reallen(kmem_cache_t *cachep)
+static int obj_size(struct kmem_cache *cachep)
{
- return cachep->reallen;
+ return cachep->obj_size;
}
-static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
- return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+ return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}
-static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
- return (unsigned long *)(objp + cachep->objsize -
+ return (unsigned long *)(objp + cachep->buffer_size -
2 * BYTES_PER_WORD);
- return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
+ return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_STORE_USER));
- return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
+ return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
#else
-#define obj_dbghead(x) 0
-#define obj_reallen(cachep) (cachep->objsize)
+#define obj_offset(x) 0
+#define obj_size(cachep) (cachep->buffer_size)
#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
@@ -591,6 +597,18 @@ static inline struct slab *page_get_slab(struct page *page)
return (struct slab *)page->lru.prev;
}
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+ struct page *page = virt_to_page(obj);
+ return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+ struct page *page = virt_to_page(obj);
+ return page_get_slab(page);
+}
+
/* These are the default caches for kmalloc. Custom caches can have other sizes. */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
@@ -619,16 +637,16 @@ static struct arraycache_init initarray_generic =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
+static struct kmem_cache cache_cache = {
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
- .objsize = sizeof(kmem_cache_t),
+ .buffer_size = sizeof(struct kmem_cache),
.flags = SLAB_NO_REAP,
.spinlock = SPIN_LOCK_UNLOCKED,
.name = "kmem_cache",
#if DEBUG
- .reallen = sizeof(kmem_cache_t),
+ .obj_size = sizeof(struct kmem_cache),
#endif
};
@@ -657,17 +675,17 @@ static enum {
static DEFINE_PER_CPU(struct work_struct, reap_work);
-static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
-static void enable_cpucache(kmem_cache_t *cachep);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
+static void enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);
-static int __node_shrink(kmem_cache_t *cachep, int node);
+static int __node_shrink(struct kmem_cache *cachep, int node);
-static inline struct array_cache *ac_data(kmem_cache_t *cachep)
+static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
}
-static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
@@ -691,43 +709,80 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
return csizep->cs_cachep;
}
-kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
return __find_general_cachep(size, gfpflags);
}
EXPORT_SYMBOL(kmem_find_general_cachep);
-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
- int flags, size_t *left_over, unsigned int *num)
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
- int i;
- size_t wastage = PAGE_SIZE << gfporder;
- size_t extra = 0;
- size_t base = 0;
+ return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}
- if (!(flags & CFLGS_OFF_SLAB)) {
- base = sizeof(struct slab);
- extra = sizeof(kmem_bufctl_t);
- }
- i = 0;
- while (i * size + ALIGN(base + i * extra, align) <= wastage)
- i++;
- if (i > 0)
- i--;
+/* Calculate the number of objects and left-over bytes for a given
+ buffer size. */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+ size_t align, int flags, size_t *left_over,
+ unsigned int *num)
+{
+ int nr_objs;
+ size_t mgmt_size;
+ size_t slab_size = PAGE_SIZE << gfporder;
+
+ /*
+ * The slab management structure can be either off the slab or
+ * on it. For the latter case, the memory allocated for a
+ * slab is used for:
+ *
+ * - The struct slab
+ * - One kmem_bufctl_t for each object
+ * - Padding to respect alignment of @align
+ * - @buffer_size bytes for each object
+ *
+ * If the slab management structure is off the slab, then the
+ * alignment will already be calculated into the size. Because
+ * the slabs are all pages aligned, the objects will be at the
+ * correct alignment when allocated.
+ */
+ if (flags & CFLGS_OFF_SLAB) {
+ mgmt_size = 0;
+ nr_objs = slab_size / buffer_size;
+
+ if (nr_objs > SLAB_LIMIT)
+ nr_objs = SLAB_LIMIT;
+ } else {
+ /*
+ * Ignore padding for the initial guess. The padding
+ * is at most @align-1 bytes, and @buffer_size is at
+ * least @align. In the worst case, this result will
+ * be one greater than the number of objects that fit
+ * into the memory allocation when taking the padding
+ * into account.
+ */
+ nr_objs = (slab_size - sizeof(struct slab)) /
+ (buffer_size + sizeof(kmem_bufctl_t));
+
+ /*
+ * This calculated number will be either the right
+ * amount, or one greater than what we want.
+ */
+ if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+ > slab_size)
+ nr_objs--;
- if (i > SLAB_LIMIT)
- i = SLAB_LIMIT;
+ if (nr_objs > SLAB_LIMIT)
+ nr_objs = SLAB_LIMIT;
- *num = i;
- wastage -= i * size;
- wastage -= ALIGN(base + i * extra, align);
- *left_over = wastage;
+ mgmt_size = slab_mgmt_size(nr_objs, align);
+ }
+ *num = nr_objs;
+ *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
-static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
{
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
function, cachep->name, msg);
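
The rewritten cache_estimate() documents its own arithmetic: an initial guess that ignores alignment padding, followed by an at-most-one-object correction. A standalone worked example with placeholder sizes (the struct slab and kmem_bufctl_t sizes are stand-ins, not the kernel's):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t slab_size = 4096;	/* PAGE_SIZE << 0 */
	size_t buffer_size = 256;	/* object size */
	size_t align = 64;		/* cache_line_size() */
	size_t slab_struct = 32;	/* stand-in for sizeof(struct slab) */
	size_t bufctl = 4;		/* stand-in for sizeof(kmem_bufctl_t) */

	/* initial guess, ignoring the alignment padding */
	size_t nr_objs = (slab_size - slab_struct) / (buffer_size + bufctl);
	size_t mgmt = ALIGN(slab_struct + nr_objs * bufctl, align);

	/* the guess can be one too high once padding is accounted for */
	if (mgmt + nr_objs * buffer_size > slab_size) {
		nr_objs--;
		mgmt = ALIGN(slab_struct + nr_objs * bufctl, align);
	}

	printf("objs per slab: %zu, left over: %zu bytes\n",
	       nr_objs, slab_size - nr_objs * buffer_size - mgmt);
	return 0;
}

With a 4096-byte slab, 256-byte objects and 64-byte alignment this prints 15 objects per slab with 128 bytes left over.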
@@ -774,9 +829,9 @@ static struct array_cache *alloc_arraycache(int node, int entries,
}
#ifdef CONFIG_NUMA
-static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
+static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit)
{
struct array_cache **ac_ptr;
int memsize = sizeof(void *) * MAX_NUMNODES;
@@ -803,7 +858,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
return ac_ptr;
}
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct array_cache **ac_ptr)
{
int i;
@@ -816,8 +871,8 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
kfree(ac_ptr);
}
-static inline void __drain_alien_cache(kmem_cache_t *cachep,
- struct array_cache *ac, int node)
+static void __drain_alien_cache(struct kmem_cache *cachep,
+ struct array_cache *ac, int node)
{
struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -829,14 +884,14 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep,
}
}
-static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
{
int i = 0;
struct array_cache *ac;
unsigned long flags;
for_each_online_node(i) {
- ac = l3->alien[i];
+ ac = alien[i];
if (ac) {
spin_lock_irqsave(&ac->lock, flags);
__drain_alien_cache(cachep, ac, i);
@@ -845,16 +900,25 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
}
}
#else
-#define alloc_alien_cache(node, limit) do { } while (0)
-#define free_alien_cache(ac_ptr) do { } while (0)
-#define drain_alien_cache(cachep, l3) do { } while (0)
+
+#define drain_alien_cache(cachep, alien) do { } while (0)
+
+static inline struct array_cache **alloc_alien_cache(int node, int limit)
+{
+ return (struct array_cache **) 0x01020304ul;
+}
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
+
#endif
static int __devinit cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
int memsize = sizeof(struct kmem_list3);
@@ -881,6 +945,11 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ /*
+ * The l3s don't come and go as CPUs come and
+ * go. cache_chain_mutex is sufficient
+ * protection here.
+ */
cachep->nodelists[node] = l3;
}
@@ -895,26 +964,46 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
& array cache's */
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
nc = alloc_arraycache(node, cachep->limit,
- cachep->batchcount);
+ cachep->batchcount);
if (!nc)
goto bad;
+ shared = alloc_arraycache(node,
+ cachep->shared * cachep->batchcount,
+ 0xbaadf00d);
+ if (!shared)
+ goto bad;
+
+ alien = alloc_alien_cache(node, cachep->limit);
+ if (!alien)
+ goto bad;
cachep->array[cpu] = nc;
l3 = cachep->nodelists[node];
BUG_ON(!l3);
- if (!l3->shared) {
- if (!(nc = alloc_arraycache(node,
- cachep->shared *
- cachep->batchcount,
- 0xbaadf00d)))
- goto bad;
- /* we are serialised from CPU_DEAD or
- CPU_UP_CANCELLED by the cpucontrol lock */
- l3->shared = nc;
+ spin_lock_irq(&l3->list_lock);
+ if (!l3->shared) {
+ /*
+ * We are serialised from CPU_DEAD or
+ * CPU_UP_CANCELLED by the cpucontrol lock
+ */
+ l3->shared = shared;
+ shared = NULL;
}
+#ifdef CONFIG_NUMA
+ if (!l3->alien) {
+ l3->alien = alien;
+ alien = NULL;
+ }
+#endif
+ spin_unlock_irq(&l3->list_lock);
+
+ kfree(shared);
+ free_alien_cache(alien);
}
mutex_unlock(&cache_chain_mutex);
break;
@@ -923,25 +1012,34 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
+ /*
+ * Even if all the cpus of a node are down, we don't free the
+ * kmem_list3 of any cache. This to avoid a race between
+ * cpu_down, and a kmalloc allocation from another cpu for
+ * memory from the node of the cpu going down. The list3
+ * structure is usually allocated from kmem_cache_create() and
+ * gets destroyed at kmem_cache_destroy().
+ */
/* fall thru */
case CPU_UP_CANCELED:
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
cpumask_t mask;
mask = node_to_cpumask(node);
- spin_lock_irq(&cachep->spinlock);
/* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu];
cachep->array[cpu] = NULL;
l3 = cachep->nodelists[node];
if (!l3)
- goto unlock_cache;
+ goto free_array_cache;
- spin_lock(&l3->list_lock);
+ spin_lock_irq(&l3->list_lock);
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
@@ -949,34 +1047,44 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
free_block(cachep, nc->entry, nc->avail, node);
if (!cpus_empty(mask)) {
- spin_unlock(&l3->list_lock);
- goto unlock_cache;
+ spin_unlock_irq(&l3->list_lock);
+ goto free_array_cache;
}
- if (l3->shared) {
+ shared = l3->shared;
+ if (shared) {
free_block(cachep, l3->shared->entry,
l3->shared->avail, node);
- kfree(l3->shared);
l3->shared = NULL;
}
- if (l3->alien) {
- drain_alien_cache(cachep, l3);
- free_alien_cache(l3->alien);
- l3->alien = NULL;
- }
- /* free slabs belonging to this node */
- if (__node_shrink(cachep, node)) {
- cachep->nodelists[node] = NULL;
- spin_unlock(&l3->list_lock);
- kfree(l3);
- } else {
- spin_unlock(&l3->list_lock);
+ alien = l3->alien;
+ l3->alien = NULL;
+
+ spin_unlock_irq(&l3->list_lock);
+
+ kfree(shared);
+ if (alien) {
+ drain_alien_cache(cachep, alien);
+ free_alien_cache(alien);
}
- unlock_cache:
- spin_unlock_irq(&cachep->spinlock);
+free_array_cache:
kfree(nc);
}
+ /*
+ * In the previous loop, all the objects were freed to
+ * the respective cache's slabs, now we can go ahead and
+ * shrink each nodelist to its limit.
+ */
+ list_for_each_entry(cachep, &cache_chain, next) {
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+ spin_lock_irq(&l3->list_lock);
+ /* free slabs belonging to this node */
+ __node_shrink(cachep, node);
+ spin_unlock_irq(&l3->list_lock);
+ }
mutex_unlock(&cache_chain_mutex);
break;
#endif
@@ -992,7 +1100,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
/*
* swap the static kmem_list3 with kmalloced memory
*/
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
{
struct kmem_list3 *ptr;
@@ -1032,14 +1140,14 @@ void __init kmem_cache_init(void)
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
- * 1) initialize the cache_cache cache: it contains the kmem_cache_t
+ * 1) initialize the cache_cache cache: it contains the struct kmem_cache
* structures of all caches, except cache_cache itself: cache_cache
* is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
* 2) Create the first kmalloc cache.
- * The kmem_cache_t for the new cache is allocated normally.
+ * The struct kmem_cache for the new cache is allocated normally.
* An __init data area is used for the head array.
* 3) Create the remaining kmalloc caches, with minimally sized
* head arrays.
@@ -1057,15 +1165,14 @@ void __init kmem_cache_init(void)
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
- cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
+ cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
- cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
+ cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
&left_over, &cache_cache.num);
if (!cache_cache.num)
BUG();
cache_cache.colour = left_over / cache_cache.colour_off;
- cache_cache.colour_next = 0;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
sizeof(struct slab), cache_line_size());
@@ -1132,8 +1239,8 @@ void __init kmem_cache_init(void)
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
local_irq_disable();
- BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
- memcpy(ptr, ac_data(&cache_cache),
+ BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
+ memcpy(ptr, cpu_cache_get(&cache_cache),
sizeof(struct arraycache_init));
cache_cache.array[smp_processor_id()] = ptr;
local_irq_enable();
@@ -1141,9 +1248,9 @@ void __init kmem_cache_init(void)
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
local_irq_disable();
- BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep)
+ BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
!= &initarray_generic.cache);
- memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep),
+ memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
sizeof(struct arraycache_init));
malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
ptr;
@@ -1170,7 +1277,7 @@ void __init kmem_cache_init(void)
/* 6) resize the head arrays to their final sizes */
{
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
enable_cpucache(cachep);
@@ -1181,7 +1288,7 @@ void __init kmem_cache_init(void)
g_cpucache_up = FULL;
/* Register a cpu startup notifier callback
- * that initializes ac_data for all new cpus
+ * that initializes cpu_cache_get for all new cpus
*/
register_cpu_notifier(&cpucache_notifier);
@@ -1213,7 +1320,7 @@ __initcall(cpucache_init);
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct page *page;
void *addr;
@@ -1239,7 +1346,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
/*
* Interface to system's page release.
*/
-static void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr)
{
unsigned long i = (1 << cachep->gfporder);
struct page *page = virt_to_page(addr);
@@ -1261,7 +1368,7 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
static void kmem_rcu_free(struct rcu_head *head)
{
struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
- kmem_cache_t *cachep = slab_rcu->cachep;
+ struct kmem_cache *cachep = slab_rcu->cachep;
kmem_freepages(cachep, slab_rcu->addr);
if (OFF_SLAB(cachep))
@@ -1271,12 +1378,12 @@ static void kmem_rcu_free(struct rcu_head *head)
#if DEBUG
#ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
+static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
unsigned long caller)
{
- int size = obj_reallen(cachep);
+ int size = obj_size(cachep);
- addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
+ addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
if (size < 5 * sizeof(unsigned long))
return;
@@ -1304,10 +1411,10 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
}
#endif
-static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
+static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
- int size = obj_reallen(cachep);
- addr = &((char *)addr)[obj_dbghead(cachep)];
+ int size = obj_size(cachep);
+ addr = &((char *)addr)[obj_offset(cachep)];
memset(addr, val, size);
*(unsigned char *)(addr + size - 1) = POISON_END;
@@ -1326,7 +1433,7 @@ static void dump_line(char *data, int offset, int limit)
#if DEBUG
-static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
+static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
int i, size;
char *realobj;
@@ -1344,8 +1451,8 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
(unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
- realobj = (char *)objp + obj_dbghead(cachep);
- size = obj_reallen(cachep);
+ realobj = (char *)objp + obj_offset(cachep);
+ size = obj_size(cachep);
for (i = 0; i < size && lines; i += 16, lines--) {
int limit;
limit = 16;
@@ -1355,14 +1462,14 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
}
}
-static void check_poison_obj(kmem_cache_t *cachep, void *objp)
+static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
char *realobj;
int size, i;
int lines = 0;
- realobj = (char *)objp + obj_dbghead(cachep);
- size = obj_reallen(cachep);
+ realobj = (char *)objp + obj_offset(cachep);
+ size = obj_size(cachep);
for (i = 0; i < size; i++) {
char exp = POISON_FREE;
@@ -1395,20 +1502,20 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
/* Print some data about the neighboring objects, if they
* exist:
*/
- struct slab *slabp = page_get_slab(virt_to_page(objp));
+ struct slab *slabp = virt_to_slab(objp);
int objnr;
- objnr = (objp - slabp->s_mem) / cachep->objsize;
+ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
if (objnr) {
- objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
- realobj = (char *)objp + obj_dbghead(cachep);
+ objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
+ realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
if (objnr + 1 < cachep->num) {
- objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
- realobj = (char *)objp + obj_dbghead(cachep);
+ objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
+ realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
@@ -1417,25 +1524,23 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
}
#endif
-/* Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache.
- * The cache-lock is not held/needed.
+#if DEBUG
+/**
+ * slab_destroy_objs - call the registered destructor for each object in
+ * a slab that is to be destroyed.
*/
-static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
- void *addr = slabp->s_mem - slabp->colouroff;
-
-#if DEBUG
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = slabp->s_mem + cachep->objsize * i;
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize % PAGE_SIZE) == 0
+ if ((cachep->buffer_size % PAGE_SIZE) == 0
&& OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
- cachep->objsize / PAGE_SIZE,
+ cachep->buffer_size / PAGE_SIZE,
1);
else
check_poison_obj(cachep, objp);
@@ -1452,18 +1557,32 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
"was overwritten");
}
if (cachep->dtor && !(cachep->flags & SLAB_POISON))
- (cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
+ (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
}
+}
#else
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+{
if (cachep->dtor) {
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = slabp->s_mem + cachep->objsize * i;
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
(cachep->dtor) (objp, cachep, 0);
}
}
+}
#endif
+/**
+ * Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling, the slab must have been unlinked from the cache.
+ * The cache-lock is not held/needed.
+ */
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+{
+ void *addr = slabp->s_mem - slabp->colouroff;
+
+ slab_destroy_objs(cachep, slabp);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
struct slab_rcu *slab_rcu;
@@ -1478,9 +1597,9 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
}
}
-/* For setting up all the kmem_list3s for cache whose objsize is same
+/* For setting up all the kmem_list3s for cache whose buffer_size is same
as size of kmem_list3. */
-static inline void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(struct kmem_cache *cachep, int index)
{
int node;
@@ -1493,15 +1612,20 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
}
/**
- * calculate_slab_order - calculate size (page order) of slabs and the number
- * of objects per slab.
+ * calculate_slab_order - calculate size (page order) of slabs
+ * @cachep: pointer to the cache that is being created
+ * @size: size of objects to be created in this cache.
+ * @align: required alignment for the objects.
+ * @flags: slab allocation flags
+ *
+ * Also calculates the number of objects per slab.
*
* This could be made much more intelligent. For now, try to avoid using
* high order pages for slabs. When the gfp() functions are more friendly
* towards high-order requests, this should be changed.
*/
-static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
- size_t align, gfp_t flags)
+static inline size_t calculate_slab_order(struct kmem_cache *cachep,
+ size_t size, size_t align, unsigned long flags)
{
size_t left_over = 0;
@@ -1572,13 +1696,13 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-kmem_cache_t *
+struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
- void (*dtor)(void*, kmem_cache_t *, unsigned long))
+ unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
+ void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
- kmem_cache_t *cachep = NULL;
+ struct kmem_cache *cachep = NULL;
struct list_head *p;
/*
@@ -1593,10 +1717,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
BUG();
}
+ /*
+ * Prevent CPUs from coming and going.
+ * lock_cpu_hotplug() nests outside cache_chain_mutex
+ */
+ lock_cpu_hotplug();
+
mutex_lock(&cache_chain_mutex);
list_for_each(p, &cache_chain) {
- kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+ struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
mm_segment_t old_fs = get_fs();
char tmp;
int res;
@@ -1611,7 +1741,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
set_fs(old_fs);
if (res) {
printk("SLAB: cache with size %d has lost its name\n",
- pc->objsize);
+ pc->buffer_size);
continue;
}
@@ -1696,20 +1826,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
align = ralign;
/* Get cache's description obj. */
- cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+ cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
if (!cachep)
goto oops;
- memset(cachep, 0, sizeof(kmem_cache_t));
+ memset(cachep, 0, sizeof(struct kmem_cache));
#if DEBUG
- cachep->reallen = size;
+ cachep->obj_size = size;
if (flags & SLAB_RED_ZONE) {
/* redzoning only works with word aligned caches */
align = BYTES_PER_WORD;
/* add space for red zone words */
- cachep->dbghead += BYTES_PER_WORD;
+ cachep->obj_offset += BYTES_PER_WORD;
size += 2 * BYTES_PER_WORD;
}
if (flags & SLAB_STORE_USER) {
@@ -1722,8 +1852,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
- && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
- cachep->dbghead += PAGE_SIZE - size;
+ && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+ cachep->obj_offset += PAGE_SIZE - size;
size = PAGE_SIZE;
}
#endif
@@ -1786,7 +1916,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (flags & SLAB_CACHE_DMA)
cachep->gfpflags |= GFP_DMA;
spin_lock_init(&cachep->spinlock);
- cachep->objsize = size;
+ cachep->buffer_size = size;
if (flags & CFLGS_OFF_SLAB)
cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -1794,8 +1924,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep->dtor = dtor;
cachep->name = name;
- /* Don't let CPUs to come and go */
- lock_cpu_hotplug();
if (g_cpucache_up == FULL) {
enable_cpucache(cachep);
@@ -1843,23 +1971,23 @@ kmem_cache_create (const char *name, size_t size, size_t align,
jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
- BUG_ON(!ac_data(cachep));
- ac_data(cachep)->avail = 0;
- ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
- ac_data(cachep)->batchcount = 1;
- ac_data(cachep)->touched = 0;
+ BUG_ON(!cpu_cache_get(cachep));
+ cpu_cache_get(cachep)->avail = 0;
+ cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
+ cpu_cache_get(cachep)->batchcount = 1;
+ cpu_cache_get(cachep)->touched = 0;
cachep->batchcount = 1;
cachep->limit = BOOT_CPUCACHE_ENTRIES;
}
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
- unlock_cpu_hotplug();
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
mutex_unlock(&cache_chain_mutex);
+ unlock_cpu_hotplug();
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);
@@ -1875,7 +2003,7 @@ static void check_irq_on(void)
BUG_ON(irqs_disabled());
}
-static void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -1883,7 +2011,7 @@ static void check_spinlock_acquired(kmem_cache_t *cachep)
#endif
}
-static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -1916,45 +2044,43 @@ static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
preempt_enable();
}
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
int force, int node);
static void do_drain(void *arg)
{
- kmem_cache_t *cachep = (kmem_cache_t *) arg;
+ struct kmem_cache *cachep = (struct kmem_cache *) arg;
struct array_cache *ac;
int node = numa_node_id();
check_irq_off();
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
spin_lock(&cachep->nodelists[node]->list_lock);
free_block(cachep, ac->entry, ac->avail, node);
spin_unlock(&cachep->nodelists[node]->list_lock);
ac->avail = 0;
}
-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(struct kmem_cache *cachep)
{
struct kmem_list3 *l3;
int node;
smp_call_function_all_cpus(do_drain, cachep);
check_irq_on();
- spin_lock_irq(&cachep->spinlock);
for_each_online_node(node) {
l3 = cachep->nodelists[node];
if (l3) {
- spin_lock(&l3->list_lock);
+ spin_lock_irq(&l3->list_lock);
drain_array_locked(cachep, l3->shared, 1, node);
- spin_unlock(&l3->list_lock);
+ spin_unlock_irq(&l3->list_lock);
if (l3->alien)
- drain_alien_cache(cachep, l3);
+ drain_alien_cache(cachep, l3->alien);
}
}
- spin_unlock_irq(&cachep->spinlock);
}
-static int __node_shrink(kmem_cache_t *cachep, int node)
+static int __node_shrink(struct kmem_cache *cachep, int node)
{
struct slab *slabp;
struct kmem_list3 *l3 = cachep->nodelists[node];
@@ -1983,7 +2109,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
return ret;
}
-static int __cache_shrink(kmem_cache_t *cachep)
+static int __cache_shrink(struct kmem_cache *cachep)
{
int ret = 0, i = 0;
struct kmem_list3 *l3;
@@ -2009,7 +2135,7 @@ static int __cache_shrink(kmem_cache_t *cachep)
* Releases as many slabs as possible for a cache.
* To help debugging, a zero exit status indicates all slabs were released.
*/
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int kmem_cache_shrink(struct kmem_cache *cachep)
{
if (!cachep || in_interrupt())
BUG();
@@ -2022,7 +2148,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
* kmem_cache_destroy - delete a cache
* @cachep: the cache to destroy
*
- * Remove a kmem_cache_t object from the slab cache.
+ * Remove a struct kmem_cache object from the slab cache.
* Returns 0 on success.
*
* It is expected this function will be called by a module when it is
@@ -2035,7 +2161,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 * The caller must guarantee that no one will allocate memory from the cache
* during the kmem_cache_destroy().
*/
-int kmem_cache_destroy(kmem_cache_t *cachep)
+int kmem_cache_destroy(struct kmem_cache *cachep)
{
int i;
struct kmem_list3 *l3;
@@ -2086,7 +2212,7 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
EXPORT_SYMBOL(kmem_cache_destroy);
/* Get the memory for a slab management obj. */
-static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
int colour_off, gfp_t local_flags)
{
struct slab *slabp;
@@ -2112,13 +2238,13 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
return (kmem_bufctl_t *) (slabp + 1);
}
-static void cache_init_objs(kmem_cache_t *cachep,
+static void cache_init_objs(struct kmem_cache *cachep,
struct slab *slabp, unsigned long ctor_flags)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = slabp->s_mem + cachep->objsize * i;
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
@@ -2136,7 +2262,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
* Otherwise, deadlock. They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
- cachep->ctor(objp + obj_dbghead(cachep), cachep,
+ cachep->ctor(objp + obj_offset(cachep), cachep,
ctor_flags);
if (cachep->flags & SLAB_RED_ZONE) {
@@ -2147,10 +2273,10 @@ static void cache_init_objs(kmem_cache_t *cachep,
slab_error(cachep, "constructor overwrote the"
" start of an object");
}
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
&& cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp),
- cachep->objsize / PAGE_SIZE, 0);
+ cachep->buffer_size / PAGE_SIZE, 0);
#else
if (cachep->ctor)
cachep->ctor(objp, cachep, ctor_flags);
@@ -2161,7 +2287,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
slabp->free = 0;
}
-static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
+static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA))
@@ -2172,7 +2298,43 @@ static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
}
}
-static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
+{
+ void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
+ kmem_bufctl_t next;
+
+ slabp->inuse++;
+ next = slab_bufctl(slabp)[slabp->free];
+#if DEBUG
+ slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+ WARN_ON(slabp->nodeid != nodeid);
+#endif
+ slabp->free = next;
+
+ return objp;
+}
+
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
+ int nodeid)
+{
+ unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
+
+#if DEBUG
+ /* Verify that the slab belongs to the intended node */
+ WARN_ON(slabp->nodeid != nodeid);
+
+ if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+ printk(KERN_ERR "slab: double free detected in cache "
+ "'%s', objp %p\n", cachep->name, objp);
+ BUG();
+ }
+#endif
+ slab_bufctl(slabp)[objnr] = slabp->free;
+ slabp->free = objnr;
+ slabp->inuse--;
+}
+
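
The two helpers above factor the per-slab free-list bookkeeping out of the allocation and free paths that previously open-coded it. As a rough userspace sketch of that bookkeeping (the sizes, names and toy main() are invented for illustration; this is not the kernel code itself), each free slot in the bufctl array simply records the index of the next free slot:

#include <assert.h>
#include <stdio.h>

#define NUM_OBJS   8
#define BUF_SIZE   32                     /* stand-in for cachep->buffer_size */
#define BUFCTL_END ((unsigned)~0)

static char s_mem[NUM_OBJS * BUF_SIZE];   /* stand-in for slabp->s_mem        */
static unsigned bufctl[NUM_OBJS];         /* stand-in for slab_bufctl(slabp)  */
static unsigned free_idx;                 /* stand-in for slabp->free         */
static unsigned inuse;

static void slab_init(void)
{
	unsigned i;

	for (i = 0; i < NUM_OBJS; i++)
		bufctl[i] = i + 1;        /* each free slot points at the next */
	bufctl[NUM_OBJS - 1] = BUFCTL_END;
	free_idx = 0;
	inuse = 0;
}

static void *get_obj(void)                /* models slab_get_obj() */
{
	void *objp;

	assert(free_idx != BUFCTL_END);
	objp = s_mem + free_idx * BUF_SIZE;
	inuse++;
	free_idx = bufctl[free_idx];      /* advance to the next free slot */
	return objp;
}

static void put_obj(void *objp)           /* models slab_put_obj() */
{
	unsigned objnr = (unsigned)((char *)objp - s_mem) / BUF_SIZE;

	bufctl[objnr] = free_idx;         /* push this slot back on the list */
	free_idx = objnr;
	inuse--;
}

int main(void)
{
	void *a = (slab_init(), get_obj());
	void *b = get_obj();

	put_obj(a);
	put_obj(b);
	printf("inuse=%u next free slot=%u\n", inuse, free_idx);
	return 0;
}
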
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
{
int i;
struct page *page;
@@ -2191,7 +2353,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct slab *slabp;
void *objp;
@@ -2217,20 +2379,20 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
*/
ctor_flags |= SLAB_CTOR_ATOMIC;
- /* About to mess with non-constant members - lock. */
+ /* Take the l3 list lock to change the colour_next on this node */
check_irq_off();
- spin_lock(&cachep->spinlock);
+ l3 = cachep->nodelists[nodeid];
+ spin_lock(&l3->list_lock);
 	/* Get colour for the slab, and calc the next value. */
- offset = cachep->colour_next;
- cachep->colour_next++;
- if (cachep->colour_next >= cachep->colour)
- cachep->colour_next = 0;
- offset *= cachep->colour_off;
+ offset = l3->colour_next;
+ l3->colour_next++;
+ if (l3->colour_next >= cachep->colour)
+ l3->colour_next = 0;
+ spin_unlock(&l3->list_lock);
- spin_unlock(&cachep->spinlock);
+ offset *= cachep->colour_off;
- check_irq_off();
if (local_flags & __GFP_WAIT)
local_irq_enable();
@@ -2260,7 +2422,6 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
if (local_flags & __GFP_WAIT)
local_irq_disable();
check_irq_off();
- l3 = cachep->nodelists[nodeid];
spin_lock(&l3->list_lock);
/* Make slab active. */
@@ -2302,14 +2463,14 @@ static void kfree_debugcheck(const void *objp)
}
}
-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
void *caller)
{
struct page *page;
unsigned int objnr;
struct slab *slabp;
- objp -= obj_dbghead(cachep);
+ objp -= obj_offset(cachep);
kfree_debugcheck(objp);
page = virt_to_page(objp);
@@ -2341,31 +2502,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller;
- objnr = (objp - slabp->s_mem) / cachep->objsize;
+ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
+ BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
if (cachep->flags & SLAB_DEBUG_INITIAL) {
/* Need to call the slab's constructor so the
* caller can perform a verify of its state (debugging).
* Called without the cache-lock held.
*/
- cachep->ctor(objp + obj_dbghead(cachep),
+ cachep->ctor(objp + obj_offset(cachep),
cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
}
if (cachep->flags & SLAB_POISON && cachep->dtor) {
/* we want to cache poison the object,
* call the destruction callback
*/
- cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
+ cachep->dtor(objp + obj_offset(cachep), cachep, 0);
}
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
kernel_map_pages(virt_to_page(objp),
- cachep->objsize / PAGE_SIZE, 0);
+ cachep->buffer_size / PAGE_SIZE, 0);
} else {
poison_obj(cachep, objp, POISON_FREE);
}
@@ -2376,7 +2537,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
return objp;
}
-static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
{
kmem_bufctl_t i;
int entries = 0;
@@ -2409,14 +2570,14 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
#define check_slabp(x,y) do { } while(0)
#endif
-static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
int batchcount;
struct kmem_list3 *l3;
struct array_cache *ac;
check_irq_off();
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
retry:
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -2461,22 +2622,12 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
check_slabp(cachep, slabp);
check_spinlock_acquired(cachep);
while (slabp->inuse < cachep->num && batchcount--) {
- kmem_bufctl_t next;
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- /* get obj pointer */
- ac->entry[ac->avail++] = slabp->s_mem +
- slabp->free * cachep->objsize;
-
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
- WARN_ON(numa_node_id() != slabp->nodeid);
-#endif
- slabp->free = next;
+ ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
+ numa_node_id());
}
check_slabp(cachep, slabp);
@@ -2498,7 +2649,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
x = cache_grow(cachep, flags, numa_node_id());
// cache_grow can reenable interrupts, then ac could change.
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
if (!x && ac->avail == 0) // no objects in sight? abort
return NULL;
@@ -2510,7 +2661,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
}
static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
+cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
@@ -2519,16 +2670,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
}
#if DEBUG
-static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
void *objp, void *caller)
{
if (!objp)
return objp;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
- cachep->objsize / PAGE_SIZE, 1);
+ cachep->buffer_size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
@@ -2553,7 +2704,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
- objp += obj_dbghead(cachep);
+ objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON) {
unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -2568,7 +2719,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
-static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *objp;
struct array_cache *ac;
@@ -2583,7 +2734,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
#endif
check_irq_off();
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
if (likely(ac->avail)) {
STATS_INC_ALLOCHIT(cachep);
ac->touched = 1;
@@ -2595,7 +2746,8 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
return objp;
}
-static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
unsigned long save_flags;
void *objp;
@@ -2606,7 +2758,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
objp = ____cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- __builtin_return_address(0));
+ caller);
prefetchw(objp);
return objp;
}
@@ -2615,19 +2767,19 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
/*
 * An interface to enable slab creation on nodeid
*/
-static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct list_head *entry;
struct slab *slabp;
struct kmem_list3 *l3;
void *obj;
- kmem_bufctl_t next;
int x;
l3 = cachep->nodelists[nodeid];
BUG_ON(!l3);
retry:
+ check_irq_off();
spin_lock(&l3->list_lock);
entry = l3->slabs_partial.next;
if (entry == &l3->slabs_partial) {
@@ -2647,14 +2799,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
BUG_ON(slabp->inuse == cachep->num);
- /* get obj pointer */
- obj = slabp->s_mem + slabp->free * cachep->objsize;
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-#endif
- slabp->free = next;
+ obj = slab_get_obj(cachep, slabp, nodeid);
check_slabp(cachep, slabp);
l3->free_objects--;
/* move slabp to correct slabp list: */
@@ -2685,7 +2830,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
/*
* Caller needs to acquire correct kmem_list's list_lock
*/
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
int node)
{
int i;
@@ -2694,29 +2839,14 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
for (i = 0; i < nr_objects; i++) {
void *objp = objpp[i];
struct slab *slabp;
- unsigned int objnr;
- slabp = page_get_slab(virt_to_page(objp));
+ slabp = virt_to_slab(objp);
l3 = cachep->nodelists[node];
list_del(&slabp->list);
- objnr = (objp - slabp->s_mem) / cachep->objsize;
check_spinlock_acquired_node(cachep, node);
check_slabp(cachep, slabp);
-
-#if DEBUG
- /* Verify that the slab belongs to the intended node */
- WARN_ON(slabp->nodeid != node);
-
- if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
- printk(KERN_ERR "slab: double free detected in cache "
- "'%s', objp %p\n", cachep->name, objp);
- BUG();
- }
-#endif
- slab_bufctl(slabp)[objnr] = slabp->free;
- slabp->free = objnr;
+ slab_put_obj(cachep, slabp, objp, node);
STATS_DEC_ACTIVE(cachep);
- slabp->inuse--;
l3->free_objects++;
check_slabp(cachep, slabp);
@@ -2738,7 +2868,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
}
}
-static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
int batchcount;
struct kmem_list3 *l3;
@@ -2797,9 +2927,9 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
*
* Called with disabled ints.
*/
-static inline void __cache_free(kmem_cache_t *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
- struct array_cache *ac = ac_data(cachep);
+ struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
@@ -2810,7 +2940,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
#ifdef CONFIG_NUMA
{
struct slab *slabp;
- slabp = page_get_slab(virt_to_page(objp));
+ slabp = virt_to_slab(objp);
if (unlikely(slabp->nodeid != numa_node_id())) {
struct array_cache *alien = NULL;
int nodeid = slabp->nodeid;
@@ -2856,9 +2986,9 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
-void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);
@@ -2876,12 +3006,12 @@ EXPORT_SYMBOL(kmem_cache_alloc);
*
* Currently only used for dentry validation.
*/
-int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
{
unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET;
unsigned long align_mask = BYTES_PER_WORD - 1;
- unsigned long size = cachep->objsize;
+ unsigned long size = cachep->buffer_size;
struct page *page;
if (unlikely(addr < min_addr))
@@ -2917,32 +3047,23 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
* New and improved: it will now make sure that the object gets
* put on the correct node list so that there is no false sharing.
*/
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
unsigned long save_flags;
void *ptr;
- if (nodeid == -1)
- return __cache_alloc(cachep, flags);
-
- if (unlikely(!cachep->nodelists[nodeid])) {
- /* Fall back to __cache_alloc if we run into trouble */
- printk(KERN_WARNING
- "slab: not allocating in inactive node %d for cache %s\n",
- nodeid, cachep->name);
- return __cache_alloc(cachep, flags);
- }
-
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
- if (nodeid == numa_node_id())
+
+ if (nodeid == -1 || nodeid == numa_node_id() ||
+ !cachep->nodelists[nodeid])
ptr = ____cache_alloc(cachep, flags);
else
ptr = __cache_alloc_node(cachep, flags, nodeid);
local_irq_restore(save_flags);
- ptr =
- cache_alloc_debugcheck_after(cachep, flags, ptr,
- __builtin_return_address(0));
+
+ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
+ __builtin_return_address(0));
return ptr;
}
@@ -2950,7 +3071,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
void *kmalloc_node(size_t size, gfp_t flags, int node)
{
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
cachep = kmem_find_general_cachep(size, flags);
if (unlikely(cachep == NULL))
@@ -2981,9 +3102,10 @@ EXPORT_SYMBOL(kmalloc_node);
* platforms. For example, on i386, it means that the memory must come
* from the first 16MB.
*/
-void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+ void *caller)
{
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
/* If you want to save a few bytes .text space: replace
* __ with kmem_.
@@ -2993,10 +3115,27 @@ void *__kmalloc(size_t size, gfp_t flags)
cachep = __find_general_cachep(size, flags);
if (unlikely(cachep == NULL))
return NULL;
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, caller);
+}
+
+#ifndef CONFIG_DEBUG_SLAB
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ return __do_kmalloc(size, flags, NULL);
}
EXPORT_SYMBOL(__kmalloc);
+#else
+
+void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+{
+ return __do_kmalloc(size, flags, caller);
+}
+EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#endif
+
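
The __do_kmalloc() split above lets a debug build record the real allocation site instead of __kmalloc() itself, by threading a caller pointer through an always-inlined helper. Below is a minimal sketch of the same pattern in plain C; the function names, the DEBUG_TRACKING macro and main() are invented for illustration, and only __builtin_return_address() is the real mechanism involved:

#include <stdio.h>
#include <stdlib.h>

/* Common helper: the caller's address is threaded through explicitly. */
static void *do_alloc(size_t size, void *caller)
{
	printf("alloc of %zu bytes requested from %p\n", size, caller);
	return malloc(size);
}

#ifndef DEBUG_TRACKING
/* Non-debug build: call sites are not recorded. */
void *my_alloc(size_t size)
{
	return do_alloc(size, NULL);
}
#else
/* Debug build: every call site passes its own return address. */
void *my_alloc_track_caller(size_t size, void *caller)
{
	return do_alloc(size, caller);
}
#define my_alloc(size) my_alloc_track_caller(size, __builtin_return_address(0))
#endif

int main(void)
{
	void *p = my_alloc(64);

	free(p);
	return 0;
}
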
#ifdef CONFIG_SMP
/**
* __alloc_percpu - allocate one copy of the object for every present
@@ -3054,7 +3193,7 @@ EXPORT_SYMBOL(__alloc_percpu);
* Free an object which was previously allocated from this
* cache.
*/
-void kmem_cache_free(kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
@@ -3075,15 +3214,15 @@ EXPORT_SYMBOL(kmem_cache_free);
*/
void kfree(const void *objp)
{
- kmem_cache_t *c;
+ struct kmem_cache *c;
unsigned long flags;
if (unlikely(!objp))
return;
local_irq_save(flags);
kfree_debugcheck(objp);
- c = page_get_cache(virt_to_page(objp));
- mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
+ c = virt_to_cache(objp);
+ mutex_debug_check_no_locks_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
@@ -3112,13 +3251,13 @@ void free_percpu(const void *objp)
EXPORT_SYMBOL(free_percpu);
#endif
-unsigned int kmem_cache_size(kmem_cache_t *cachep)
+unsigned int kmem_cache_size(struct kmem_cache *cachep)
{
- return obj_reallen(cachep);
+ return obj_size(cachep);
}
EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(kmem_cache_t *cachep)
+const char *kmem_cache_name(struct kmem_cache *cachep)
{
return cachep->name;
}
@@ -3127,7 +3266,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
* This initializes kmem_list3 for all nodes.
*/
-static int alloc_kmemlist(kmem_cache_t *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep)
{
int node;
struct kmem_list3 *l3;
@@ -3183,7 +3322,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
}
struct ccupdate_struct {
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
struct array_cache *new[NR_CPUS];
};
@@ -3193,13 +3332,13 @@ static void do_ccupdate_local(void *info)
struct array_cache *old;
check_irq_off();
- old = ac_data(new->cachep);
+ old = cpu_cache_get(new->cachep);
new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
new->new[smp_processor_id()] = old;
}
-static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
int shared)
{
struct ccupdate_struct new;
@@ -3220,11 +3359,11 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
check_irq_on();
- spin_lock_irq(&cachep->spinlock);
+ spin_lock(&cachep->spinlock);
cachep->batchcount = batchcount;
cachep->limit = limit;
cachep->shared = shared;
- spin_unlock_irq(&cachep->spinlock);
+ spin_unlock(&cachep->spinlock);
for_each_online_cpu(i) {
struct array_cache *ccold = new.new[i];
@@ -3245,7 +3384,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
return 0;
}
-static void enable_cpucache(kmem_cache_t *cachep)
+static void enable_cpucache(struct kmem_cache *cachep)
{
int err;
int limit, shared;
@@ -3258,13 +3397,13 @@ static void enable_cpucache(kmem_cache_t *cachep)
* The numbers are guessed, we should auto-tune as described by
* Bonwick.
*/
- if (cachep->objsize > 131072)
+ if (cachep->buffer_size > 131072)
limit = 1;
- else if (cachep->objsize > PAGE_SIZE)
+ else if (cachep->buffer_size > PAGE_SIZE)
limit = 8;
- else if (cachep->objsize > 1024)
+ else if (cachep->buffer_size > 1024)
limit = 24;
- else if (cachep->objsize > 256)
+ else if (cachep->buffer_size > 256)
limit = 54;
else
limit = 120;
@@ -3279,7 +3418,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
*/
shared = 0;
#ifdef CONFIG_SMP
- if (cachep->objsize <= PAGE_SIZE)
+ if (cachep->buffer_size <= PAGE_SIZE)
shared = 8;
#endif
@@ -3297,7 +3436,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
cachep->name, -err);
}
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
int force, int node)
{
int tofree;
@@ -3342,12 +3481,12 @@ static void cache_reap(void *unused)
}
list_for_each(walk, &cache_chain) {
- kmem_cache_t *searchp;
+ struct kmem_cache *searchp;
struct list_head *p;
int tofree;
struct slab *slabp;
- searchp = list_entry(walk, kmem_cache_t, next);
+ searchp = list_entry(walk, struct kmem_cache, next);
if (searchp->flags & SLAB_NO_REAP)
goto next;
@@ -3356,10 +3495,10 @@ static void cache_reap(void *unused)
l3 = searchp->nodelists[numa_node_id()];
if (l3->alien)
- drain_alien_cache(searchp, l3);
+ drain_alien_cache(searchp, l3->alien);
spin_lock_irq(&l3->list_lock);
- drain_array_locked(searchp, ac_data(searchp), 0,
+ drain_array_locked(searchp, cpu_cache_get(searchp), 0,
numa_node_id());
if (time_after(l3->next_reap, jiffies))
@@ -3450,15 +3589,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
if (p == &cache_chain)
return NULL;
}
- return list_entry(p, kmem_cache_t, next);
+ return list_entry(p, struct kmem_cache, next);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
- kmem_cache_t *cachep = p;
+ struct kmem_cache *cachep = p;
++*pos;
return cachep->next.next == &cache_chain ? NULL
- : list_entry(cachep->next.next, kmem_cache_t, next);
+ : list_entry(cachep->next.next, struct kmem_cache, next);
}
static void s_stop(struct seq_file *m, void *p)
@@ -3468,7 +3607,7 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
- kmem_cache_t *cachep = p;
+ struct kmem_cache *cachep = p;
struct list_head *q;
struct slab *slabp;
unsigned long active_objs;
@@ -3480,8 +3619,7 @@ static int s_show(struct seq_file *m, void *p)
int node;
struct kmem_list3 *l3;
- check_irq_on();
- spin_lock_irq(&cachep->spinlock);
+ spin_lock(&cachep->spinlock);
active_objs = 0;
num_slabs = 0;
for_each_online_node(node) {
@@ -3489,7 +3627,8 @@ static int s_show(struct seq_file *m, void *p)
if (!l3)
continue;
- spin_lock(&l3->list_lock);
+ check_irq_on();
+ spin_lock_irq(&l3->list_lock);
list_for_each(q, &l3->slabs_full) {
slabp = list_entry(q, struct slab, list);
@@ -3514,9 +3653,10 @@ static int s_show(struct seq_file *m, void *p)
num_slabs++;
}
free_objects += l3->free_objects;
- shared_avail += l3->shared->avail;
+ if (l3->shared)
+ shared_avail += l3->shared->avail;
- spin_unlock(&l3->list_lock);
+ spin_unlock_irq(&l3->list_lock);
}
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
@@ -3528,7 +3668,7 @@ static int s_show(struct seq_file *m, void *p)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
- name, active_objs, num_objs, cachep->objsize,
+ name, active_objs, num_objs, cachep->buffer_size,
cachep->num, (1 << cachep->gfporder));
seq_printf(m, " : tunables %4u %4u %4u",
cachep->limit, cachep->batchcount, cachep->shared);
@@ -3560,7 +3700,7 @@ static int s_show(struct seq_file *m, void *p)
}
#endif
seq_putc(m, '\n');
- spin_unlock_irq(&cachep->spinlock);
+ spin_unlock(&cachep->spinlock);
return 0;
}
@@ -3618,7 +3758,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
list_for_each(p, &cache_chain) {
- kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+ next);
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 ||
@@ -3656,5 +3797,5 @@ unsigned int ksize(const void *objp)
if (unlikely(objp == NULL))
return 0;
- return obj_reallen(page_get_cache(virt_to_page(objp)));
+ return obj_size(virt_to_cache(objp));
}
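
Across mm/slab.c the rename from objsize/reallen/dbghead to buffer_size/obj_size/obj_offset separates the caller-visible object size from the size of the whole buffer the slab hands out, which in a debug build also carries red-zone guard words and a stored-caller word around the object. A rough sketch of that layout arithmetic (the numbers and the standalone program are illustrative, and alignment handling is omitted):

#include <stdio.h>

#define BYTES_PER_WORD sizeof(void *)

int main(void)
{
	size_t obj_size   = 100;   /* what the caller asked for (obj_size)  */
	size_t obj_offset = 0;     /* where the object starts (obj_offset)  */
	size_t buffer_size;

	/* Red zoning: one guard word before and one after the object. */
	obj_offset += BYTES_PER_WORD;
	buffer_size = obj_size + 2 * BYTES_PER_WORD;

	/* Store-user debugging: one extra word for the caller address. */
	buffer_size += BYTES_PER_WORD;

	printf("obj_size=%zu obj_offset=%zu buffer_size=%zu\n",
	       obj_size, obj_offset, buffer_size);
	return 0;
}

In the non-debug case obj_offset stays 0 and buffer_size equals obj_size, which is why the two values used to share a single field.
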
diff --git a/mm/slob.c b/mm/slob.c
index 1c240c4..a1f42bd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(slab_reclaim_pages);
#ifdef CONFIG_SMP
-void *__alloc_percpu(size_t size, size_t align)
+void *__alloc_percpu(size_t size)
{
int i;
struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
diff --git a/mm/swap.c b/mm/swap.c
index bc2442a7..cce3dda 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,19 +34,22 @@
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
-void put_page(struct page *page)
+static void put_compound_page(struct page *page)
{
- if (unlikely(PageCompound(page))) {
- page = (struct page *)page_private(page);
- if (put_page_testzero(page)) {
- void (*dtor)(struct page *page);
+ page = (struct page *)page_private(page);
+ if (put_page_testzero(page)) {
+ void (*dtor)(struct page *page);
- dtor = (void (*)(struct page *))page[1].mapping;
- (*dtor)(page);
- }
- return;
+ dtor = (void (*)(struct page *))page[1].lru.next;
+ (*dtor)(page);
}
- if (put_page_testzero(page))
+}
+
+void put_page(struct page *page)
+{
+ if (unlikely(PageCompound(page)))
+ put_compound_page(page);
+ else if (put_page_testzero(page))
__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);
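
With this change the compound-page destructor lives in page[1].lru.next rather than page[1].mapping, and put_compound_page() fetches and calls it once the final reference is dropped. A loose userspace model of the idea follows; the toy_page struct and all names are invented, and the function-pointer casts mirror what the kernel does with the reused field:

#include <stdio.h>

/* Toy stand-in for struct page: only the fields the sketch needs. */
struct toy_page {
	int refcount;
	void *next;                        /* models page[1].lru.next reuse */
};

static void free_huge(struct toy_page *head)
{
	printf("compound page %p freed by its registered dtor\n", (void *)head);
}

static void toy_put_compound(struct toy_page pages[])
{
	if (--pages[0].refcount == 0) {
		/* The head page's destructor was stashed in page[1]. */
		void (*dtor)(struct toy_page *) =
			(void (*)(struct toy_page *))pages[1].next;
		dtor(&pages[0]);
	}
}

int main(void)
{
	struct toy_page huge[2] = { { .refcount = 1 }, { 0 } };

	huge[1].next = (void *)free_huge;   /* "set dtor", as in the patch */
	toy_put_compound(huge);             /* drops the last ref, runs dtor */
	return 0;
}
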
@@ -244,6 +247,15 @@ void release_pages(struct page **pages, int nr, int cold)
struct page *page = pages[i];
struct zone *pagezone;
+ if (unlikely(PageCompound(page))) {
+ if (zone) {
+ spin_unlock_irq(&zone->lru_lock);
+ zone = NULL;
+ }
+ put_compound_page(page);
+ continue;
+ }
+
if (!put_page_testzero(page))
continue;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7b09ac5..db8a3d3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -27,6 +27,7 @@ static struct address_space_operations swap_aops = {
.writepage = swap_writepage,
.sync_page = block_sync_page,
.set_page_dirty = __set_page_dirty_nobuffers,
+ .migratepage = migrate_page,
};
static struct backing_dev_info swap_backing_dev_info = {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f1e69c3..1f9cf0d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -554,6 +554,15 @@ static int unuse_mm(struct mm_struct *mm,
return 0;
}
+#ifdef CONFIG_MIGRATION
+int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
+{
+ swp_entry_t entry = { .val = page_private(page) };
+
+ return unuse_vma(vma, entry, page);
+}
+#endif
+
/*
* Scan swap_map from current position to next entry still in use.
* Recycle to start on reaching the end, returning 0 when empty.
@@ -646,6 +655,7 @@ static int try_to_unuse(unsigned int type)
*/
swap_map = &si->swap_map[i];
entry = swp_entry(type, i);
+again:
page = read_swap_cache_async(entry, NULL, 0);
if (!page) {
/*
@@ -680,6 +690,12 @@ static int try_to_unuse(unsigned int type)
wait_on_page_locked(page);
wait_on_page_writeback(page);
lock_page(page);
+ if (!PageSwapCache(page)) {
+			/* Page migration has occurred */
+ unlock_page(page);
+ page_cache_release(page);
+ goto again;
+ }
wait_on_page_writeback(page);
/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2e34b61..1838c15 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -443,6 +443,10 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
BUG_ON(PageActive(page));
sc->nr_scanned++;
+
+ if (!sc->may_swap && page_mapped(page))
+ goto keep_locked;
+
/* Double the slab pressure for mapped and swapcache pages */
if (page_mapped(page) || PageSwapCache(page))
sc->nr_scanned++;
@@ -477,7 +481,13 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
* processes. Try to unmap it here.
*/
if (page_mapped(page) && mapping) {
- switch (try_to_unmap(page)) {
+ /*
+ * No unmapping if we do not swap
+ */
+ if (!sc->may_swap)
+ goto keep_locked;
+
+ switch (try_to_unmap(page, 0)) {
case SWAP_FAIL:
goto activate_locked;
case SWAP_AGAIN:
@@ -492,7 +502,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
goto keep_locked;
if (!may_enter_fs)
goto keep_locked;
- if (laptop_mode && !sc->may_writepage)
+ if (!sc->may_writepage)
goto keep_locked;
/* Page is dirty, try to write it out here */
@@ -609,6 +619,15 @@ int putback_lru_pages(struct list_head *l)
}
/*
+ * Non migratable page
+ */
+int fail_migrate_page(struct page *newpage, struct page *page)
+{
+ return -EIO;
+}
+EXPORT_SYMBOL(fail_migrate_page);
+
+/*
* swapout a single page
* page is locked upon entry, unlocked on exit
*/
@@ -617,7 +636,7 @@ static int swap_page(struct page *page)
struct address_space *mapping = page_mapping(page);
if (page_mapped(page) && mapping)
- if (try_to_unmap(page) != SWAP_SUCCESS)
+ if (try_to_unmap(page, 1) != SWAP_SUCCESS)
goto unlock_retry;
if (PageDirty(page)) {
@@ -653,6 +672,167 @@ unlock_retry:
retry:
return -EAGAIN;
}
+EXPORT_SYMBOL(swap_page);
+
+/*
+ * Page migration was first developed in the context of the memory hotplug
+ * project. The main authors of the migration code are:
+ *
+ * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
+ * Hirokazu Takahashi <taka@valinux.co.jp>
+ * Dave Hansen <haveblue@us.ibm.com>
+ * Christoph Lameter <clameter@sgi.com>
+ */
+
+/*
+ * Remove references for a page and establish the new page with the correct
+ * basic settings to be able to stop accesses to the page.
+ */
+int migrate_page_remove_references(struct page *newpage,
+ struct page *page, int nr_refs)
+{
+ struct address_space *mapping = page_mapping(page);
+ struct page **radix_pointer;
+
+ /*
+ * Avoid doing any of the following work if the page count
+ * indicates that the page is in use or truncate has removed
+ * the page.
+ */
+ if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
+ return 1;
+
+ /*
+ * Establish swap ptes for anonymous pages or destroy pte
+ * maps for files.
+ *
+ * In order to reestablish file backed mappings the fault handlers
+ * will take the radix tree_lock which may then be used to stop
+	 * processes from accessing this page until the new page is ready.
+ *
+ * A process accessing via a swap pte (an anonymous page) will take a
+ * page_lock on the old page which will block the process until the
+ * migration attempt is complete. At that time the PageSwapCache bit
+ * will be examined. If the page was migrated then the PageSwapCache
+ * bit will be clear and the operation to retrieve the page will be
+ * retried which will find the new page in the radix tree. Then a new
+ * direct mapping may be generated based on the radix tree contents.
+ *
+ * If the page was not migrated then the PageSwapCache bit
+ * is still set and the operation may continue.
+ */
+ try_to_unmap(page, 1);
+
+ /*
+ * Give up if we were unable to remove all mappings.
+ */
+ if (page_mapcount(page))
+ return 1;
+
+ write_lock_irq(&mapping->tree_lock);
+
+ radix_pointer = (struct page **)radix_tree_lookup_slot(
+ &mapping->page_tree,
+ page_index(page));
+
+ if (!page_mapping(page) || page_count(page) != nr_refs ||
+ *radix_pointer != page) {
+ write_unlock_irq(&mapping->tree_lock);
+ return 1;
+ }
+
+ /*
+ * Now we know that no one else is looking at the page.
+ *
+ * Certain minimal information about a page must be available
+ * in order for other subsystems to properly handle the page if they
+ * find it through the radix tree update before we are finished
+ * copying the page.
+ */
+ get_page(newpage);
+ newpage->index = page->index;
+ newpage->mapping = page->mapping;
+ if (PageSwapCache(page)) {
+ SetPageSwapCache(newpage);
+ set_page_private(newpage, page_private(page));
+ }
+
+ *radix_pointer = newpage;
+ __put_page(page);
+ write_unlock_irq(&mapping->tree_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(migrate_page_remove_references);
+
+/*
+ * Copy the page to its new location
+ */
+void migrate_page_copy(struct page *newpage, struct page *page)
+{
+ copy_highpage(newpage, page);
+
+ if (PageError(page))
+ SetPageError(newpage);
+ if (PageReferenced(page))
+ SetPageReferenced(newpage);
+ if (PageUptodate(page))
+ SetPageUptodate(newpage);
+ if (PageActive(page))
+ SetPageActive(newpage);
+ if (PageChecked(page))
+ SetPageChecked(newpage);
+ if (PageMappedToDisk(page))
+ SetPageMappedToDisk(newpage);
+
+ if (PageDirty(page)) {
+ clear_page_dirty_for_io(page);
+ set_page_dirty(newpage);
+ }
+
+ ClearPageSwapCache(page);
+ ClearPageActive(page);
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ page->mapping = NULL;
+
+ /*
+ * If any waiters have accumulated on the new page then
+ * wake them up.
+ */
+ if (PageWriteback(newpage))
+ end_page_writeback(newpage);
+}
+EXPORT_SYMBOL(migrate_page_copy);
+
+/*
+ * Common logic to directly migrate a single page suitable for
+ * pages that do not use PagePrivate.
+ *
+ * Pages are locked upon entry and exit.
+ */
+int migrate_page(struct page *newpage, struct page *page)
+{
+ BUG_ON(PageWriteback(page)); /* Writeback must be complete */
+
+ if (migrate_page_remove_references(newpage, page, 2))
+ return -EAGAIN;
+
+ migrate_page_copy(newpage, page);
+
+ /*
+ * Remove auxiliary swap entries and replace
+ * them with real ptes.
+ *
+ * Note that a real pte entry will allow processes that are not
+ * waiting on the page lock to use the new page via the page tables
+ * before the new page is unlocked.
+ */
+ remove_from_swap(newpage);
+ return 0;
+}
+EXPORT_SYMBOL(migrate_page);
+
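
migrate_pages() now prefers a per-mapping migratepage callback, such as the migrate_page hook added to swap_aops above, and only falls back to writing the page out or to swap_page(). A condensed model of that dispatch (the struct and function names here are invented for illustration):

#include <stdio.h>

struct toy_mapping_ops {
	/* Optional per-filesystem migration hook, like a_ops->migratepage. */
	int (*migratepage)(void *newpage, void *oldpage);
};

static int generic_migrate(void *newpage, void *oldpage)
{
	printf("migrated %p -> %p via the generic path\n", oldpage, newpage);
	return 0;
}

static int toy_migrate_one(struct toy_mapping_ops *ops,
			   void *newpage, void *oldpage)
{
	if (ops && ops->migratepage)
		/* Most mappings should take this fast path. */
		return ops->migratepage(newpage, oldpage);

	/* Fallback: behave like the default clean-page migration. */
	return generic_migrate(newpage, oldpage);
}

int main(void)
{
	struct toy_mapping_ops swap_like = { .migratepage = generic_migrate };
	char oldp, newp;

	toy_migrate_one(&swap_like, &newp, &oldp);  /* callback path  */
	toy_migrate_one(NULL, &newp, &oldp);        /* fallback path  */
	return 0;
}
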
/*
* migrate_pages
*
@@ -663,14 +843,9 @@ retry:
* pages are swapped out.
*
* The function returns after 10 attempts or if no pages
- * are movable anymore because t has become empty
+ * are movable anymore because to has become empty
* or no retryable pages exist anymore.
*
- * SIMPLIFIED VERSION: This implementation of migrate_pages
- * is only swapping out pages and never touches the second
- * list. The direct migration patchset
- * extends this function to avoid the use of swap.
- *
* Return: Number of pages not migrated when "to" ran empty.
*/
int migrate_pages(struct list_head *from, struct list_head *to,
@@ -691,6 +866,9 @@ redo:
retry = 0;
list_for_each_entry_safe(page, page2, from, lru) {
+ struct page *newpage = NULL;
+ struct address_space *mapping;
+
cond_resched();
rc = 0;
@@ -698,6 +876,9 @@ redo:
/* page was freed from under us. So we are done. */
goto next;
+ if (to && list_empty(to))
+ break;
+
/*
* Skip locked pages during the first two passes to give the
* functions holding the lock time to release the page. Later we
@@ -734,12 +915,84 @@ redo:
}
}
+ if (!to) {
+ rc = swap_page(page);
+ goto next;
+ }
+
+ newpage = lru_to_page(to);
+ lock_page(newpage);
+
/*
- * Page is properly locked and writeback is complete.
+ * Pages are properly locked and writeback is complete.
* Try to migrate the page.
*/
- rc = swap_page(page);
- goto next;
+ mapping = page_mapping(page);
+ if (!mapping)
+ goto unlock_both;
+
+ if (mapping->a_ops->migratepage) {
+ /*
+ * Most pages have a mapping and most filesystems
+ * should provide a migration function. Anonymous
+ * pages are part of swap space which also has its
+ * own migration function. This is the most common
+ * path for page migration.
+ */
+ rc = mapping->a_ops->migratepage(newpage, page);
+ goto unlock_both;
+ }
+
+ /*
+ * Default handling if a filesystem does not provide
+ * a migration function. We can only migrate clean
+ * pages so try to write out any dirty pages first.
+ */
+ if (PageDirty(page)) {
+ switch (pageout(page, mapping)) {
+ case PAGE_KEEP:
+ case PAGE_ACTIVATE:
+ goto unlock_both;
+
+ case PAGE_SUCCESS:
+ unlock_page(newpage);
+ goto next;
+
+ case PAGE_CLEAN:
+ ; /* try to migrate the page below */
+ }
+ }
+
+ /*
+ * Buffers are managed in a filesystem specific way.
+ * We must have no buffers or drop them.
+ */
+ if (!page_has_buffers(page) ||
+ try_to_release_page(page, GFP_KERNEL)) {
+ rc = migrate_page(newpage, page);
+ goto unlock_both;
+ }
+
+ /*
+ * On early passes with mapped pages simply
+ * retry. There may be a lock held for some
+ * buffers that may go away. Later
+ * swap them out.
+ */
+ if (pass > 4) {
+ /*
+ * Persistently unable to drop buffers..... As a
+ * measure of last resort we fall back to
+ * swap_page().
+ */
+ unlock_page(newpage);
+ newpage = NULL;
+ rc = swap_page(page);
+ goto next;
+ }
+
+unlock_both:
+ unlock_page(newpage);
unlock_page:
unlock_page(page);
@@ -752,7 +1005,10 @@ next:
list_move(&page->lru, failed);
nr_failed++;
} else {
- /* Success */
+ if (newpage) {
+ /* Successful migration. Return page to LRU */
+ move_to_lru(newpage);
+ }
list_move(&page->lru, moved);
}
}
@@ -939,9 +1195,47 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
struct page *page;
struct pagevec pvec;
int reclaim_mapped = 0;
- long mapped_ratio;
- long distress;
- long swap_tendency;
+
+ if (unlikely(sc->may_swap)) {
+ long mapped_ratio;
+ long distress;
+ long swap_tendency;
+
+ /*
+ * `distress' is a measure of how much trouble we're having
+ * reclaiming pages. 0 -> no problems. 100 -> great trouble.
+ */
+ distress = 100 >> zone->prev_priority;
+
+ /*
+ * The point of this algorithm is to decide when to start
+ * reclaiming mapped memory instead of just pagecache. Work out
+ * how much memory
+ * is mapped.
+ */
+ mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+
+ /*
+ * Now decide how much we really want to unmap some pages. The
+ * mapped ratio is downgraded - just because there's a lot of
+ * mapped memory doesn't necessarily mean that page reclaim
+ * isn't succeeding.
+ *
+ * The distress ratio is important - we don't want to start
+ * going oom.
+ *
+ * A 100% value of vm_swappiness overrides this algorithm
+ * altogether.
+ */
+ swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+
+ /*
+ * Now use this metric to decide whether to start moving mapped
+ * memory onto the inactive list.
+ */
+ if (swap_tendency >= 100)
+ reclaim_mapped = 1;
+ }
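
reclaim_mapped is now computed only when sc->may_swap is set, but the formula itself is unchanged: swap_tendency = mapped_ratio / 2 + distress + vm_swappiness, and mapped pages start being reclaimed once it reaches 100. A quick worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs, not measured values. */
	long nr_mapped     = 200000;   /* mapped pages (sc->nr_mapped) */
	long total_memory  = 1000000;  /* total pages                  */
	int  prev_priority = 4;        /* zone->prev_priority          */
	int  swappiness    = 60;       /* default vm_swappiness        */

	long mapped_ratio  = nr_mapped * 100 / total_memory;    /* 20 */
	long distress      = 100 >> prev_priority;               /* 6  */
	long swap_tendency = mapped_ratio / 2 + distress + swappiness;

	/* 10 + 6 + 60 = 76 < 100, so mapped pages are left alone here. */
	printf("swap_tendency=%ld reclaim_mapped=%d\n",
	       swap_tendency, swap_tendency >= 100);
	return 0;
}

A swappiness of 100 pushes the sum over the threshold on its own, which is what the "overrides this algorithm altogether" comment refers to.
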
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
@@ -951,37 +1245,6 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
zone->nr_active -= pgmoved;
spin_unlock_irq(&zone->lru_lock);
- /*
- * `distress' is a measure of how much trouble we're having reclaiming
- * pages. 0 -> no problems. 100 -> great trouble.
- */
- distress = 100 >> zone->prev_priority;
-
- /*
- * The point of this algorithm is to decide when to start reclaiming
- * mapped memory instead of just pagecache. Work out how much memory
- * is mapped.
- */
- mapped_ratio = (sc->nr_mapped * 100) / total_memory;
-
- /*
- * Now decide how much we really want to unmap some pages. The mapped
- * ratio is downgraded - just because there's a lot of mapped memory
- * doesn't necessarily mean that page reclaim isn't succeeding.
- *
- * The distress ratio is important - we don't want to start going oom.
- *
- * A 100% value of vm_swappiness overrides this algorithm altogether.
- */
- swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
- /*
- * Now use this metric to decide whether to start moving mapped memory
- * onto the inactive list.
- */
- if (swap_tendency >= 100)
- reclaim_mapped = 1;
-
while (!list_empty(&l_hold)) {
cond_resched();
page = lru_to_page(&l_hold);
@@ -1170,7 +1433,7 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
int i;
sc.gfp_mask = gfp_mask;
- sc.may_writepage = 0;
+ sc.may_writepage = !laptop_mode;
sc.may_swap = 1;
inc_page_state(allocstall);
@@ -1273,7 +1536,7 @@ loop_again:
total_scanned = 0;
total_reclaimed = 0;
sc.gfp_mask = GFP_KERNEL;
- sc.may_writepage = 0;
+ sc.may_writepage = !laptop_mode;
sc.may_swap = 1;
sc.nr_mapped = read_page_state(nr_mapped);
@@ -1358,9 +1621,7 @@ scan:
sc.nr_reclaimed = 0;
sc.priority = priority;
sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
- atomic_inc(&zone->reclaim_in_progress);
shrink_zone(zone, &sc);
- atomic_dec(&zone->reclaim_in_progress);
reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
lru_pages);
@@ -1586,40 +1847,61 @@ module_init(kswapd_init)
*/
int zone_reclaim_mode __read_mostly;
+#define RECLAIM_OFF 0
+#define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */
+#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
+#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
+#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */
+
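
zone_reclaim_mode is now a bitmask, and the RECLAIM_WRITE and RECLAIM_SWAP bits feed directly into sc.may_writepage and sc.may_swap further down in zone_reclaim(). A tiny sketch of that decoding (the macro values mirror the ones defined above; the toy scan_control and main() are illustrative):

#include <stdio.h>

#define RECLAIM_OFF   0
#define RECLAIM_ZONE  (1 << 0)   /* run shrink_cache on the zone      */
#define RECLAIM_WRITE (1 << 1)   /* write out pages during reclaim    */
#define RECLAIM_SWAP  (1 << 2)   /* swap pages out during reclaim     */
#define RECLAIM_SLAB  (1 << 3)   /* shrink slab if the zone is short  */

struct toy_scan_control {
	int may_writepage;
	int may_swap;
};

int main(void)
{
	int zone_reclaim_mode = RECLAIM_ZONE | RECLAIM_WRITE;   /* example */
	struct toy_scan_control sc;

	sc.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE);
	sc.may_swap      = !!(zone_reclaim_mode & RECLAIM_SWAP);

	printf("may_writepage=%d may_swap=%d\n",
	       sc.may_writepage, sc.may_swap);
	return 0;
}
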
/*
 * Minimum time between zone reclaim scans
*/
-#define ZONE_RECLAIM_INTERVAL HZ/2
+int zone_reclaim_interval __read_mostly = 30*HZ;
+
+/*
+ * Priority for ZONE_RECLAIM. This determines the fraction of pages
+ * of a node considered for each zone_reclaim. 4 scans 1/16th of
+ * a zone.
+ */
+#define ZONE_RECLAIM_PRIORITY 4
+
/*
* Try to free up some pages from this zone through reclaim.
*/
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
- int nr_pages = 1 << order;
+ int nr_pages;
struct task_struct *p = current;
struct reclaim_state reclaim_state;
- struct scan_control sc = {
- .gfp_mask = gfp_mask,
- .may_writepage = 0,
- .may_swap = 0,
- .nr_mapped = read_page_state(nr_mapped),
- .nr_scanned = 0,
- .nr_reclaimed = 0,
- .priority = 0
- };
+ struct scan_control sc;
+ cpumask_t mask;
+ int node_id;
+
+ if (time_before(jiffies,
+ zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
+ return 0;
if (!(gfp_mask & __GFP_WAIT) ||
- zone->zone_pgdat->node_id != numa_node_id() ||
zone->all_unreclaimable ||
atomic_read(&zone->reclaim_in_progress) > 0)
return 0;
- if (time_before(jiffies,
- zone->last_unsuccessful_zone_reclaim + ZONE_RECLAIM_INTERVAL))
- return 0;
+ node_id = zone->zone_pgdat->node_id;
+ mask = node_to_cpumask(node_id);
+ if (!cpus_empty(mask) && node_id != numa_node_id())
+ return 0;
+
+ sc.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE);
+ sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP);
+ sc.nr_scanned = 0;
+ sc.nr_reclaimed = 0;
+ sc.priority = ZONE_RECLAIM_PRIORITY + 1;
+ sc.nr_mapped = read_page_state(nr_mapped);
+ sc.gfp_mask = gfp_mask;
disable_swap_token();
+ nr_pages = 1 << order;
if (nr_pages > SWAP_CLUSTER_MAX)
sc.swap_cluster_max = nr_pages;
else
@@ -1629,14 +1911,37 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
p->flags |= PF_MEMALLOC;
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- shrink_zone(zone, &sc);
+
+ /*
+ * Free memory by calling shrink zone with increasing priorities
+ * until we have enough memory freed.
+ */
+ do {
+ sc.priority--;
+ shrink_zone(zone, &sc);
+
+ } while (sc.nr_reclaimed < nr_pages && sc.priority > 0);
+
+ if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
+ /*
+ * shrink_slab does not currently allow us to determine
+ * how many pages were freed in the zone. So we just
+ * shake the slab and then go offnode for a single allocation.
+ *
+ * shrink_slab will free memory on all zones and may take
+ * a long time.
+ */
+ shrink_slab(sc.nr_scanned, gfp_mask, order);
+ sc.nr_reclaimed = 1; /* Avoid getting the off node timeout */
+ }
+
p->reclaim_state = NULL;
current->flags &= ~PF_MEMALLOC;
if (sc.nr_reclaimed == 0)
zone->last_unsuccessful_zone_reclaim = jiffies;
- return sc.nr_reclaimed > nr_pages;
+ return sc.nr_reclaimed >= nr_pages;
}
#endif