Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c       3
-rw-r--r--  mm/swapfile.c  12
2 files changed, 8 insertions, 7 deletions
diff --git a/mm/slab.c b/mm/slab.c
index f71d8be..76b092b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -854,7 +854,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
int memsize = sizeof(struct kmem_list3);
- struct array_cache *nc = NULL;

switch (action) {
case CPU_UP_PREPARE:
@@ -891,6 +890,8 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
/* Now we can go ahead with allocating the shared array's
& array cache's */
list_for_each_entry(cachep, &cache_chain, next) {
+ struct array_cache *nc;
+
nc = alloc_arraycache(node, cachep->limit,
cachep->batchcount);
if (!nc)
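The slab.c hunk above narrows the scope of the array_cache pointer: instead of one function-wide nc shared across the whole notifier callback, each pass over the cache chain declares its own pointer right where it is allocated. A minimal userspace C sketch of the same scope-narrowing pattern (make_buffer and its sizes are hypothetical stand-ins, not kernel APIs):

/* Sketch only: declare the pointer inside the loop that uses it, so its
 * lifetime matches its use and a stale value cannot leak between iterations. */
#include <stdio.h>
#include <stdlib.h>

static int *make_buffer(size_t nitems)
{
	return calloc(nitems, sizeof(int));	/* may fail, like alloc_arraycache() */
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		int *buf = make_buffer(16);	/* declared where it is needed */

		if (!buf)
			return 1;
		buf[0] = i;
		printf("buffer %d starts with %d\n", i, buf[0]);
		free(buf);
	}
	return 0;
}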
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6da4b28..80f948a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1493,7 +1493,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
goto bad_swap;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
goto bad_swap;
-
+
/* OK, set up the swap map and apply the bad block list */
if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
error = -ENOMEM;
@@ -1502,17 +1502,17 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
error = 0;
memset(p->swap_map, 0, maxpages * sizeof(short));
- for (i=0; i<swap_header->info.nr_badpages; i++) {
- int page = swap_header->info.badpages[i];
- if (page <= 0 || page >= swap_header->info.last_page)
+ for (i = 0; i < swap_header->info.nr_badpages; i++) {
+ int page_nr = swap_header->info.badpages[i];
+ if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
error = -EINVAL;
else
- p->swap_map[page] = SWAP_MAP_BAD;
+ p->swap_map[page_nr] = SWAP_MAP_BAD;
}
nr_good_pages = swap_header->info.last_page -
swap_header->info.nr_badpages -
1 /* header page */;
- if (error)
+ if (error)
goto bad_swap;
}
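The swapfile.c hunk leaves the bad-block handling logic unchanged; it renames the loop variable from page to page_nr and tidies whitespace. Each entry of the header's bad-page list is range-checked against last_page and, if valid, the matching swap_map slot is flagged SWAP_MAP_BAD; an out-of-range entry sets -EINVAL so swapon bails out. A small standalone C sketch of that validate-and-mark pattern (MAP_BAD, LAST_PAGE and the sample arrays are made-up stand-ins, not the kernel structures):

#include <stdio.h>
#include <string.h>

#define MAP_BAD   0x7fff	/* stand-in for SWAP_MAP_BAD */
#define LAST_PAGE 8		/* stand-in for swap_header->info.last_page */

int main(void)
{
	short map[LAST_PAGE];
	int badpages[] = { 2, 5, 9 };	/* 9 is out of range on purpose */
	int nr_badpages = 3;
	int error = 0;
	int i;

	memset(map, 0, sizeof(map));
	for (i = 0; i < nr_badpages; i++) {
		int page_nr = badpages[i];

		if (page_nr <= 0 || page_nr >= LAST_PAGE)
			error = -1;		/* the kernel code uses -EINVAL */
		else
			map[page_nr] = MAP_BAD;
	}
	printf("error = %d, map[2] = %d, map[5] = %d\n", error, map[2], map[5]);
	return error ? 1 : 0;
}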