Diffstat (limited to 'mm')
 mm/backing-dev.c | 12 ++----------
 mm/memory.c      |  2 --
 mm/mmap.c        |  4 ++--
 mm/nommu.c       |  4 ++--
 mm/page_alloc.c  |  8 ++++++--
 mm/swap.c        |  4 ++--
 6 files changed, 14 insertions(+), 20 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7c4f9e0..f2e574d 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -172,30 +172,22 @@ postcore_initcall(bdi_class_init);
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
- char *name;
va_list args;
int ret = 0;
struct device *dev;
va_start(args, fmt);
- name = kvasprintf(GFP_KERNEL, fmt, args);
+ dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
va_end(args);
-
- if (!name)
- return -ENOMEM;
-
- dev = device_create(bdi_class, parent, MKDEV(0, 0), name);
if (IS_ERR(dev)) {
ret = PTR_ERR(dev);
goto exit;
}
bdi->dev = dev;
- dev_set_drvdata(bdi->dev, bdi);
- bdi_debug_register(bdi, name);
+ bdi_debug_register(bdi, dev_name(dev));
exit:
- kfree(name);
return ret;
}
EXPORT_SYMBOL(bdi_register);
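The hunk above closes a race in bdi_class device creation: device_create() made the device visible to userspace before dev_set_drvdata() ran, so a concurrent open could observe NULL driver data. device_create_vargs() takes the drvdata pointer up front and stores it before the device is registered, and it formats the name itself, which is why the kvasprintf()/kfree() bookkeeping can go. A minimal sketch of the two orderings inside bdi_register() (both driver-core calls are real for this kernel; surrounding code is elided):

	/* Before: two steps, leaving a window in which the device is
	 * registered but its driver data is still NULL. */
	dev = device_create(bdi_class, parent, MKDEV(0, 0), name);
	dev_set_drvdata(dev, bdi);	/* too late: userspace may already look */

	/* After: the driver core stores bdi before the device is added,
	 * so no task can ever see the device without its drvdata. */
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);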
diff --git a/mm/memory.c b/mm/memory.c
index fb5608a..19e0ae9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2295,8 +2295,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vmf.flags = flags;
vmf.page = NULL;
- BUG_ON(vma->vm_flags & VM_PFNMAP);
-
ret = vma->vm_ops->fault(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
return ret;
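With the BUG_ON gone, __do_fault() will call a driver's ->fault() handler even on a VM_PFNMAP mapping instead of taking the kernel down. For reference, a ->fault() handler of this era has the shape below; this is a hedged sketch (the handler name, the vm_private_data usage, and the single pre-allocated page are illustrative), but the hook signature and return codes are the real API invoked above:

	/* Return one page, stashed in vm_private_data at mmap time,
	 * for every fault in the VMA. */
	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct page *page = vma->vm_private_data;

		if (!page)
			return VM_FAULT_SIGBUS;

		get_page(page);		/* reference handed to the VM */
		vmf->page = page;	/* __do_fault() maps vmf->page */
		return 0;
	}

	static struct vm_operations_struct example_vm_ops = {
		.fault = example_fault,
	};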
diff --git a/mm/mmap.c b/mm/mmap.c
index fac6633..669499e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
/*
* Check that a process has enough memory to allocate a new virtual
@@ -177,7 +177,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
* cast `allowed' as a signed long because vm_committed_space
* sometimes has a negative value
*/
- if (atomic_read(&vm_committed_space) < (long)allowed)
+ if (atomic_long_read(&vm_committed_space) < (long)allowed)
return 0;
error:
vm_unacct_memory(pages);
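The type change matters because atomic_t is 32 bits even on 64-bit kernels, while vm_committed_space counts pages: with 4 KiB pages the counter wraps after 2^31 pages, i.e. 8 TiB of committed address space, which large 64-bit machines can reach. Once wrapped, the signed comparison above misfires. An illustrative sketch of the overflow (values assumed, not from the patch):

	static atomic_t      c32 = ATOMIC_INIT(INT_MAX);	/* 32-bit on every arch */
	static atomic_long_t c64 = ATOMIC_LONG_INIT(INT_MAX);	/* native word size */

	static void overflow_demo(void)
	{
		atomic_add(1, &c32);		/* wraps: atomic_read(&c32) < 0 */
		atomic_long_add(1, &c64);	/* stays positive on 64-bit */
	}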
diff --git a/mm/nommu.c b/mm/nommu.c
index ef8c62c..dca93fc 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -39,7 +39,7 @@ struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
@@ -1410,7 +1410,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
* cast `allowed' as a signed long because vm_committed_space
* sometimes has a negative value
*/
- if (atomic_read(&vm_committed_space) < (long)allowed)
+ if (atomic_long_read(&vm_committed_space) < (long)allowed)
return 0;
error:
vm_unacct_memory(pages);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6383557..8e83f02 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1396,6 +1396,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
&preferred_zone);
+ if (!preferred_zone)
+ return NULL;
+
classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
@@ -2804,7 +2807,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
alloc_size = zone->wait_table_hash_nr_entries
* sizeof(wait_queue_head_t);
- if (system_state == SYSTEM_BOOTING) {
+ if (!slab_is_available()) {
zone->wait_table = (wait_queue_head_t *)
alloc_bootmem_node(pgdat, alloc_size);
} else {
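Switching the test from system_state to slab_is_available() checks the actual precondition (is the normal allocator up?) rather than a proxy: the slab becomes usable while system_state is still SYSTEM_BOOTING, and zones can also be initialised long after boot (memory hotplug), so the two conditions do not track each other. The idiom generalises; a sketch with an illustrative helper name:

	static void *alloc_table(struct pglist_data *pgdat, unsigned long alloc_size)
	{
		if (!slab_is_available())	/* too early: only bootmem works */
			return alloc_bootmem_node(pgdat, alloc_size);

		return vmalloc(alloc_size);	/* normal allocators are up */
	}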
@@ -3378,7 +3381,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
* is used by this zone for memmap. This affects the watermark
* and per-cpu initialisations
*/
- memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+ memmap_pages =
+ PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
if (realsize >= memmap_pages) {
realsize -= memmap_pages;
printk(KERN_DEBUG
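The old expression truncated, so a final partial page of memmap went uncharged against the zone; PAGE_ALIGN() rounds the byte count up to a page boundary before converting to pages. Worked numbers (all illustrative: 4 KiB pages, a 56-byte struct page, a 100000-page zone):

	/* #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
	 *
	 * size * sizeof(struct page) = 100000 * 56 = 5600000 bytes
	 *   5600000 >> PAGE_SHIFT             = 1367 pages (768 bytes dropped)
	 *   PAGE_ALIGN(5600000) >> PAGE_SHIFT = 1368 pages (fully charged)
	 */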
diff --git a/mm/swap.c b/mm/swap.c
index 91e1944..45c9f25 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -503,7 +503,7 @@ void vm_acct_memory(long pages)
local = &__get_cpu_var(committed_space);
*local += pages;
if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
- atomic_add(*local, &vm_committed_space);
+ atomic_long_add(*local, &vm_committed_space);
*local = 0;
}
preempt_enable();
@@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
committed = &per_cpu(committed_space, (long)hcpu);
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- atomic_add(*committed, &vm_committed_space);
+ atomic_long_add(*committed, &vm_committed_space);
*committed = 0;
drain_cpu_pagevecs((long)hcpu);
}
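Both hunks are flush points of the same per-CPU batching scheme: vm_acct_memory() accumulates deltas in a per-CPU counter and only touches the shared vm_committed_space cacheline once the local value passes ACCT_THRESHOLD, while the hotplug callback folds a dead CPU's leftover delta back in so it is not lost. Roughly how the callback is wired up in kernels of this vintage (sketch; the swap_setup() call site is assumed):

	void __init swap_setup(void)
	{
		/* ... pagevec setup elided ... */
	#ifdef CONFIG_HOTPLUG_CPU
		hotcpu_notifier(cpu_swap_callback, 0);
	#endif
	}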