author		Tejun Heo <tj@kernel.org>	2013-04-01 17:08:13 -0700
committer	Tejun Heo <tj@kernel.org>	2013-04-01 18:45:36 -0700
commit		229641a6f1f09e27a1f12fba38980f33f4c92975 (patch)
tree		234a6f8aea0910de3242af0bbe6d7494fcf81847 /mm
parent		d55262c4d164759a8debe772da6c9b16059dec47 (diff)
parent		07961ac7c0ee8b546658717034fe692fd12eefa9 (diff)
Merge tag 'v3.9-rc5' into wq/for-3.10
Writeback conversion to workqueue will be based on top of the
wq/for-3.10 branch to take advantage of custom attrs and NUMA support
for unbound workqueues.  Mainline currently contains two commits which
result in non-trivial merge conflicts with wq/for-3.10, and because
block/for-3.10/core is based on v3.9-rc3, which contains one of the
conflicting commits, we need a pre-merge-window merge anyway.  Let's
pull v3.9-rc5 into wq/for-3.10 so that the block tree doesn't suffer
from workqueue merge conflicts.

The two conflicts and their resolutions:

* e68035fb65 ("workqueue: convert to idr_alloc()") in mainline changes
  worker_pool_assign_id() to use idr_alloc() instead of the old idr
  interface.  worker_pool_assign_id() goes through multiple locking
  changes in wq/for-3.10, causing the following conflict.

    static int worker_pool_assign_id(struct worker_pool *pool)
    {
            int ret;

    <<<<<<< HEAD
            lockdep_assert_held(&wq_pool_mutex);

            do {
                    if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
                            return -ENOMEM;
                    ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
            } while (ret == -EAGAIN);
    =======
            mutex_lock(&worker_pool_idr_mutex);
            ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
            if (ret >= 0)
                    pool->id = ret;
            mutex_unlock(&worker_pool_idr_mutex);
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

            return ret < 0 ? ret : 0;
    }

  We want the locking from the former and the idr_alloc() usage from
  the latter, which can be combined into the following.

    static int worker_pool_assign_id(struct worker_pool *pool)
    {
            int ret;

            lockdep_assert_held(&wq_pool_mutex);

            ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
            if (ret >= 0) {
                    pool->id = ret;
                    return 0;
            }
            return ret;
    }

* eb2834285c ("workqueue: fix possible pool stall bug in
  wq_unbind_fn()") updated wq_unbind_fn() such that it has a single
  larger for_each_std_worker_pool() loop instead of two separate loops
  with a schedule() call in between.  wq/for-3.10 renamed
  pool->assoc_mutex to pool->manager_mutex, causing the following
  conflict (earlier function body and comments omitted for brevity).

    static void wq_unbind_fn(struct work_struct *work)
    {
    ...
                    spin_unlock_irq(&pool->lock);
    <<<<<<< HEAD
                    mutex_unlock(&pool->manager_mutex);
            }
    =======
                    mutex_unlock(&pool->assoc_mutex);
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

            schedule();

    <<<<<<< HEAD
            for_each_cpu_worker_pool(pool, cpu)
    =======
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
                    atomic_set(&pool->nr_running, 0);

                    spin_lock_irq(&pool->lock);
                    wake_up_worker(pool);
                    spin_unlock_irq(&pool->lock);
            }
    }

  The resolution is mostly trivial.  We want the control flow of the
  latter with the rename of the former.

    static void wq_unbind_fn(struct work_struct *work)
    {
    ...
                    spin_unlock_irq(&pool->lock);
                    mutex_unlock(&pool->manager_mutex);

            schedule();

                    atomic_set(&pool->nr_running, 0);

                    spin_lock_irq(&pool->lock);
                    wake_up_worker(pool);
                    spin_unlock_irq(&pool->lock);
            }
    }

Signed-off-by: Tejun Heo <tj@kernel.org>
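The first resolution boils down to the post-conversion idr_alloc() idiom: allocation and id assignment in one call, with locking asserted rather than taken internally. A minimal sketch of that pattern, with a hypothetical idr and mutex standing in for worker_pool_idr and wq_pool_mutex:

    #include <linux/idr.h>
    #include <linux/mutex.h>
    #include <linux/gfp.h>
    #include <linux/lockdep.h>

    static DEFINE_IDR(example_idr);      /* hypothetical, stands in for worker_pool_idr */
    static DEFINE_MUTEX(example_mutex);  /* hypothetical, stands in for wq_pool_mutex */

    static int example_assign_id(struct worker_pool *pool)
    {
            int ret;

            /* the caller must hold the mutex, as in the resolved code */
            lockdep_assert_held(&example_mutex);

            /* idr_alloc() returns the new id (>= 0) or a -errno */
            ret = idr_alloc(&example_idr, pool, 0, 0, GFP_KERNEL);
            if (ret < 0)
                    return ret;
            pool->id = ret;
            return 0;
    }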
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig		 8
-rw-r--r--	mm/fremap.c		17
-rw-r--r--	mm/hugetlb.c		 8
-rw-r--r--	mm/ksm.c		 2
-rw-r--r--	mm/memcontrol.c		 8
-rw-r--r--	mm/memory_hotplug.c	 8
-rw-r--r--	mm/mempolicy.c		 4
-rw-r--r--	mm/mlock.c		11
-rw-r--r--	mm/mmap.c		 4
-rw-r--r--	mm/process_vm_access.c	 8

10 files changed, 40 insertions(+), 38 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index ae55c1e..3bea74f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -286,8 +286,12 @@ config NR_QUICK
default "1"
config VIRT_TO_BUS
- def_bool y
- depends on HAVE_VIRT_TO_BUS
+ bool
+ help
+ An architecture should select this if it implements the
+ deprecated interface virt_to_bus(). All new architectures
+ should probably not select this.
+
config MMU_NOTIFIER
bool
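The new help text marks virt_to_bus() as deprecated; the usual replacement is the DMA mapping API. A hedged sketch (not part of this patch) of the substitution:

    #include <linux/dma-mapping.h>

    static dma_addr_t example_map_for_device(struct device *dev, void *buf, size_t len)
    {
            /* legacy, deprecated style: dma_addr_t bus = virt_to_bus(buf); */
            dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, bus))
                    return 0;  /* error handling simplified for the sketch */
            return bus;
    }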
diff --git a/mm/fremap.c b/mm/fremap.c
index 0cd4c1148..87da359 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -129,7 +129,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
struct vm_area_struct *vma;
int err = -EINVAL;
int has_write_lock = 0;
- vm_flags_t vm_flags;
+ vm_flags_t vm_flags = 0;
if (prot)
return err;
@@ -204,10 +204,8 @@ get_write_lock:
unsigned long addr;
struct file *file = get_file(vma->vm_file);
- vm_flags = vma->vm_flags;
- if (!(flags & MAP_NONBLOCK))
- vm_flags |= VM_POPULATE;
- addr = mmap_region(file, start, size, vm_flags, pgoff);
+ addr = mmap_region(file, start, size,
+ vma->vm_flags, pgoff);
fput(file);
if (IS_ERR_VALUE(addr)) {
err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
mutex_unlock(&mapping->i_mmap_mutex);
}
- if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
- if (!has_write_lock)
- goto get_write_lock;
- vma->vm_flags |= VM_POPULATE;
- }
-
if (vma->vm_flags & VM_LOCKED) {
/*
* drop PG_Mlocked flag for over-mapped range
@@ -254,7 +246,8 @@ get_write_lock:
*/
out:
- vm_flags = vma->vm_flags;
+ if (vma)
+ vm_flags = vma->vm_flags;
if (likely(!has_write_lock))
up_read(&mm->mmap_sem);
else
		up_write(&mm->mmap_sem);
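For reference, the syscall being patched is driven from userspace as below; prot must be 0, matching the early "if (prot) return err;" check. A minimal sketch (file name and error handling are illustrative only):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long psz = sysconf(_SC_PAGESIZE);
            int fd = open("/tmp/remap-example", O_RDWR | O_CREAT, 0600);

            if (fd < 0 || ftruncate(fd, 2 * psz) != 0)
                    return 1;

            char *p = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* place file page 1 at the start of the window: a nonlinear mapping */
            if (remap_file_pages(p, psz, 0 /* prot must be 0 */, 1, 0))
                    perror("remap_file_pages");

            munmap(p, 2 * psz);
            close(fd);
            return 0;
    }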
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a0be33..ca9a7c6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
- struct hstate *h = &default_hstate;
- return h->nr_huge_pages * pages_per_huge_page(h);
+ struct hstate *h;
+ unsigned long nr_total_pages = 0;
+
+ for_each_hstate(h)
+ nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+ return nr_total_pages;
}
static int hugetlb_acct_memory(struct hstate *h, long delta)
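The fix matters once more than one huge page size is configured. A standalone sketch with made-up numbers (2MB and 1GB hstates on a 4KB base page):

    #include <stdio.h>

    struct hstate { unsigned long nr_huge_pages, pages_per_huge_page; };

    int main(void)
    {
            struct hstate hstates[] = {
                    { 512, 512 },    /* 2MB pages: 512 pages x 512 base pages */
                    { 2, 262144 },   /* 1GB pages: 2 pages x 262144 base pages */
            };
            unsigned long total = 0;

            /* the old code summed only hstates[0], the default hstate */
            for (int i = 0; i < 2; i++)
                    total += hstates[i].nr_huge_pages *
                             hstates[i].pages_per_huge_page;

            printf("total: %lu base pages\n", total);  /* 262144 + 524288 = 786432 */
            return 0;
    }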
diff --git a/mm/ksm.c b/mm/ksm.c
index 85bfd4c..b6afe0c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -489,7 +489,7 @@ out: page = NULL;
*/
static inline int get_kpfn_nid(unsigned long kpfn)
{
- return ksm_merge_across_nodes ? 0 : pfn_to_nid(kpfn);
+ return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
static void remove_node_from_stable_tree(struct stable_node *stable_node)
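The NUMA() wrapper used above is a compile-time switch; its assumed definition from ksm.c of this era, shown for context:

    #ifdef CONFIG_NUMA
    #define NUMA(x)         (x)
    #else
    #define NUMA(x)         (0)     /* !CONFIG_NUMA builds: everything is node 0 */
    #endif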
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53b8201..2b55222 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3012,6 +3012,8 @@ void memcg_update_array_size(int num)
memcg_limited_groups_array_size = memcg_caches_array_size(num);
}
+static void kmem_cache_destroy_work_func(struct work_struct *w);
+
int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
{
struct memcg_cache_params *cur_params = s->memcg_params;
@@ -3031,6 +3033,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
return -ENOMEM;
}
+ INIT_WORK(&s->memcg_params->destroy,
+ kmem_cache_destroy_work_func);
s->memcg_params->is_root_cache = true;
/*
@@ -3078,6 +3082,8 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
if (!s->memcg_params)
return -ENOMEM;
+ INIT_WORK(&s->memcg_params->destroy,
+ kmem_cache_destroy_work_func);
if (memcg) {
s->memcg_params->memcg = memcg;
s->memcg_params->root_cache = root_cache;
@@ -3358,8 +3364,6 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
cachep = memcg_params_to_cache(params);
cachep->memcg_params->dead = true;
- INIT_WORK(&cachep->memcg_params->destroy,
- kmem_cache_destroy_work_func);
schedule_work(&cachep->memcg_params->destroy);
}
mutex_unlock(&memcg->slab_caches_mutex);
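The change moves work-item initialization from destroy time to cache-creation time, so INIT_WORK() runs exactly once per cache. A generic sketch of that pattern (names hypothetical, not the memcg code itself):

    #include <linux/workqueue.h>

    struct example_params {
            struct work_struct destroy;
    };

    static void example_destroy_work_func(struct work_struct *w)
    {
            /* actual teardown runs here, in process context */
    }

    static void example_create(struct example_params *p)
    {
            INIT_WORK(&p->destroy, example_destroy_work_func);  /* once, at creation */
    }

    static void example_mark_dead(struct example_params *p)
    {
            schedule_work(&p->destroy);  /* a no-op if the work is already queued */
    }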
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b81a367b..ee37657 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
- if (zone->wait_table)
+ /*
+ * wait_table may be allocated from boot memory,
+ * here only free if it's allocated by vmalloc.
+ */
+ if (is_vmalloc_addr(zone->wait_table))
vfree(zone->wait_table);
}
@@ -1801,7 +1805,7 @@ int __ref remove_memory(int nid, u64 start, u64 size)
int retry = 1;
start_pfn = PFN_DOWN(start);
- end_pfn = start_pfn + PFN_DOWN(size);
+ end_pfn = PFN_UP(start + size - 1);
/*
* When CONFIG_MEMCG is on, one memory block may be used by other
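The end_pfn change guards against an unaligned start. A standalone arithmetic check, assuming 4KB pages and the standard PFN_DOWN()/PFN_UP() definitions:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long start = 0x800, size = 0x1000;  /* straddles frames 0 and 1 */

            /* old: stops at frame 1, missing the tail of the range */
            printf("old end_pfn = %lu\n", PFN_DOWN(start) + PFN_DOWN(size));  /* 1 */
            /* new: rounds the true end address up, covering both frames */
            printf("new end_pfn = %lu\n", PFN_UP(start + size - 1));          /* 2 */
            return 0;
    }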
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 31d2663..7431001 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2390,9 +2390,9 @@ restart:
*mpol_new = *n->policy;
atomic_set(&mpol_new->refcnt, 1);
- sp_node_init(n_new, n->end, end, mpol_new);
- sp_insert(sp, n_new);
+ sp_node_init(n_new, end, n->end, mpol_new);
n->end = start;
+ sp_insert(sp, n_new);
n_new = NULL;
mpol_new = NULL;
break;
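What the reordering protects: carving [start, end) out of an existing shared-policy node must create the tail as [end, old_end) and shrink the head before the new node is published with sp_insert(), so the tree never holds overlapping ranges. A standalone sketch of the split, under that reading of the fix:

    #include <stdio.h>

    struct node { unsigned long start, end; };

    int main(void)
    {
            struct node n = { 10, 50 };          /* existing policy range */
            unsigned long start = 20, end = 30;  /* range being replaced */

            struct node n_new = { end, n.end };  /* tail node: [30, 50) */
            n.end = start;                       /* head shrinks to [10, 20) */
            /* only now would sp_insert() make n_new visible in the tree */

            printf("head [%lu,%lu)  tail [%lu,%lu)\n",
                   n.start, n.end, n_new.start, n_new.end);
            return 0;
    }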
diff --git a/mm/mlock.c b/mm/mlock.c
index 1c5e33f..79b7cf7 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
newflags = vma->vm_flags & ~VM_LOCKED;
if (on)
- newflags |= VM_LOCKED | VM_POPULATE;
+ newflags |= VM_LOCKED;
tmp = vma->vm_end;
if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
* range with the first VMA. Also, skip undesirable VMA types.
*/
nend = min(end, vma->vm_end);
- if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
- VM_POPULATE)
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
continue;
if (nstart < vma->vm_start)
nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
struct vm_area_struct * vma, * prev = NULL;
if (flags & MCL_FUTURE)
- current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
+ current->mm->def_flags |= VM_LOCKED;
else
- current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
+ current->mm->def_flags &= ~VM_LOCKED;
if (flags == MCL_FUTURE)
goto out;
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
newflags = vma->vm_flags & ~VM_LOCKED;
if (flags & MCL_CURRENT)
- newflags |= VM_LOCKED | VM_POPULATE;
+ newflags |= VM_LOCKED;
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
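From userspace these flags map onto mlockall(): MCL_CURRENT locks existing mappings, while MCL_FUTURE sets the VM_LOCKED default in mm->def_flags for future ones. A minimal usage sketch:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
                    perror("mlockall");  /* usually needs CAP_IPC_LOCK or rlimit headroom */
                    return 1;
            }
            /* ... all current and future mappings are now locked ... */
            munlockall();
            return 0;
    }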
diff --git a/mm/mmap.c b/mm/mmap.c
index 2664a47..6466699 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
}
addr = mmap_region(file, addr, len, vm_flags, pgoff);
- if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+ if (!IS_ERR_VALUE(addr) &&
+ ((vm_flags & VM_LOCKED) ||
+ (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
*populate = len;
return addr;
}
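The populate test above reads directly from the userspace flags: MAP_POPULATE asks for prefaulting and MAP_NONBLOCK cancels it. A minimal sketch of the prefaulted case (file path illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long psz = sysconf(_SC_PAGESIZE);
            int fd = open("/etc/hostname", O_RDONLY);

            if (fd < 0)
                    return 1;

            /* pages are faulted in before mmap() returns */
            char *p = mmap(NULL, psz, PROT_READ,
                           MAP_PRIVATE | MAP_POPULATE, fd, 0);
            if (p != MAP_FAILED)
                    munmap(p, psz);
            close(fd);
            return 0;
    }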
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 926b466..fd26d04 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -429,12 +429,6 @@ compat_process_vm_rw(compat_pid_t pid,
if (flags != 0)
return -EINVAL;
- if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
- goto out;
-
- if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
- goto out;
-
if (vm_write)
rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
UIO_FASTIOV, iovstack_l,
@@ -459,8 +453,6 @@ free_iovecs:
kfree(iov_r);
if (iov_l != iovstack_l)
kfree(iov_l);
-
-out:
return rc;
}
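The userspace counterpart of the syscall above, with flags == 0 as the only value the kernel accepts; reading from our own pid keeps the sketch self-contained:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
            char src[32] = "hello", dst[32] = "";
            struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
            struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

            ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
            if (n < 0)
                    perror("process_vm_readv");
            else
                    printf("copied %zd bytes: %s\n", n, dst);
            return 0;
    }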