author    Michel Lespinasse <walken@google.com>    2013-02-22 16:32:46 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 17:50:11 -0800
commit    1869305009857cdeaabe6283bcdc2359c5784543 (patch)
tree      a8a500c71e7aa3a645322635f3d591c16601af27 /mm/mlock.c
parent    cea10a19b7972a1954c4a2d05a7de8db48b444fb (diff)
mm: introduce VM_POPULATE flag to better deal with racy userspace programs
The vm_populate() code populates user mappings without constantly holding the mmap_sem. This makes it susceptible to racy userspace programs: the user mappings may change while vm_populate() is running, and in this case vm_populate() may end up populating the new mapping instead of the old one.

In order to reduce the possibility of userspace getting surprised by this behavior, this change introduces the VM_POPULATE vma flag, which gets set on vmas we want vm_populate() to work on. This way vm_populate() may still end up populating the new mapping after such a race, but only if the new mapping is also one that the user has requested (using MAP_SHARED, MAP_LOCKED or mlock) to be populated.

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Greg Ungerer <gregungerer@westnet.com.au>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
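For illustration only (not part of the commit), a minimal userspace sketch of the kind of race the message describes: one thread calls mlock() on a region while another thread replaces the mapping underneath it, so the population step may fault in whichever mapping is present by then. The names, the 4096-byte size, and the use of MAP_FIXED are assumptions made up for this example; error handling is omitted. Build with -pthread.

/* Hypothetical sketch of the racy userspace pattern (illustrative only). */
#include <pthread.h>
#include <sys/mman.h>
#include <stddef.h>

#define LEN 4096                       /* assumed page size for the sketch */

static void *region;                   /* mapping shared by both threads */

static void *remapper(void *unused)
{
	(void)unused;
	/* Replace the mapping while the main thread may be inside mlock(). */
	munmap(region, LEN);
	mmap(region, LEN, PROT_READ | PROT_WRITE,
	     MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return NULL;
}

int main(void)
{
	pthread_t t;

	region = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	pthread_create(&t, NULL, remapper, NULL);
	/*
	 * mlock() marks the vma VM_LOCKED under mmap_sem, but the actual
	 * population happens without constantly holding mmap_sem, so it may
	 * end up faulting in the replacement mapping created by remapper().
	 * With VM_POPULATE, __mm_populate() only touches vmas the user asked
	 * to have populated (MAP_SHARED, MAP_LOCKED or mlock).
	 */
	mlock(region, LEN);
	pthread_join(t, NULL);
	return 0;
}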
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--  mm/mlock.c  19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 569400a..d6378fe 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -340,9 +340,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
- newflags = vma->vm_flags | VM_LOCKED;
- if (!on)
- newflags &= ~VM_LOCKED;
+ newflags = vma->vm_flags & ~VM_LOCKED;
+ if (on)
+ newflags |= VM_LOCKED | VM_POPULATE;
tmp = vma->vm_end;
if (tmp > end)
@@ -402,7 +402,8 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
* range with the first VMA. Also, skip undesirable VMA types.
*/
nend = min(end, vma->vm_end);
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
+ VM_POPULATE)
continue;
if (nstart < vma->vm_start)
nstart = vma->vm_start;
@@ -475,18 +476,18 @@ static int do_mlockall(int flags)
struct vm_area_struct * vma, * prev = NULL;
if (flags & MCL_FUTURE)
- current->mm->def_flags |= VM_LOCKED;
+ current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
else
- current->mm->def_flags &= ~VM_LOCKED;
+ current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
if (flags == MCL_FUTURE)
goto out;
for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
vm_flags_t newflags;
- newflags = vma->vm_flags | VM_LOCKED;
- if (!(flags & MCL_CURRENT))
- newflags &= ~VM_LOCKED;
+ newflags = vma->vm_flags & ~VM_LOCKED;
+ if (flags & MCL_CURRENT)
+ newflags |= VM_LOCKED | VM_POPULATE;
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);