author | Michel Lespinasse <walken@google.com> | 2011-01-13 15:46:12 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 17:32:36 -0800 |
commit | 5fdb2002131cd4e210b9638a4fc932ec7be491d1 (patch) | |
tree | 233dc1cab275397d211a7d5490f19192a59a47fd /mm/mlock.c | |
parent | 110d74a921f4d272b47ef6104fcf937df808f4c8 (diff) | |
download | op-kernel-dev-5fdb2002131cd4e210b9638a4fc932ec7be491d1.zip op-kernel-dev-5fdb2002131cd4e210b9638a4fc932ec7be491d1.tar.gz |
mm: move VM_LOCKED check to __mlock_vma_pages_range()
Use a single code path for faulting in pages during mlock.
The reason to include it in this patch series is that I did not want to
have to update both code paths in the later change that releases mmap_sem
when blocking on disk.
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
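
To make the shape of the change easier to see, here is a small standalone sketch of the resulting control flow, condensed from the hunks below. This is a toy model, not kernel code: the flag values, `struct vma_sketch`, `gup_flags_for()` and `fault_in_vma()` are invented for illustration, and the actual page faulting is reduced to a printf. What it demonstrates is that the VM_LOCKED test now lives inside the fault helper, so the caller can use one path for every VMA.

```c
/*
 * Toy userspace model of the single-code-path idea in this patch.
 * The flag values and names below are illustrative, not the kernel's.
 */
#include <stdio.h>

#define VM_WRITE   0x01u
#define VM_SHARED  0x02u
#define VM_LOCKED  0x04u

#define FOLL_TOUCH 0x01u
#define FOLL_WRITE 0x02u
#define FOLL_MLOCK 0x04u

struct vma_sketch {
	unsigned int vm_flags;
};

/*
 * Mirrors __mlock_vma_pages_range() after the patch: the VM_LOCKED test
 * lives here, so FOLL_MLOCK is only requested for mlocked VMAs and the
 * caller no longer needs its own branch.
 */
static unsigned int gup_flags_for(const struct vma_sketch *vma)
{
	unsigned int gup_flags = FOLL_TOUCH;

	/* Write-fault private writable mappings in order to break COW. */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/* The check that used to sit in the caller. */
	if (vma->vm_flags & VM_LOCKED)
		gup_flags |= FOLL_MLOCK;

	return gup_flags;
}

/*
 * Mirrors the simplified do_mlock_pages() loop body: every VMA goes
 * through the same fault-in helper, locked or not.
 */
static void fault_in_vma(const struct vma_sketch *vma)
{
	printf("fault in range with gup_flags=0x%x\n", gup_flags_for(vma));
}

int main(void)
{
	struct vma_sketch locked  = { VM_WRITE | VM_LOCKED };	/* private, mlocked */
	struct vma_sketch regular = { VM_WRITE | VM_SHARED };	/* shared mapping */

	fault_in_vma(&locked);	/* prints 0x7: TOUCH | WRITE | MLOCK */
	fault_in_vma(&regular);	/* prints 0x1: TOUCH only */
	return 0;
}
```

Funneling both the locked and unlocked case through __mlock_vma_pages_range() means the later patch in this series only has to teach one place to release mmap_sem while blocking on disk.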
Diffstat (limited to 'mm/mlock.c')
-rw-r--r-- | mm/mlock.c | 26 |
1 file changed, 13 insertions, 13 deletions
@@ -169,7 +169,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+	gup_flags = FOLL_TOUCH;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
@@ -178,6 +178,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
 		gup_flags |= FOLL_WRITE;
 
+	if (vma->vm_flags & VM_LOCKED)
+		gup_flags |= FOLL_MLOCK;
+
 	/* We don't try to access the guard page of a stack vma */
 	if (stack_guard_page(vma, start)) {
 		addr += PAGE_SIZE;
@@ -456,18 +459,15 @@ static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
 		/*
 		 * Now fault in a range of pages within the first VMA.
 		 */
-		if (vma->vm_flags & VM_LOCKED) {
-			ret = __mlock_vma_pages_range(vma, nstart, nend);
-			if (ret < 0 && ignore_errors) {
-				ret = 0;
-				continue;	/* continue at next VMA */
-			}
-			if (ret) {
-				ret = __mlock_posix_error_return(ret);
-				break;
-			}
-		} else
-			make_pages_present(nstart, nend);
+		ret = __mlock_vma_pages_range(vma, nstart, nend);
+		if (ret < 0 && ignore_errors) {
+			ret = 0;
+			continue;	/* continue at next VMA */
+		}
+		if (ret) {
+			ret = __mlock_posix_error_return(ret);
+			break;
+		}
 	}
 	up_read(&mm->mmap_sem);
 	return ret;	/* 0 or negative error code */