Diffstat (limited to 'mm/mlock.c')
 mm/mlock.c | 49 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 22041aa..bd6f0e4 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -198,17 +198,26 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		for (i = 0; i < ret; i++) {
 			struct page *page = pages[i];
 
-			lock_page(page);
-			/*
-			 * Because we lock page here and migration is blocked
-			 * by the elevated reference, we need only check for
-			 * file-cache page truncation. This page->mapping
-			 * check also neatly skips over the ZERO_PAGE(),
-			 * though if that's common we'd prefer not to lock it.
-			 */
-			if (page->mapping)
-				mlock_vma_page(page);
-			unlock_page(page);
+			if (page->mapping) {
+				/*
+				 * That preliminary check is mainly to avoid
+				 * the pointless overhead of lock_page on the
+				 * ZERO_PAGE: which might bounce very badly if
+				 * there is contention. However, we're still
+				 * dirtying its cacheline with get/put_page:
+				 * we'll add another __get_user_pages flag to
+				 * avoid it if that case turns out to matter.
+				 */
+				lock_page(page);
+				/*
+				 * Because we lock page here and migration is
+				 * blocked by the elevated reference, we need
+				 * only check for file-cache page truncation.
+				 */
+				if (page->mapping)
+					mlock_vma_page(page);
+				unlock_page(page);
+			}
 			put_page(page);	/* ref from get_user_pages() */
 		}
 
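The new code in this hunk is an instance of the check-lock-recheck pattern: a cheap unlocked test of page->mapping skips the ZERO_PAGE (whose mapping is NULL) without ever touching its page lock, and the test is repeated once the lock is held, because truncation may have cleared page->mapping in between. Below is a minimal userspace sketch of the same pattern, not kernel code: a pthread mutex stands in for lock_page(), and struct object and maybe_process() are made-up names for illustration.

#include <pthread.h>
#include <stdio.h>

struct object {
	pthread_mutex_t lock;
	void *mapping;			/* NULL once "truncated" */
};

static void process_locked(struct object *obj)
{
	printf("processing %p\n", (void *)obj);
}

static void maybe_process(struct object *obj)
{
	/*
	 * Cheap unlocked test: deliberately racy, as in the kernel code;
	 * a stale read only costs us taking the lock and rechecking.
	 */
	if (!obj->mapping)
		return;
	pthread_mutex_lock(&obj->lock);
	/*
	 * Recheck under the lock: mapping may have been cleared
	 * between the first test and lock acquisition.
	 */
	if (obj->mapping)
		process_locked(obj);
	pthread_mutex_unlock(&obj->lock);
}

int main(void)
{
	struct object obj = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.mapping = &obj,	/* non-NULL: will be processed */
	};

	maybe_process(&obj);		/* takes the lock, processes */
	obj.mapping = NULL;		/* "truncate" it */
	maybe_process(&obj);		/* skipped without locking */
	return 0;
}

Built with cc -pthread, the second maybe_process() call returns without taking the mutex at all, which is exactly the contention-avoidance argument made in the comment above about not bouncing the ZERO_PAGE's lock cacheline.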
@@ -309,9 +318,23 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 	vma->vm_flags &= ~VM_LOCKED;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		struct page *page = follow_page(vma, addr, FOLL_GET);
-		if (page) {
+		struct page *page;
+		/*
+		 * Although FOLL_DUMP is intended for get_dump_page(),
+		 * it just so happens that its special treatment of the
+		 * ZERO_PAGE (returning an error instead of doing get_page)
+		 * suits munlock very well (and if somehow an abnormal page
+		 * has sneaked into the range, we won't oops here: great).
+		 */
+		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+		if (page && !IS_ERR(page)) {
 			lock_page(page);
+			/*
+			 * Like in __mlock_vma_pages_range(),
+			 * because we lock page here and migration is
+			 * blocked by the elevated reference, we need
+			 * only check for file-cache page truncation.
+			 */
 			if (page->mapping)
 				munlock_vma_page(page);
 			unlock_page(page);
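The check "page && !IS_ERR(page)" works because the kernel encodes small negative errno values directly in the pointer value: follow_page() with FOLL_DUMP hands back such an error pointer for the ZERO_PAGE rather than taking a reference on it, so the caller must distinguish a real page, NULL, and an encoded error. The self-contained userspace sketch below is modelled loosely on the kernel's include/linux/err.h helpers; the exact kernel definitions differ, and the particular errno shown (-EFAULT) is an assumption for illustration.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Errno values are small, so their negatives map into the topmost
 * page of the address space, where no valid object can live.
 */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	int backing;
	void *good = &backing;		/* an ordinary pointer */
	void *none = NULL;		/* "no page mapped here" */
	void *err = ERR_PTR(-EFAULT);	/* e.g. ZERO_PAGE under FOLL_DUMP */

	printf("good: usable? %d\n", good != NULL && !IS_ERR(good));
	printf("none: usable? %d\n", none != NULL && !IS_ERR(none));
	printf("err:  usable? %d, error %ld\n",
	       err != NULL && !IS_ERR(err), IS_ERR(err) ? PTR_ERR(err) : 0L);
	return 0;
}

Because encoded errors occupy only that top unmapped range, a single return value can carry a valid pointer, NULL for "nothing there", or a failure reason, which is exactly the three-way distinction the munlock loop above relies on.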