path: root/mm/memory.c
author		Hugh Dickins <hughd@google.com>	2014-04-04 01:28:22 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-04 16:16:55 -0700
commit		cda540ace6a194850e23c79955cc2e46fd91c19a (patch)
tree		886bd309085e4cc16f671186cbf34eca6b415aa1 /mm/memory.c
parent		d15e03104eb9a4f8e244ab6ed3ca5a107e46db13 (diff)
mm: get_user_pages(write,force) refuse to COW in shared areas
get_user_pages(write=1, force=1) has always had odd behaviour on write-protected shared mappings: although it demands FMODE_WRITE-access to the underlying object (do_mmap_pgoff sets neither VM_SHARED nor VM_MAYWRITE without that), it ends up with do_wp_page substituting private anonymous Copied-On-Write pages for the shared file pages in the area.

That was long ago intentional, as a safety measure to prevent ptrace setting a breakpoint (or POKETEXT or POKEDATA) from inadvertently corrupting the underlying executable. Yet exec and dynamic loaders open the file read-only, and use MAP_PRIVATE rather than MAP_SHARED.

The traditional odd behaviour still causes surprises and bugs in mm, and is probably not what any caller wants - even the comment on the flag says "You do not want this" (although it's undoubtedly necessary for overriding userspace protections in some contexts, and good when !write).

Let's stop doing that. But it would be dangerous to remove the long-standing safety at this stage, so just make get_user_pages(write,force) fail with EFAULT when applied to a write-protected shared area. Infiniband may in future want to force write through to underlying object: we can add another FOLL_flag later to enable that if required.

Odd though the old behaviour was, there is no doubt that we may turn out to break userspace with this change, and have to revert it quickly. Issue a WARN_ON_ONCE to help debug the changed case (easily triggered by userspace, so only once to prevent spamming the logs); and delay a few associated cleanups until this change is proved.

get_user_pages callers who might see trouble from this change:

    ptrace poking, or writing to /proc/<pid>/mem
    drivers/infiniband/
    drivers/media/v4l2-core/
    drivers/gpu/drm/exynos/exynos_drm_gem.c
    drivers/staging/tidspbridge/core/tiomap3430.c

if they ever apply get_user_pages to write-protected shared mappings of an object which was opened for writing.

I went to apply the same change to mm/nommu.c, but retreated. NOMMU has no place for COW, and its VM_flags conventions are not the same: I'd be more likely to screw up NOMMU than make an improvement there.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
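The "easily triggered by userspace" case can be illustrated with a small reproducer. The following userspace sketch is not part of the patch: it maps, read-only and MAP_SHARED, a file it could have mapped writably (so the vma has VM_MAYWRITE and VM_SHARED but not VM_WRITE), then writes through /proc/self/mem, which reaches get_user_pages with write=1, force=1. On kernels before this commit the write is expected to be satisfied by COWing an anonymous page into the shared mapping (visible through the mapping, never written back to the file); with this commit the expectation is that the write fails instead. The scratch-file path is arbitrary, and the exact errno seen by userspace (EFAULT vs EIO) depends on how the /proc/<pid>/mem path reports the failed access.

/*
 * Reproducer sketch (userspace, not part of the patch):
 * exercise get_user_pages(write,force) on a write-protected shared mapping.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/gup-cow-test";	/* arbitrary scratch file */
	long pagesize = sysconf(_SC_PAGESIZE);
	char buf[] = "poke";
	char *map;
	int fd, memfd;
	ssize_t n;

	fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || ftruncate(fd, pagesize) < 0) {
		perror("scratch file");
		return 1;
	}

	/* Read-only view of a file we could write: VM_MAYWRITE set, VM_WRITE not */
	map = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Writing /proc/self/mem goes through the write,force get_user_pages path */
	memfd = open("/proc/self/mem", O_RDWR);
	if (memfd < 0) {
		perror("open /proc/self/mem");
		return 1;
	}

	n = pwrite(memfd, buf, sizeof(buf), (off_t)(unsigned long)map);
	if (n < 0)
		perror("pwrite");	/* expected once this patch is applied */
	else
		printf("wrote %zd bytes; mapping now reads \"%s\"\n", n, map);

	return 0;
}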
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	66
1 file changed, 45 insertions(+), 21 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 90cea22..82c1e4c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1705,15 +1705,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 
-	/*
-	 * Require read or write permissions.
-	 * If FOLL_FORCE is set, we only require the "MAY" flags.
-	 */
-	vm_flags  = (gup_flags & FOLL_WRITE) ?
-			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= (gup_flags & FOLL_FORCE) ?
-			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
 	/*
 	 * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
 	 * would be called on PROT_NONE ranges. We must never invoke
@@ -1741,7 +1732,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			/* user gate pages are read-only */
 			if (gup_flags & FOLL_WRITE)
-				return i ? : -EFAULT;
+				goto efault;
 			if (pg > TASK_SIZE)
 				pgd = pgd_offset_k(pg);
 			else
@@ -1751,12 +1742,12 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			BUG_ON(pud_none(*pud));
 			pmd = pmd_offset(pud, pg);
 			if (pmd_none(*pmd))
-				return i ? : -EFAULT;
+				goto efault;
 			VM_BUG_ON(pmd_trans_huge(*pmd));
 			pte = pte_offset_map(pmd, pg);
 			if (pte_none(*pte)) {
 				pte_unmap(pte);
-				return i ? : -EFAULT;
+				goto efault;
 			}
 			vma = get_gate_vma(mm);
 			if (pages) {
@@ -1769,7 +1760,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 						page = pte_page(*pte);
 					else {
 						pte_unmap(pte);
-						return i ? : -EFAULT;
+						goto efault;
 					}
 				}
 				pages[i] = page;
@@ -1780,10 +1771,42 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			goto next_page;
 		}
 
-		if (!vma ||
-		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
-		    !(vm_flags & vma->vm_flags))
-			return i ? : -EFAULT;
+		if (!vma)
+			goto efault;
+		vm_flags = vma->vm_flags;
+		if (vm_flags & (VM_IO | VM_PFNMAP))
+			goto efault;
+
+		if (gup_flags & FOLL_WRITE) {
+			if (!(vm_flags & VM_WRITE)) {
+				if (!(gup_flags & FOLL_FORCE))
+					goto efault;
+				/*
+				 * We used to let the write,force case do COW
+				 * in a VM_MAYWRITE VM_SHARED !VM_WRITE vma, so
+				 * ptrace could set a breakpoint in a read-only
+				 * mapping of an executable, without corrupting
+				 * the file (yet only when that file had been
+				 * opened for writing!). Anon pages in shared
+				 * mappings are surprising: now just reject it.
+				 */
+				if (!is_cow_mapping(vm_flags)) {
+					WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
+					goto efault;
+				}
+			}
+		} else {
+			if (!(vm_flags & VM_READ)) {
+				if (!(gup_flags & FOLL_FORCE))
+					goto efault;
+				/*
+				 * Is there actually any vma we can reach here
+				 * which does not have VM_MAYREAD set?
+				 */
+				if (!(vm_flags & VM_MAYREAD))
+					goto efault;
+			}
+		}
 
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
@@ -1837,7 +1860,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 							return -EFAULT;
 					}
 					if (ret & VM_FAULT_SIGBUS)
-						return i ? i : -EFAULT;
+						goto efault;
 					BUG();
 				}
 
@@ -1895,6 +1918,8 @@ next_page:
 		} while (nr_pages && start < vma->vm_end);
 	} while (nr_pages);
 	return i;
+efault:
+	return i ? : -EFAULT;
 }
 EXPORT_SYMBOL(__get_user_pages);
 
@@ -1962,9 +1987,8 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
  * @start:	starting user address
  * @nr_pages:	number of pages from start to pin
  * @write:	whether pages will be written to by the caller
- * @force:	whether to force write access even if user mapping is
- *		readonly. This will result in the page being COWed even
- *		in MAP_SHARED mappings. You do not want this.
+ * @force:	whether to force access even when user mapping is currently
+ *		protected (but never forces write access to shared mapping).
  * @pages:	array that receives pointers to the pages pinned.
  *		Should be at least nr_pages long. Or NULL, if caller
  *		only intends to ensure the pages are faulted in.
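To make the new acceptance rules easy to check at a glance, here is a small standalone sketch (not from the kernel tree) that mirrors just the FOLL_WRITE/FOLL_FORCE permission logic added to __get_user_pages() above as a pure function. The flag values and the is_cow_mapping() test are copied from kernel headers of this era for illustration; the helper name gup_vma_permitted() is invented, and the VM_IO/VM_PFNMAP rejection and the WARN_ON_ONCE are left out.

/*
 * Illustrative sketch only: mirrors the per-vma permission check that
 * __get_user_pages() applies after this patch. Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020

#define FOLL_WRITE	0x01
#define FOLL_FORCE	0x10

/* Same test as the kernel's is_cow_mapping(): private, writable in principle */
static bool is_cow_mapping(unsigned long vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/* true: __get_user_pages() proceeds; false: it now takes the "goto efault" path */
static bool gup_vma_permitted(unsigned long vm_flags, unsigned int gup_flags)
{
	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return false;
			/* write,force may still COW in a private mapping ... */
			if (!is_cow_mapping(vm_flags))
				return false;	/* ... but shared areas are now refused */
		}
	} else {
		if (!(vm_flags & VM_READ)) {
			if (!(gup_flags & FOLL_FORCE))
				return false;
			if (!(vm_flags & VM_MAYREAD))
				return false;
		}
	}
	return true;
}

int main(void)
{
	/* read-only private mapping of an executable: ptrace breakpoints keep working */
	printf("private r--, write+force: %d\n",
	       gup_vma_permitted(VM_READ | VM_MAYREAD | VM_MAYWRITE,
				 FOLL_WRITE | FOLL_FORCE));
	/* read-only shared mapping of a file opened for writing: now refused */
	printf("shared  r--, write+force: %d\n",
	       gup_vma_permitted(VM_READ | VM_SHARED | VM_MAYREAD | VM_MAYWRITE,
				 FOLL_WRITE | FOLL_FORCE));
	return 0;
}

Compiled and run, the first case (write+force into a read-only private mapping, the ptrace-breakpoint pattern) prints 1, while the second (a read-only shared mapping of a file opened for writing) prints 0, which is exactly the case that now returns -EFAULT.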