-rw-r--r-- | include/linux/mm.h |   4
-rw-r--r-- | mm/memory.c        | 100
-rw-r--r-- | mm/mmap.c          |  12
-rw-r--r-- | mm/mprotect.c      |  11
4 files changed, 99 insertions, 28 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 697c6bf..3b09444 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -199,6 +199,10 @@ struct vm_operations_struct {
 	void (*close)(struct vm_area_struct * area);
 	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
 	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+
+	/* notification that a previously read-only page is about to become
+	 * writable, if an error is returned it will cause a SIGBUS */
+	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
 #ifdef CONFIG_NUMA
 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
diff --git a/mm/memory.c b/mm/memory.c
index 11673c5..247b5c3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1457,25 +1457,60 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int ret = VM_FAULT_MINOR;
+	int reuse, ret = VM_FAULT_MINOR;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page)
 		goto gotten;
 
-	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
-		int reuse = can_share_swap_page(old_page);
-		unlock_page(old_page);
-		if (reuse) {
-			flush_cache_page(vma, address, pte_pfn(orig_pte));
-			entry = pte_mkyoung(orig_pte);
-			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-			ptep_set_access_flags(vma, address, page_table, entry, 1);
-			update_mmu_cache(vma, address, entry);
-			lazy_mmu_prot_update(entry);
-			ret |= VM_FAULT_WRITE;
-			goto unlock;
+	if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
+					(VM_SHARED|VM_WRITE))) {
+		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+			/*
+			 * Notify the address space that the page is about to
+			 * become writable so that it can prohibit this or wait
+			 * for the page to get into an appropriate state.
+			 *
+			 * We do this without the lock held, so that it can
+			 * sleep if it needs to.
+			 */
+			page_cache_get(old_page);
+			pte_unmap_unlock(page_table, ptl);
+
+			if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+				goto unwritable_page;
+
+			page_cache_release(old_page);
+
+			/*
+			 * Since we dropped the lock we need to revalidate
+			 * the PTE as someone else may have changed it.  If
+			 * they did, we just return, as we can count on the
+			 * MMU to tell us if they didn't also make it writable.
+			 */
+			page_table = pte_offset_map_lock(mm, pmd, address,
+							 &ptl);
+			if (!pte_same(*page_table, orig_pte))
+				goto unlock;
 		}
+
+		reuse = 1;
+	} else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
+		reuse = can_share_swap_page(old_page);
+		unlock_page(old_page);
+	} else {
+		reuse = 0;
+	}
+
+	if (reuse) {
+		flush_cache_page(vma, address, pte_pfn(orig_pte));
+		entry = pte_mkyoung(orig_pte);
+		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		ptep_set_access_flags(vma, address, page_table, entry, 1);
+		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
+		ret |= VM_FAULT_WRITE;
+		goto unlock;
 	}
 
 	/*
@@ -1535,6 +1570,10 @@ oom:
 	if (old_page)
 		page_cache_release(old_page);
 	return VM_FAULT_OOM;
+
+unwritable_page:
+	page_cache_release(old_page);
+	return VM_FAULT_SIGBUS;
 }
 
 /*
@@ -2083,18 +2122,31 @@ retry:
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	if (write_access && !(vma->vm_flags & VM_SHARED)) {
-		struct page *page;
+	if (write_access) {
+		if (!(vma->vm_flags & VM_SHARED)) {
+			struct page *page;
 
-		if (unlikely(anon_vma_prepare(vma)))
-			goto oom;
-		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
-		if (!page)
-			goto oom;
-		copy_user_highpage(page, new_page, address);
-		page_cache_release(new_page);
-		new_page = page;
-		anon = 1;
+			if (unlikely(anon_vma_prepare(vma)))
+				goto oom;
+			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+			if (!page)
+				goto oom;
+			copy_user_highpage(page, new_page, address);
+			page_cache_release(new_page);
+			new_page = page;
+			anon = 1;
+
+		} else {
+			/* if the page will be shareable, see if the backing
+			 * address space wants to know that the page is about
+			 * to become writable */
+			if (vma->vm_ops->page_mkwrite &&
+			    vma->vm_ops->page_mkwrite(vma, new_page) < 0
+			    ) {
+				page_cache_release(new_page);
+				return VM_FAULT_SIGBUS;
+			}
+		}
 	}
 
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1065,7 +1065,8 @@ munmap_back:
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_flags = vm_flags;
-	vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+	vma->vm_page_prot = protection_map[vm_flags &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 	vma->vm_pgoff = pgoff;
 
 	if (file) {
@@ -1089,6 +1090,12 @@ munmap_back:
 		goto free_vma;
 	}
 
+	/* Don't make the VMA automatically writable if it's shared, but the
+	 * backer wishes to know when pages are first written to */
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+		vma->vm_page_prot =
+			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
 	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
 	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
 	 * that memory reservation must be checked; but that reservation
@@ -1921,7 +1928,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	vma->vm_end = addr + len;
 	vma->vm_pgoff = pgoff;
 	vma->vm_flags = flags;
-	vma->vm_page_prot = protection_map[flags & 0x0f];
+	vma->vm_page_prot = protection_map[flags &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 14f93e6..638edab 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -123,6 +123,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long oldflags = vma->vm_flags;
 	long nrpages = (end - start) >> PAGE_SHIFT;
 	unsigned long charged = 0;
+	unsigned int mask;
 	pgprot_t newprot;
 	pgoff_t pgoff;
 	int error;
@@ -149,8 +150,6 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		}
 	}
 
-	newprot = protection_map[newflags & 0xf];
-
 	/*
 	 * First try to merge with previous and/or next vma.
 	 */
@@ -177,6 +176,14 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	}
 
 success:
+	/* Don't make the VMA automatically writable if it's shared, but the
+	 * backer wishes to know when pages are first written to */
+	mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+		mask &= ~VM_SHARED;
+
+	newprot = protection_map[newflags & mask];
+
 	/*
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
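
For illustration, a minimal sketch of how a filesystem might hook the new callback, assuming the era's signature above where page_mkwrite() receives the struct page directly (later kernels changed this signature). The example_* names are hypothetical and not part of this patch; the truncation check and the block-reservation comment show the typical uses, not a required implementation:

/*
 * Hypothetical sketch -- not part of the patch above.  The caller in
 * do_wp_page() drops the PTE lock before invoking us, so sleeping
 * (e.g. in lock_page()) is safe here.  Returning a negative value
 * makes the fault handler raise SIGBUS, per the comment added to
 * vm_operations_struct.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	int ret = 0;

	lock_page(page);

	/* The page may have been truncated or invalidated while the
	 * write fault was in flight; refuse to make it writable. */
	if (page->mapping != inode->i_mapping)
		ret = -EINVAL;

	/* A real filesystem would reserve disk blocks for the page
	 * here, so that later writeback cannot fail with ENOSPC. */

	unlock_page(page);
	return ret;
}

static struct vm_operations_struct example_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
	.page_mkwrite	= example_page_mkwrite,
};

An ->mmap() implementation would install example_file_vm_ops on the VMA; because page_mkwrite is non-NULL, the mmap()/mprotect() changes above leave VM_SHARED out of the protection_map index, so shared pages start out read-only and the first write to each page faults into do_wp_page() and delivers the notification.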