Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  161
1 files changed, 23 insertions, 138 deletions
diff --git a/mm/memory.c b/mm/memory.c
index f82b359b..bd16dca 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -966,7 +966,7 @@ no_page_table:
* has touched so far, we don't want to allocate page tables.
*/
if (flags & FOLL_ANON) {
- page = ZERO_PAGE(address);
+ page = ZERO_PAGE(0);
if (flags & FOLL_GET)
get_page(page);
BUG_ON(flags & FOLL_WRITE);
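
The hunk above only changes how follow_page() picks a backing page for untouched anonymous memory (a single kernel-wide zero page instead of an address-colored one); the user-visible guarantee is unchanged. A minimal userspace sketch of that guarantee, assuming a Linux host with MAP_ANONYMOUS, is:

#include <assert.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;
	unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(p != MAP_FAILED);

	/* Read-only access to pages nobody has written: always zero,
	 * whether the kernel maps a shared zero page or allocates one. */
	for (size_t i = 0; i < len; i++)
		assert(p[i] == 0);

	printf("untouched anonymous memory reads back as zeroes\n");
	munmap(p, len);
	return 0;
}
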
@@ -1111,95 +1111,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages);
-static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, unsigned long end, pgprot_t prot)
-{
- pte_t *pte;
- spinlock_t *ptl;
- int err = 0;
-
- pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (!pte)
- return -EAGAIN;
- arch_enter_lazy_mmu_mode();
- do {
- struct page *page = ZERO_PAGE(addr);
- pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
-
- if (unlikely(!pte_none(*pte))) {
- err = -EEXIST;
- pte++;
- break;
- }
- page_cache_get(page);
- page_add_file_rmap(page);
- inc_mm_counter(mm, file_rss);
- set_pte_at(mm, addr, pte, zero_pte);
- } while (pte++, addr += PAGE_SIZE, addr != end);
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(pte - 1, ptl);
- return err;
-}
-
-static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
- unsigned long addr, unsigned long end, pgprot_t prot)
-{
- pmd_t *pmd;
- unsigned long next;
- int err;
-
- pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
- return -EAGAIN;
- do {
- next = pmd_addr_end(addr, end);
- err = zeromap_pte_range(mm, pmd, addr, next, prot);
- if (err)
- break;
- } while (pmd++, addr = next, addr != end);
- return err;
-}
-
-static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
- unsigned long addr, unsigned long end, pgprot_t prot)
-{
- pud_t *pud;
- unsigned long next;
- int err;
-
- pud = pud_alloc(mm, pgd, addr);
- if (!pud)
- return -EAGAIN;
- do {
- next = pud_addr_end(addr, end);
- err = zeromap_pmd_range(mm, pud, addr, next, prot);
- if (err)
- break;
- } while (pud++, addr = next, addr != end);
- return err;
-}
-
-int zeromap_page_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long size, pgprot_t prot)
-{
- pgd_t *pgd;
- unsigned long next;
- unsigned long end = addr + size;
- struct mm_struct *mm = vma->vm_mm;
- int err;
-
- BUG_ON(addr >= end);
- pgd = pgd_offset(mm, addr);
- flush_cache_range(vma, addr, end);
- do {
- next = pgd_addr_end(addr, end);
- err = zeromap_pud_range(mm, pgd, addr, next, prot);
- if (err)
- break;
- } while (pgd++, addr = next, addr != end);
- return err;
-}
-
pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
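
The zeromap_pte/pmd/pud/page_range helpers deleted above all follow one range-walk idiom: clamp the current entry's span with *_addr_end(), descend into the next level, then advance with "while (p++, addr = next, addr != end)". A toy userspace sketch of that idiom, with made-up TOY_* sizes and only two levels for illustration, is:

#include <stdio.h>

#define TOY_PAGE_SIZE	0x1000UL
#define TOY_PMD_SIZE	(TOY_PAGE_SIZE * 4)	/* 4 pages per toy "pmd" */

/* Clamp the end of the current entry's span to the overall end. */
static unsigned long toy_pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + TOY_PMD_SIZE) & ~(TOY_PMD_SIZE - 1);
	return boundary < end ? boundary : end;
}

/* Lowest level: visit each page in [addr, end). */
static void toy_pte_range(unsigned long addr, unsigned long end)
{
	do {
		printf("  map page at %#lx\n", addr);
	} while (addr += TOY_PAGE_SIZE, addr != end);
}

int main(void)
{
	unsigned long addr = 0x2000, end = 0xa000, next;

	do {
		next = toy_pmd_addr_end(addr, end);
		printf("pmd entry covering [%#lx, %#lx)\n", addr, next);
		toy_pte_range(addr, next);
	} while (addr = next, addr != end);
	return 0;
}
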
@@ -1700,10 +1611,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+ if (ptep_set_access_flags(vma, address, page_table, entry,1))
update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
- }
ret |= VM_FAULT_WRITE;
goto unlock;
}
@@ -1717,16 +1626,11 @@ gotten:
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- if (old_page == ZERO_PAGE(address)) {
- new_page = alloc_zeroed_user_highpage_movable(vma, address);
- if (!new_page)
- goto oom;
- } else {
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (!new_page)
- goto oom;
- cow_user_page(new_page, old_page, address, vma);
- }
+ VM_BUG_ON(old_page == ZERO_PAGE(0));
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (!new_page)
+ goto oom;
+ cow_user_page(new_page, old_page, address, vma);
/*
* Re-check the pte - we dropped the lock
@@ -1744,7 +1648,6 @@ gotten:
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- lazy_mmu_prot_update(entry);
/*
* Clear the pte entry and flush it first, before updating the
* pte with the new entry. This will avoid a race condition
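
The do_wp_page() hunks above are the write-fault side of copy-on-write: allocate a fresh page, copy the old contents with cow_user_page(), then install the new pte. A rough userspace way to observe that path, using plain fork() and mmap() and nothing specific to this patch, is:

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(p != MAP_FAILED);
	strcpy(p, "parent data");

	pid_t pid = fork();
	if (pid == 0) {
		strcpy(p, "child data");	/* write fault -> COW copy */
		_exit(0);
	}
	waitpid(pid, NULL, 0);

	/* The child's write landed in its own copy of the page. */
	printf("parent still sees: %s\n", p);
	assert(strcmp(p, "parent data") == 0);
	return 0;
}
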
@@ -2252,44 +2155,28 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
spinlock_t *ptl;
pte_t entry;
- if (write_access) {
- /* Allocate our own private page. */
- pte_unmap(page_table);
-
- if (unlikely(anon_vma_prepare(vma)))
- goto oom;
- page = alloc_zeroed_user_highpage_movable(vma, address);
- if (!page)
- goto oom;
-
- entry = mk_pte(page, vma->vm_page_prot);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ /* Allocate our own private page. */
+ pte_unmap(page_table);
- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
- if (!pte_none(*page_table))
- goto release;
- inc_mm_counter(mm, anon_rss);
- lru_cache_add_active(page);
- page_add_new_anon_rmap(page, vma, address);
- } else {
- /* Map the ZERO_PAGE - vm_page_prot is readonly */
- page = ZERO_PAGE(address);
- page_cache_get(page);
- entry = mk_pte(page, vma->vm_page_prot);
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_zeroed_user_highpage_movable(vma, address);
+ if (!page)
+ goto oom;
- ptl = pte_lockptr(mm, pmd);
- spin_lock(ptl);
- if (!pte_none(*page_table))
- goto release;
- inc_mm_counter(mm, file_rss);
- page_add_file_rmap(page);
- }
+ entry = mk_pte(page, vma->vm_page_prot);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (!pte_none(*page_table))
+ goto release;
+ inc_mm_counter(mm, anon_rss);
+ lru_cache_add_active(page);
+ page_add_new_anon_rmap(page, vma, address);
set_pte_at(mm, address, page_table, entry);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
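
With the read-fault branch above gone, do_anonymous_page() allocates and accounts a real zeroed page even when the first touch is only a read, instead of wiring in ZERO_PAGE read-only. One way to probe the difference from userspace is to watch resident pages while merely reading an untouched mapping; the result depends on the running kernel (later kernels reinstated a zero-page read path), so treat this sketch as a probe, not a guarantee:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Second field of /proc/self/statm is resident pages. */
static long resident_pages(void)
{
	long size = 0, resident = 0;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f || fscanf(f, "%ld %ld", &size, &resident) != 2)
		exit(1);
	fclose(f);
	return resident;
}

int main(void)
{
	size_t pages = 1024, i;
	volatile unsigned char *p = mmap(NULL, pages * 4096,
					 PROT_READ | PROT_WRITE,
					 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	unsigned char sink = 0;
	long before;

	if (p == MAP_FAILED)
		exit(1);
	before = resident_pages();

	for (i = 0; i < pages; i++)
		sink ^= p[i * 4096];		/* read-only first touches */

	printf("resident pages: %ld -> %ld (sink=%d)\n",
	       before, resident_pages(), sink);
	return 0;
}
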
@@ -2442,7 +2329,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
} else {
if (anon)
page_cache_release(page);
@@ -2470,7 +2356,7 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int write_access, pte_t orig_pte)
{
pgoff_t pgoff = (((address & PAGE_MASK)
- - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
+ - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
pte_unmap(page_table);
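
The pgoff expression above converts a faulting user address into a page index within the mapped file; PAGE_CACHE_SHIFT was simply an alias for PAGE_SHIFT here, so the substitution is mechanical. A self-contained re-run of the arithmetic with hypothetical numbers:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL;	/* hypothetical VMA start */
	unsigned long vm_pgoff = 16;			/* VMA begins 16 pages into the file */
	unsigned long address  = 0x7f0000003abcUL;	/* faulting address */

	unsigned long pgoff = (((address & PAGE_MASK) - vm_start) >> PAGE_SHIFT)
			      + vm_pgoff;
	printf("file page offset = %lu\n", pgoff);	/* 3 + 16 = 19 */
	return 0;
}
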
@@ -2614,7 +2500,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
} else {
/*
* This is needed only for protection faults but the arch code