 include/linux/mm.h |  9
 mm/memory.c        | 67
 2 files changed, 7 insertions(+), 69 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2128ef7..eb815cf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -166,8 +166,6 @@ struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
- unsigned long (*nopfn)(struct vm_area_struct *area,
- unsigned long address);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
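With the hook deleted above, any driver still setting ->nopfn no longer
compiles. A minimal sketch of the required rewiring, using hypothetical
example_* names (not from this patch); the handler body is sketched after
the vm_insert_pfn hunk below:

	/* ->nopfn took a bare address and returned a pfn; its replacement
	 * ->fault takes a struct vm_fault and returns a VM_FAULT_* code. */
	static int example_fault(struct vm_area_struct *vma,
				 struct vm_fault *vmf);

	static struct vm_operations_struct example_vm_ops = {
		.fault	= example_fault,	/* was: .nopfn = example_nopfn */
	};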
@@ -675,13 +673,6 @@ static inline int page_mapped(struct page *page)
}
/*
- * Error return values for the *_nopfn functions
- */
-#define NOPFN_SIGBUS ((unsigned long) -1)
-#define NOPFN_OOM ((unsigned long) -2)
-#define NOPFN_REFAULT ((unsigned long) -3)
-
-/*
* Different kinds of faults, as returned by handle_mm_fault().
* Used to decide whether a process gets delivered SIGBUS or
* just gets major/minor fault counters bumped up.
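The three status values deleted above map one-for-one onto codes the
->fault protocol already defines; NOPFN_REFAULT's "the pte is already
correct, just return and retry" meaning is, loosely, what VM_FAULT_NOPAGE
expresses. An illustrative translation, not code from this patch, and only
meaningful for the three old status values:

	/* Illustrative only: VM_FAULT_* equivalents of the removed codes. */
	static int nopfn_status_to_vm_fault(unsigned long status)
	{
		if (status == (unsigned long) -2)	/* was NOPFN_OOM */
			return VM_FAULT_OOM;
		if (status == (unsigned long) -1)	/* was NOPFN_SIGBUS */
			return VM_FAULT_SIGBUS;
		return VM_FAULT_NOPAGE;			/* was NOPFN_REFAULT */
	}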
diff --git a/mm/memory.c b/mm/memory.c
index 2302d22..46dbed4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1058,11 +1058,9 @@ static inline int use_zero_page(struct vm_area_struct *vma)
if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
return 0;
/*
- * And if we have a fault or a nopfn routine, it's not an
- * anonymous region.
+ * And if we have a fault routine, it's not an anonymous region.
*/
- return !vma->vm_ops ||
- (!vma->vm_ops->fault && !vma->vm_ops->nopfn);
+ return !vma->vm_ops || !vma->vm_ops->fault;
}
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
@@ -1338,6 +1336,11 @@ out:
*
* This function should only be called from a vm_ops->fault handler, and
* in that case the handler should return NULL.
+ *
+ * vma cannot be a COW mapping.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
*/
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
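The comment block added above spells out vm_insert_pfn()'s contract, which
is what converted drivers build on. A hedged sketch of a whole handler for
a linear VM_PFNMAP region; example_base_pfn and the example_* names are
hypothetical, not taken from this patch:

	static unsigned long example_base_pfn;	/* hypothetical: set at mmap time */

	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		unsigned long addr = (unsigned long)vmf->virtual_address;

		/* Per the comment above: PFNMAP only, never a COW mapping. */
		if (vm_insert_pfn(vma, addr, example_base_pfn + vmf->pgoff))
			return VM_FAULT_SIGBUS;

		/* The pte is installed; there is no struct page to return. */
		return VM_FAULT_NOPAGE;
	}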
@@ -2501,59 +2504,6 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
-
-/*
- * do_no_pfn() tries to create a new page mapping for a page without
- * a struct_page backing it
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
- *
- * It is expected that the ->nopfn handler always returns the same pfn
- * for a given virtual mapping.
- *
- * Mark this `noinline' to prevent it from bloating the main pagefault code.
- */
-static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access)
-{
- spinlock_t *ptl;
- pte_t entry;
- unsigned long pfn;
-
- pte_unmap(page_table);
- BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
- BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-
- pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
-
- BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
-
- if (unlikely(pfn == NOPFN_OOM))
- return VM_FAULT_OOM;
- else if (unlikely(pfn == NOPFN_SIGBUS))
- return VM_FAULT_SIGBUS;
- else if (unlikely(pfn == NOPFN_REFAULT))
- return 0;
-
- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-
- /* Only go through if we didn't race with anybody else... */
- if (pte_none(*page_table)) {
- entry = pfn_pte(pfn, vma->vm_page_prot);
- if (write_access)
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- set_pte_at(mm, address, page_table, entry);
- }
- pte_unmap_unlock(page_table, ptl);
- return 0;
-}
-
/*
* Fault of a previously existing named mapping. Repopulate the pte
* from the encoded file_pte if possible. This enables swappable
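Note that the pte_offset_map_lock()/pte_none() sequence deleted above is
not lost: vm_insert_pfn() performs an equivalent locked re-check
internally, so every converted driver shares one copy of it rather than
going through a separate fault path. The pattern itself, restated as a
standalone illustration with a hypothetical example_* name:

	/* Take the pte lock, confirm no other thread resolved the fault
	 * first, then install the entry -- the same steps do_no_pfn()
	 * performed after calling ->nopfn. */
	static int example_install_pte(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmd,
				       unsigned long pfn, int write_access)
	{
		spinlock_t *ptl;
		pte_t *page_table;
		pte_t entry;

		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
		if (pte_none(*page_table)) {	/* nobody beat us to it */
			entry = pfn_pte(pfn, vma->vm_page_prot);
			if (write_access)
				entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			set_pte_at(mm, address, page_table, entry);
		}
		pte_unmap_unlock(page_table, ptl);
		return 0;
	}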
@@ -2614,9 +2564,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
pte, pmd, write_access, entry);
- if (unlikely(vma->vm_ops->nopfn))
- return do_no_pfn(mm, vma, address, pte,
- pmd, write_access);
}
return do_anonymous_page(mm, vma, address,
pte, pmd, write_access);