author     alc <alc@FreeBSD.org>  2003-10-03 22:46:53 +0000
committer  alc <alc@FreeBSD.org>  2003-10-03 22:46:53 +0000
commit     b1691aebe4218b5dedfc2dac5620bc1e02226f72 (patch)
tree       10ac1c9d9b930d5a0606d41e87f29b5d53946de0
parent     678d7d93783f9161ce47727c8e23987c819d7ab6 (diff)
Migrate pmap_prefault() into the machine-independent virtual memory layer.
A small helper function, pmap_is_prefaultable(), is added. This function
encapsulates the few lines of pmap_prefault() that actually vary from
machine to machine.

Note: pmap_is_prefaultable() and pmap_mincore() have much in common.
Going forward, it's worth considering their merger.
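In outline, the new division of labor is: the machine-independent fault
handler keeps the clustering loop, and each pmap supplies a single
side-effect-free predicate that probes its page tables. The sketch below
is condensed from the vm_fault.c and i386 hunks in this diff, with the
locking, the range checks, and the shadow-object lookup elided:

/*
 * Machine-independent side (sys/vm/vm_fault.c): the clustering loop
 * survives intact; only the page-table probe is delegated.
 */
static void
vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	vm_offset_t addr;
	int i;

	for (i = 0; i < PAGEORDER_SIZE; i++) {
		addr = addra + prefault_pageorder[i];
		/* (range checks against entry->start/end elided) */

		/* The one machine-dependent question: */
		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		/*
		 * Look up the resident page in the object chain and,
		 * if it is valid and unbusy, pmap_enter_quick() it.
		 */
	}
}

/*
 * Machine-dependent side (i386 version shown): a virtual address is
 * prefaultable iff its page table page is present but its PTE is
 * still empty.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pt_entry_t *pte;

	if ((*pmap_pde(pmap, addr)) == 0)	/* no page table page */
		return (FALSE);
	pte = vtopte(addr);
	if (*pte)				/* already mapped */
		return (FALSE);
	return (TRUE);
}

Because the predicate takes only a (pmap, va) pair and has no side
effects, a pmap can opt out of prefaulting entirely by returning FALSE,
as the powerpc and sparc64 stubs in this change do.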
-rw-r--r--  sys/alpha/alpha/pmap.c          | 119
-rw-r--r--  sys/amd64/amd64/pmap.c          | 121
-rw-r--r--  sys/i386/i386/pmap.c            | 117
-rw-r--r--  sys/ia64/ia64/pmap.c            | 112
-rw-r--r--  sys/powerpc/aim/mmu_oea.c       |  21
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c   |  21
-rw-r--r--  sys/powerpc/powerpc/pmap.c      |  21
-rw-r--r--  sys/sparc64/sparc64/pmap.c      |  19
-rw-r--r--  sys/vm/pmap.h                   |   2
-rw-r--r--  sys/vm/vm_fault.c               |  92
10 files changed, 221 insertions(+), 424 deletions(-)
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 82ab789..1133ad2 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -2093,105 +2093,6 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
}
/*
- * pmap_prefault provides a quick way of clustering
- * pagefaults into a processes address space. It is a "cousin"
- * of pmap_object_init_pt, except it runs at page fault time instead
- * of mmap time.
- */
-#define PFBAK 4
-#define PFFOR 4
-#define PAGEORDER_SIZE (PFBAK+PFFOR)
-
-static int pmap_prefault_pageorder[] = {
- -1 * PAGE_SIZE, 1 * PAGE_SIZE,
- -2 * PAGE_SIZE, 2 * PAGE_SIZE,
- -3 * PAGE_SIZE, 3 * PAGE_SIZE,
- -4 * PAGE_SIZE, 4 * PAGE_SIZE
-};
-
-void
-pmap_prefault(pmap, addra, entry)
- pmap_t pmap;
- vm_offset_t addra;
- vm_map_entry_t entry;
-{
- int i;
- vm_offset_t starta;
- vm_offset_t addr;
- vm_pindex_t pindex;
- vm_page_t m, mpte;
- vm_object_t object;
-
- if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
- return;
-
- object = entry->object.vm_object;
-
- starta = addra - PFBAK * PAGE_SIZE;
- if (starta < entry->start) {
- starta = entry->start;
- } else if (starta > addra) {
- starta = 0;
- }
-
- mpte = NULL;
- for (i = 0; i < PAGEORDER_SIZE; i++) {
- vm_object_t backing_object, lobject;
- pt_entry_t *pte;
-
- addr = addra + pmap_prefault_pageorder[i];
- if (addr > addra + (PFFOR * PAGE_SIZE))
- addr = 0;
-
- if (addr < starta || addr >= entry->end)
- continue;
-
- if (!pmap_pte_v(pmap_lev1pte(pmap, addr))
- || !pmap_pte_v(pmap_lev2pte(pmap, addr)))
- continue;
-
- pte = vtopte(addr);
- if (*pte)
- continue;
-
- pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
- lobject = object;
- VM_OBJECT_LOCK(lobject);
- while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
- lobject->type == OBJT_DEFAULT &&
- (backing_object = lobject->backing_object) != NULL) {
- if (lobject->backing_object_offset & PAGE_MASK)
- break;
- pindex += lobject->backing_object_offset >> PAGE_SHIFT;
- VM_OBJECT_LOCK(backing_object);
- VM_OBJECT_UNLOCK(lobject);
- lobject = backing_object;
- }
- VM_OBJECT_UNLOCK(lobject);
- /*
- * give-up when a page is not in memory
- */
- if (m == NULL)
- break;
- vm_page_lock_queues();
- if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
- (m->busy == 0) &&
- (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
-
- if ((m->queue - m->pc) == PQ_CACHE) {
- vm_page_deactivate(m);
- }
- vm_page_busy(m);
- vm_page_unlock_queues();
- mpte = pmap_enter_quick(pmap, addr, m, mpte);
- vm_page_lock_queues();
- vm_page_wakeup(m);
- }
- vm_page_unlock_queues();
- }
-}
-
-/*
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
* pair.
@@ -2557,6 +2458,26 @@ pmap_is_modified(vm_page_t m)
}
/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *pte;
+
+ if (!pmap_pte_v(pmap_lev1pte(pmap, addr)) ||
+ !pmap_pte_v(pmap_lev2pte(pmap, addr)))
+ return (FALSE);
+ pte = vtopte(addr);
+ if (*pte)
+ return (FALSE);
+ return (TRUE);
+}
+
+/*
* Clear the modify bits on the specified physical page.
*/
void
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index d991c26..2e70b52 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2124,106 +2124,6 @@ retry:
}
/*
- * pmap_prefault provides a quick way of clustering
- * pagefaults into a processes address space. It is a "cousin"
- * of pmap_object_init_pt, except it runs at page fault time instead
- * of mmap time.
- */
-#define PFBAK 4
-#define PFFOR 4
-#define PAGEORDER_SIZE (PFBAK+PFFOR)
-
-static int pmap_prefault_pageorder[] = {
- -1 * PAGE_SIZE, 1 * PAGE_SIZE,
- -2 * PAGE_SIZE, 2 * PAGE_SIZE,
- -3 * PAGE_SIZE, 3 * PAGE_SIZE,
- -4 * PAGE_SIZE, 4 * PAGE_SIZE
-};
-
-void
-pmap_prefault(pmap, addra, entry)
- pmap_t pmap;
- vm_offset_t addra;
- vm_map_entry_t entry;
-{
- int i;
- vm_offset_t starta;
- vm_offset_t addr;
- vm_pindex_t pindex;
- vm_page_t m, mpte;
- vm_object_t object;
- pd_entry_t *pde;
-
- if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
- return;
-
- object = entry->object.vm_object;
-
- starta = addra - PFBAK * PAGE_SIZE;
- if (starta < entry->start) {
- starta = entry->start;
- } else if (starta > addra) {
- starta = 0;
- }
-
- mpte = NULL;
- for (i = 0; i < PAGEORDER_SIZE; i++) {
- vm_object_t backing_object, lobject;
- pt_entry_t *pte;
-
- addr = addra + pmap_prefault_pageorder[i];
- if (addr > addra + (PFFOR * PAGE_SIZE))
- addr = 0;
-
- if (addr < starta || addr >= entry->end)
- continue;
-
- pde = pmap_pde(pmap, addr);
- if (pde == NULL || (*pde & PG_V) == 0)
- continue;
-
- pte = vtopte(addr);
- if ((*pte & PG_V) == 0)
- continue;
-
- pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
- lobject = object;
- VM_OBJECT_LOCK(lobject);
- while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
- lobject->type == OBJT_DEFAULT &&
- (backing_object = lobject->backing_object) != NULL) {
- if (lobject->backing_object_offset & PAGE_MASK)
- break;
- pindex += lobject->backing_object_offset >> PAGE_SHIFT;
- VM_OBJECT_LOCK(backing_object);
- VM_OBJECT_UNLOCK(lobject);
- lobject = backing_object;
- }
- VM_OBJECT_UNLOCK(lobject);
- /*
- * give-up when a page is not in memory
- */
- if (m == NULL)
- break;
- vm_page_lock_queues();
- if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
- (m->busy == 0) &&
- (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
-
- if ((m->queue - m->pc) == PQ_CACHE) {
- vm_page_deactivate(m);
- }
- vm_page_busy(m);
- vm_page_unlock_queues();
- mpte = pmap_enter_quick(pmap, addr, m, mpte);
- vm_page_lock_queues();
- vm_page_wakeup(m);
- }
- vm_page_unlock_queues();
- }
-}
-
-/*
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
* pair.
@@ -2598,6 +2498,27 @@ pmap_is_modified(vm_page_t m)
}
/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+
+ pde = pmap_pde(pmap, addr);
+ if (pde == NULL || (*pde & PG_V) == 0)
+ return (FALSE);
+ pte = vtopte(addr);
+ if ((*pte & PG_V) == 0)
+ return (FALSE);
+ return (TRUE);
+}
+
+/*
* Clear the given bit in each of the given page's ptes.
*/
static __inline void
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index f1ce5a3..d070f90 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2192,104 +2192,6 @@ retry:
}
/*
- * pmap_prefault provides a quick way of clustering
- * pagefaults into a processes address space. It is a "cousin"
- * of pmap_object_init_pt, except it runs at page fault time instead
- * of mmap time.
- */
-#define PFBAK 4
-#define PFFOR 4
-#define PAGEORDER_SIZE (PFBAK+PFFOR)
-
-static int pmap_prefault_pageorder[] = {
- -1 * PAGE_SIZE, 1 * PAGE_SIZE,
- -2 * PAGE_SIZE, 2 * PAGE_SIZE,
- -3 * PAGE_SIZE, 3 * PAGE_SIZE,
- -4 * PAGE_SIZE, 4 * PAGE_SIZE
-};
-
-void
-pmap_prefault(pmap, addra, entry)
- pmap_t pmap;
- vm_offset_t addra;
- vm_map_entry_t entry;
-{
- int i;
- vm_offset_t starta;
- vm_offset_t addr;
- vm_pindex_t pindex;
- vm_page_t m, mpte;
- vm_object_t object;
-
- if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
- return;
-
- object = entry->object.vm_object;
-
- starta = addra - PFBAK * PAGE_SIZE;
- if (starta < entry->start) {
- starta = entry->start;
- } else if (starta > addra) {
- starta = 0;
- }
-
- mpte = NULL;
- for (i = 0; i < PAGEORDER_SIZE; i++) {
- vm_object_t backing_object, lobject;
- pt_entry_t *pte;
-
- addr = addra + pmap_prefault_pageorder[i];
- if (addr > addra + (PFFOR * PAGE_SIZE))
- addr = 0;
-
- if (addr < starta || addr >= entry->end)
- continue;
-
- if ((*pmap_pde(pmap, addr)) == 0)
- continue;
-
- pte = vtopte(addr);
- if (*pte)
- continue;
-
- pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
- lobject = object;
- VM_OBJECT_LOCK(lobject);
- while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
- lobject->type == OBJT_DEFAULT &&
- (backing_object = lobject->backing_object) != NULL) {
- if (lobject->backing_object_offset & PAGE_MASK)
- break;
- pindex += lobject->backing_object_offset >> PAGE_SHIFT;
- VM_OBJECT_LOCK(backing_object);
- VM_OBJECT_UNLOCK(lobject);
- lobject = backing_object;
- }
- VM_OBJECT_UNLOCK(lobject);
- /*
- * give-up when a page is not in memory
- */
- if (m == NULL)
- break;
- vm_page_lock_queues();
- if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
- (m->busy == 0) &&
- (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
-
- if ((m->queue - m->pc) == PQ_CACHE) {
- vm_page_deactivate(m);
- }
- vm_page_busy(m);
- vm_page_unlock_queues();
- mpte = pmap_enter_quick(pmap, addr, m, mpte);
- vm_page_lock_queues();
- vm_page_wakeup(m);
- }
- vm_page_unlock_queues();
- }
-}
-
-/*
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
* pair.
@@ -2764,6 +2666,25 @@ pmap_is_modified(vm_page_t m)
}
/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *pte;
+
+ if ((*pmap_pde(pmap, addr)) == 0)
+ return (FALSE);
+ pte = vtopte(addr);
+ if (*pte)
+ return (FALSE);
+ return (TRUE);
+}
+
+/*
* Clear the given bit in each of the given page's ptes.
*/
static __inline void
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index a404707..40400cf 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1736,101 +1736,6 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
}
/*
- * pmap_prefault provides a quick way of clustering
- * pagefaults into a processes address space. It is a "cousin"
- * of pmap_object_init_pt, except it runs at page fault time instead
- * of mmap time.
- */
-#define PFBAK 4
-#define PFFOR 4
-#define PAGEORDER_SIZE (PFBAK+PFFOR)
-
-static int pmap_prefault_pageorder[] = {
- -1 * PAGE_SIZE, 1 * PAGE_SIZE,
- -2 * PAGE_SIZE, 2 * PAGE_SIZE,
- -3 * PAGE_SIZE, 3 * PAGE_SIZE,
- -4 * PAGE_SIZE, 4 * PAGE_SIZE
-};
-
-void
-pmap_prefault(pmap, addra, entry)
- pmap_t pmap;
- vm_offset_t addra;
- vm_map_entry_t entry;
-{
- int i;
- vm_offset_t starta;
- vm_offset_t addr;
- vm_pindex_t pindex;
- vm_page_t m, mpte;
- vm_object_t object;
-
- if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
- return;
-
- object = entry->object.vm_object;
-
- starta = addra - PFBAK * PAGE_SIZE;
- if (starta < entry->start) {
- starta = entry->start;
- } else if (starta > addra) {
- starta = 0;
- }
-
- mpte = NULL;
- for (i = 0; i < PAGEORDER_SIZE; i++) {
- vm_object_t backing_object, lobject;
- struct ia64_lpte *pte;
-
- addr = addra + pmap_prefault_pageorder[i];
- if (addr > addra + (PFFOR * PAGE_SIZE))
- addr = 0;
-
- if (addr < starta || addr >= entry->end)
- continue;
-
- pte = pmap_find_vhpt(addr);
- if (pte && pte->pte_p)
- continue;
-
- pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
- lobject = object;
- VM_OBJECT_LOCK(lobject);
- while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
- lobject->type == OBJT_DEFAULT &&
- (backing_object = lobject->backing_object) != NULL) {
- if (lobject->backing_object_offset & PAGE_MASK)
- break;
- pindex += lobject->backing_object_offset >> PAGE_SHIFT;
- VM_OBJECT_LOCK(backing_object);
- VM_OBJECT_UNLOCK(lobject);
- lobject = backing_object;
- }
- VM_OBJECT_UNLOCK(lobject);
- /*
- * give-up when a page is not in memory
- */
- if (m == NULL)
- break;
- vm_page_lock_queues();
- if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
- (m->busy == 0) &&
- (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
-
- if ((m->queue - m->pc) == PQ_CACHE) {
- vm_page_deactivate(m);
- }
- vm_page_busy(m);
- vm_page_unlock_queues();
- pmap_enter_quick(pmap, addr, m, NULL);
- vm_page_lock_queues();
- vm_page_wakeup(m);
- }
- vm_page_unlock_queues();
- }
-}
-
-/*
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
* pair.
@@ -2148,6 +2053,23 @@ pmap_is_modified(vm_page_t m)
}
/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ struct ia64_lpte *pte;
+
+ pte = pmap_find_vhpt(addr);
+ if (pte && pte->pte_p)
+ return (FALSE);
+ return (TRUE);
+}
+
+/*
* Clear the modify bits on the specified physical page.
*/
void
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 1c789f7..30ce2f8 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1117,6 +1117,19 @@ pmap_is_modified(vm_page_t m)
return (pmap_query_bit(m, PTE_CHG));
}
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+
+ return (FALSE);
+}
+
void
pmap_clear_reference(vm_page_t m)
{
@@ -1424,14 +1437,6 @@ pmap_pinit2(pmap_t pmap)
/* XXX: Remove this stub when no longer called */
}
-void
-pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
-{
- KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_prefault: non current pmap"));
- /* XXX */
-}
-
/*
* Set the physical protection on the specified range of this map as requested.
*/
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index 1c789f7..30ce2f8 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -1117,6 +1117,19 @@ pmap_is_modified(vm_page_t m)
return (pmap_query_bit(m, PTE_CHG));
}
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+
+ return (FALSE);
+}
+
void
pmap_clear_reference(vm_page_t m)
{
@@ -1424,14 +1437,6 @@ pmap_pinit2(pmap_t pmap)
/* XXX: Remove this stub when no longer called */
}
-void
-pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
-{
- KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_prefault: non current pmap"));
- /* XXX */
-}
-
/*
* Set the physical protection on the specified range of this map as requested.
*/
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index 1c789f7..30ce2f8 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -1117,6 +1117,19 @@ pmap_is_modified(vm_page_t m)
return (pmap_query_bit(m, PTE_CHG));
}
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+
+ return (FALSE);
+}
+
void
pmap_clear_reference(vm_page_t m)
{
@@ -1424,14 +1437,6 @@ pmap_pinit2(pmap_t pmap)
/* XXX: Remove this stub when no longer called */
}
-void
-pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
-{
- KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_prefault: non current pmap"));
- /* XXX */
-}
-
/*
* Set the physical protection on the specified range of this map as requested.
*/
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index a1cbf99..2d7ad37 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1374,12 +1374,6 @@ pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
("pmap_object_init_pt: non-device object"));
}
-void
-pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
-{
- /* XXX */
-}
-
/*
* Change the wiring attribute for a map/virtual-address pair.
* The mapping must already exist in the pmap.
@@ -1724,6 +1718,19 @@ pmap_is_modified(vm_page_t m)
return (FALSE);
}
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+
+ return (FALSE);
+}
+
void
pmap_clear_modify(vm_page_t m)
{
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 8776966..a6eff6ef 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -111,6 +111,7 @@ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va,
void pmap_growkernel(vm_offset_t);
void pmap_init(vm_paddr_t, vm_paddr_t);
boolean_t pmap_is_modified(vm_page_t m);
+boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
boolean_t pmap_ts_referenced(vm_page_t m);
vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
@@ -130,7 +131,6 @@ void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
-void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_activate(struct thread *td);
vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 855ff71..76fe277 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -97,7 +97,19 @@ __FBSDID("$FreeBSD$");
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
+#define PFBAK 4
+#define PFFOR 4
+#define PAGEORDER_SIZE (PFBAK+PFFOR)
+
+static int prefault_pageorder[] = {
+ -1 * PAGE_SIZE, 1 * PAGE_SIZE,
+ -2 * PAGE_SIZE, 2 * PAGE_SIZE,
+ -3 * PAGE_SIZE, 3 * PAGE_SIZE,
+ -4 * PAGE_SIZE, 4 * PAGE_SIZE
+};
+
static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
+static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
@@ -889,7 +901,7 @@ readrest:
}
pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
- pmap_prefault(fs.map->pmap, vaddr, fs.entry);
+ vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
}
vm_page_lock_queues();
vm_page_flag_clear(fs.m, PG_ZERO);
@@ -929,6 +941,84 @@ readrest:
}
/*
+ * vm_fault_prefault provides a quick way of clustering
+ * page faults into a process's address space. It is a "cousin"
+ * of vm_map_pmap_enter, except it runs at page fault time instead
+ * of mmap time.
+ */
+static void
+vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
+{
+ int i;
+ vm_offset_t addr, starta;
+ vm_pindex_t pindex;
+ vm_page_t m, mpte;
+ vm_object_t object;
+
+ if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
+ return;
+
+ object = entry->object.vm_object;
+
+ starta = addra - PFBAK * PAGE_SIZE;
+ if (starta < entry->start) {
+ starta = entry->start;
+ } else if (starta > addra) {
+ starta = 0;
+ }
+
+ mpte = NULL;
+ for (i = 0; i < PAGEORDER_SIZE; i++) {
+ vm_object_t backing_object, lobject;
+
+ addr = addra + prefault_pageorder[i];
+ if (addr > addra + (PFFOR * PAGE_SIZE))
+ addr = 0;
+
+ if (addr < starta || addr >= entry->end)
+ continue;
+
+ if (!pmap_is_prefaultable(pmap, addr))
+ continue;
+
+ pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
+ lobject = object;
+ VM_OBJECT_LOCK(lobject);
+ while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
+ lobject->type == OBJT_DEFAULT &&
+ (backing_object = lobject->backing_object) != NULL) {
+ if (lobject->backing_object_offset & PAGE_MASK)
+ break;
+ pindex += lobject->backing_object_offset >> PAGE_SHIFT;
+ VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_UNLOCK(lobject);
+ lobject = backing_object;
+ }
+ VM_OBJECT_UNLOCK(lobject);
+ /*
+ * give up when a page is not in memory
+ */
+ if (m == NULL)
+ break;
+ vm_page_lock_queues();
+ if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
+ (m->busy == 0) &&
+ (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
+
+ if ((m->queue - m->pc) == PQ_CACHE) {
+ vm_page_deactivate(m);
+ }
+ vm_page_busy(m);
+ vm_page_unlock_queues();
+ mpte = pmap_enter_quick(pmap, addr, m, mpte);
+ vm_page_lock_queues();
+ vm_page_wakeup(m);
+ }
+ vm_page_unlock_queues();
+ }
+}
+
+/*
* vm_fault_quick:
*
* Ensure that the requested virtual address, which may be in userland,