path: root/sys
author     alc <alc@FreeBSD.org>    2003-07-03 20:18:02 +0000
committer  alc <alc@FreeBSD.org>    2003-07-03 20:18:02 +0000
commit     0699f7e17ff39a213cf2dd24b601d9d8e73271b9 (patch)
tree       d866e6a21d7e58ecc726f0d4972a8006256cf55d /sys
parent     6f4ee681fdadfbb7008a43761f43ef8fe7d4f5c1 (diff)
download   FreeBSD-src-0699f7e17ff39a213cf2dd24b601d9d8e73271b9.zip
           FreeBSD-src-0699f7e17ff39a213cf2dd24b601d9d8e73271b9.tar.gz
Background: pmap_object_init_pt() premaps the pages of an object in order
to avoid the overhead of later page faults.  In general, it implements two
cases: one for vnode-backed objects and one for device-backed objects.
Only the device-backed case is really machine-dependent, belonging in the
pmap.

This commit moves the vnode-backed case into the (relatively) new function
vm_map_pmap_enter().  On amd64 and i386, this commit only amounts to code
rearrangement.  On alpha and ia64, the new machine-independent (MI)
implementation of the vnode case is smaller and more efficient than their
pmap-based implementations.  (The MI implementation takes advantage of the
fact that objects in -CURRENT are ordered collections of pages.)  On
sparc64, pmap_object_init_pt() hadn't (yet) been implemented.
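
In outline, the new division of labor looks like the following condensed sketch of the vm_map.c code added below. It is illustrative only, not the committed code verbatim: the MAP_PREFAULT_PARTIAL/MAP_PREFAULT_MADVISE throttling, the psize clamping, the per-page valid/busy checks, and the lock juggling around pmap_enter_quick() are elided here for brevity.

void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_page_t p, mpte = NULL;

	VM_OBJECT_LOCK(object);
	if (object->type == OBJT_DEVICE) {
		/* Device-backed: still machine-dependent, stays in the pmap. */
		pmap_object_init_pt(map->pmap, addr, object, pindex, size);
	} else if (object->type == OBJT_VNODE) {
		/*
		 * Vnode-backed: handled here in MI code.  Objects in -CURRENT
		 * keep their resident pages in an ordered list, so position p
		 * at the first page with p->pindex >= pindex and walk forward,
		 * premapping one page at a time with pmap_enter_quick().
		 */
		p = TAILQ_FIRST(&object->memq);
		if (p != NULL && p->pindex < pindex) {
			p = vm_page_splay(pindex, object->root);
			if ((object->root = p)->pindex < pindex)
				p = TAILQ_NEXT(p, listq);
		}
		for (; p != NULL && p->pindex - pindex < atop(size);
		    p = TAILQ_NEXT(p, listq))
			mpte = pmap_enter_quick(map->pmap,
			    addr + ptoa(p->pindex - pindex), p, mpte);
	}
	VM_OBJECT_UNLOCK(object);
}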
Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/alpha/pmap.c         109
-rw-r--r--  sys/amd64/amd64/pmap.c          96
-rw-r--r--  sys/i386/i386/pmap.c            97
-rw-r--r--  sys/ia64/ia64/pmap.c           110
-rw-r--r--  sys/powerpc/aim/mmu_oea.c        8
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c    8
-rw-r--r--  sys/powerpc/powerpc/pmap.c       8
-rw-r--r--  sys/sparc64/sparc64/pmap.c       7
-rw-r--r--  sys/vm/pmap.h                    3
-rw-r--r--  sys/vm/vm_map.c                 75
10 files changed, 131 insertions, 390 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 2766797..e604882 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -2070,7 +2070,6 @@ pmap_kenter_temporary(vm_offset_t pa, int i)
return (void *) ALPHA_PHYS_TO_K0SEG(pa - (i * PAGE_SIZE));
}
-#define MAX_INIT_PT (96)
/*
* pmap_object_init_pt preloads the ptes for a given object
* into the specified pmap. This eliminates the blast of soft
@@ -2079,112 +2078,12 @@ pmap_kenter_temporary(vm_offset_t pa, int i)
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex,
- vm_size_t size, int limit)
+ vm_size_t size)
{
- vm_offset_t tmpidx;
- int psize;
- vm_page_t p, mpte;
- int objpgs;
- if (pmap == NULL || object == NULL)
- return;
- VM_OBJECT_LOCK(object);
- psize = alpha_btop(size);
-
- if ((object->type != OBJT_VNODE) ||
- ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
- (object->resident_page_count > MAX_INIT_PT))) {
- goto unlock_return;
- }
-
- if (psize + pindex > object->size) {
- if (object->size < pindex)
- goto unlock_return;
- psize = object->size - pindex;
- }
-
- mpte = NULL;
- /*
- * if we are processing a major portion of the object, then scan the
- * entire thing.
- */
- if (psize > (object->resident_page_count >> 2)) {
- objpgs = psize;
-
- for (p = TAILQ_FIRST(&object->memq);
- ((objpgs > 0) && (p != NULL));
- p = TAILQ_NEXT(p, listq)) {
-
- tmpidx = p->pindex;
- if (tmpidx < pindex) {
- continue;
- }
- tmpidx -= pindex;
- if (tmpidx >= psize) {
- continue;
- }
- /*
- * don't allow an madvise to blow away our really
- * free pages allocating pv entries.
- */
- if ((limit & MAP_PREFAULT_MADVISE) &&
- cnt.v_free_count < cnt.v_free_reserved) {
- break;
- }
- vm_page_lock_queues();
- if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
- (p->busy == 0) &&
- (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
- if ((p->queue - p->pc) == PQ_CACHE)
- vm_page_deactivate(p);
- vm_page_busy(p);
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
- mpte = pmap_enter_quick(pmap,
- addr + alpha_ptob(tmpidx), p, mpte);
- VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
- vm_page_wakeup(p);
- }
- vm_page_unlock_queues();
- objpgs -= 1;
- }
- } else {
- /*
- * else lookup the pages one-by-one.
- */
- for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
- /*
- * don't allow an madvise to blow away our really
- * free pages allocating pv entries.
- */
- if ((limit & MAP_PREFAULT_MADVISE) &&
- cnt.v_free_count < cnt.v_free_reserved) {
- break;
- }
- p = vm_page_lookup(object, tmpidx + pindex);
- if (p == NULL)
- continue;
- vm_page_lock_queues();
- if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
- (p->busy == 0) &&
- (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
- if ((p->queue - p->pc) == PQ_CACHE)
- vm_page_deactivate(p);
- vm_page_busy(p);
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
- mpte = pmap_enter_quick(pmap,
- addr + alpha_ptob(tmpidx), p, mpte);
- VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
- vm_page_wakeup(p);
- }
- vm_page_unlock_queues();
- }
- }
-unlock_return:
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
}
/*
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 875f783..83aae58 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2057,31 +2057,22 @@ pmap_kenter_temporary(vm_offset_t pa, int i)
return ((void *)crashdumpmap);
}
-#define MAX_INIT_PT (96)
/*
- * pmap_object_init_pt preloads the ptes for a given object
- * into the specified pmap. This eliminates the blast of soft
- * faults on process startup and immediately after an mmap.
+ * This code maps large physical mmap regions into the
+ * processor address space. Note that some shortcuts
+ * are taken, but the code works.
*/
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex,
- vm_size_t size, int limit)
+ vm_size_t size)
{
- vm_pindex_t tmpidx;
- int psize;
- vm_page_t p, mpte;
+ vm_page_t p;
- if (pmap == NULL || object == NULL)
- return;
- VM_OBJECT_LOCK(object);
- /*
- * This code maps large physical mmap regions into the
- * processor address space. Note that some shortcuts
- * are taken, but the code works.
- */
- if ((object->type == OBJT_DEVICE) &&
- ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
+ if (((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
int i;
vm_page_t m[1];
int npdes;
@@ -2089,7 +2080,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
pde = pmap_pde(pmap, addr);
if (pde != 0 && (*pde & PG_V) != 0)
- goto unlock_return;
+ return;
retry:
p = vm_page_lookup(object, pindex);
if (p != NULL) {
@@ -2099,14 +2090,14 @@ retry:
} else {
p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
if (p == NULL)
- goto unlock_return;
+ return;
m[0] = p;
if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
- goto unlock_return;
+ return;
}
p = vm_page_lookup(object, pindex);
@@ -2116,9 +2107,8 @@ retry:
vm_page_unlock_queues();
ptepa = VM_PAGE_TO_PHYS(p);
- if (ptepa & (NBPDR - 1)) {
- goto unlock_return;
- }
+ if (ptepa & (NBPDR - 1))
+ return;
p->valid = VM_PAGE_BITS_ALL;
@@ -2130,65 +2120,7 @@ retry:
pde++;
}
pmap_invalidate_all(kernel_pmap);
- goto unlock_return;
- }
-
- psize = amd64_btop(size);
-
- if ((object->type != OBJT_VNODE) ||
- ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
- (object->resident_page_count > MAX_INIT_PT))) {
- goto unlock_return;
- }
-
- if (psize + pindex > object->size) {
- if (object->size < pindex)
- goto unlock_return;
- psize = object->size - pindex;
- }
-
- mpte = NULL;
-
- if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
- if (p->pindex < pindex) {
- p = vm_page_splay(pindex, object->root);
- if ((object->root = p)->pindex < pindex)
- p = TAILQ_NEXT(p, listq);
- }
- }
- /*
- * Assert: the variable p is either (1) the page with the
- * least pindex greater than or equal to the parameter pindex
- * or (2) NULL.
- */
- for (;
- p != NULL && (tmpidx = p->pindex - pindex) < psize;
- p = TAILQ_NEXT(p, listq)) {
- /*
- * don't allow an madvise to blow away our really
- * free pages allocating pv entries.
- */
- if ((limit & MAP_PREFAULT_MADVISE) &&
- cnt.v_free_count < cnt.v_free_reserved) {
- break;
- }
- vm_page_lock_queues();
- if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
- (p->busy == 0) &&
- (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
- if ((p->queue - p->pc) == PQ_CACHE)
- vm_page_deactivate(p);
- vm_page_busy(p);
- vm_page_unlock_queues();
- mpte = pmap_enter_quick(pmap,
- addr + amd64_ptob(tmpidx), p, mpte);
- vm_page_lock_queues();
- vm_page_wakeup(p);
- }
- vm_page_unlock_queues();
}
-unlock_return:
- VM_OBJECT_UNLOCK(object);
}
/*
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index f2da703..8966010 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2200,30 +2200,22 @@ pmap_kenter_temporary(vm_offset_t pa, int i)
return ((void *)crashdumpmap);
}
-#define MAX_INIT_PT (96)
/*
- * pmap_object_init_pt preloads the ptes for a given object
- * into the specified pmap. This eliminates the blast of soft
- * faults on process startup and immediately after an mmap.
+ * This code maps large physical mmap regions into the
+ * processor address space. Note that some shortcuts
+ * are taken, but the code works.
*/
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex,
- vm_size_t size, int limit)
+ vm_size_t size)
{
- vm_offset_t tmpidx;
- int psize;
- vm_page_t p, mpte;
+ vm_page_t p;
- if (pmap == NULL || object == NULL)
- return;
- VM_OBJECT_LOCK(object);
- /*
- * This code maps large physical mmap regions into the
- * processor address space. Note that some shortcuts
- * are taken, but the code works.
- */
- if (pseflag && (object->type == OBJT_DEVICE) &&
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
+ if (pseflag &&
((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
int i;
vm_page_t m[1];
@@ -2232,7 +2224,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
pd_entry_t ptepa;
if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
- goto unlock_return;
+ return;
retry:
p = vm_page_lookup(object, pindex);
if (p != NULL) {
@@ -2242,14 +2234,14 @@ retry:
} else {
p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
if (p == NULL)
- goto unlock_return;
+ return;
m[0] = p;
if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
- goto unlock_return;
+ return;
}
p = vm_page_lookup(object, pindex);
@@ -2259,9 +2251,8 @@ retry:
vm_page_unlock_queues();
ptepa = VM_PAGE_TO_PHYS(p);
- if (ptepa & (NBPDR - 1)) {
- goto unlock_return;
- }
+ if (ptepa & (NBPDR - 1))
+ return;
p->valid = VM_PAGE_BITS_ALL;
@@ -2274,67 +2265,7 @@ retry:
ptepindex += 1;
}
pmap_invalidate_all(kernel_pmap);
- goto unlock_return;
- }
-
- psize = i386_btop(size);
-
- if ((object->type != OBJT_VNODE) ||
- ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
- (object->resident_page_count > MAX_INIT_PT))) {
- goto unlock_return;
- }
-
- if (psize + pindex > object->size) {
- if (object->size < pindex)
- goto unlock_return;
- psize = object->size - pindex;
- }
-
- mpte = NULL;
-
- if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
- if (p->pindex < pindex) {
- p = vm_page_splay(pindex, object->root);
- if ((object->root = p)->pindex < pindex)
- p = TAILQ_NEXT(p, listq);
- }
- }
- /*
- * Assert: the variable p is either (1) the page with the
- * least pindex greater than or equal to the parameter pindex
- * or (2) NULL.
- */
- for (;
- p != NULL && (tmpidx = p->pindex - pindex) < psize;
- p = TAILQ_NEXT(p, listq)) {
- /*
- * don't allow an madvise to blow away our really
- * free pages allocating pv entries.
- */
- if ((limit & MAP_PREFAULT_MADVISE) &&
- cnt.v_free_count < cnt.v_free_reserved) {
- break;
- }
- vm_page_lock_queues();
- if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
- (p->busy == 0) &&
- (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
- if ((p->queue - p->pc) == PQ_CACHE)
- vm_page_deactivate(p);
- vm_page_busy(p);
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
- mpte = pmap_enter_quick(pmap,
- addr + i386_ptob(tmpidx), p, mpte);
- VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
- vm_page_wakeup(p);
- }
- vm_page_unlock_queues();
}
-unlock_return:
- VM_OBJECT_UNLOCK(object);
}
/*
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index e866e6f..42204d7 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1751,7 +1751,6 @@ pmap_kenter_temporary(vm_offset_t pa, int i)
return (void *) IA64_PHYS_TO_RR7(pa - (i * PAGE_SIZE));
}
-#define MAX_INIT_PT (96)
/*
* pmap_object_init_pt preloads the ptes for a given object
* into the specified pmap. This eliminates the blast of soft
@@ -1760,113 +1759,12 @@ pmap_kenter_temporary(vm_offset_t pa, int i)
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex,
- vm_size_t size, int limit)
+ vm_size_t size)
{
- vm_offset_t tmpidx;
- int psize;
- vm_page_t p;
- int objpgs;
- if (pmap == NULL || object == NULL)
- return;
- VM_OBJECT_LOCK(object);
- psize = ia64_btop(size);
-
- if ((object->type != OBJT_VNODE) ||
- ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
- (object->resident_page_count > MAX_INIT_PT))) {
- goto unlock_return;
- }
-
- if (psize + pindex > object->size) {
- if (object->size < pindex)
- goto unlock_return;
- psize = object->size - pindex;
- }
-
- /*
- * if we are processing a major portion of the object, then scan the
- * entire thing.
- */
- if (psize > (object->resident_page_count >> 2)) {
- objpgs = psize;
-
- for (p = TAILQ_FIRST(&object->memq);
- ((objpgs > 0) && (p != NULL));
- p = TAILQ_NEXT(p, listq)) {
-
- tmpidx = p->pindex;
- if (tmpidx < pindex) {
- continue;
- }
- tmpidx -= pindex;
- if (tmpidx >= psize) {
- continue;
- }
- /*
- * don't allow an madvise to blow away our really
- * free pages allocating pv entries.
- */
- if ((limit & MAP_PREFAULT_MADVISE) &&
- cnt.v_free_count < cnt.v_free_reserved) {
- break;
- }
- vm_page_lock_queues();
- if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
- (p->busy == 0) &&
- (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
- if ((p->queue - p->pc) == PQ_CACHE)
- vm_page_deactivate(p);
- vm_page_busy(p);
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
- pmap_enter_quick(pmap,
- addr + ia64_ptob(tmpidx), p,
- NULL);
- VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
- vm_page_wakeup(p);
- }
- vm_page_unlock_queues();
- objpgs -= 1;
- }
- } else {
- /*
- * else lookup the pages one-by-one.
- */
- for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
- /*
- * don't allow an madvise to blow away our really
- * free pages allocating pv entries.
- */
- if ((limit & MAP_PREFAULT_MADVISE) &&
- cnt.v_free_count < cnt.v_free_reserved) {
- break;
- }
- p = vm_page_lookup(object, tmpidx + pindex);
- if (p == NULL)
- continue;
- vm_page_lock_queues();
- if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
- (p->busy == 0) &&
- (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
- if ((p->queue - p->pc) == PQ_CACHE)
- vm_page_deactivate(p);
- vm_page_busy(p);
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
- pmap_enter_quick(pmap,
- addr + ia64_ptob(tmpidx), p,
- NULL);
- VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
- vm_page_wakeup(p);
- }
- vm_page_unlock_queues();
- }
- }
-unlock_return:
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
}
/*
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 35f2a8e..1249adb 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1237,12 +1237,14 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
- vm_pindex_t pindex, vm_size_t size, int limit)
+ vm_pindex_t pindex, vm_size_t size)
{
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_remove_pages: non current pmap"));
- /* XXX */
+ ("pmap_object_init_pt: non current pmap"));
}
/*
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index 35f2a8e..1249adb 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -1237,12 +1237,14 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
- vm_pindex_t pindex, vm_size_t size, int limit)
+ vm_pindex_t pindex, vm_size_t size)
{
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_remove_pages: non current pmap"));
- /* XXX */
+ ("pmap_object_init_pt: non current pmap"));
}
/*
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index 35f2a8e..1249adb 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -1237,12 +1237,14 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
- vm_pindex_t pindex, vm_size_t size, int limit)
+ vm_pindex_t pindex, vm_size_t size)
{
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_remove_pages: non current pmap"));
- /* XXX */
+ ("pmap_object_init_pt: non current pmap"));
}
/*
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 947fd92..337982c 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1341,9 +1341,12 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
- vm_pindex_t pindex, vm_size_t size, int limit)
+ vm_pindex_t pindex, vm_size_t size)
{
- /* XXX */
+
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
}
void
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index c623fda..af809bb 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -112,8 +112,7 @@ boolean_t pmap_is_modified(vm_page_t m);
boolean_t pmap_ts_referenced(vm_page_t m);
vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
- vm_object_t object, vm_pindex_t pindex, vm_size_t size,
- int pagelimit);
+ vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
void pmap_page_protect(vm_page_t m, vm_prot_t prot);
void pmap_pinit(pmap_t);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 206162f..fdfb63c 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1270,6 +1270,11 @@ vm_map_submap(
}
/*
+ * The maximum number of pages to map
+ */
+#define MAX_INIT_PT 96
+
+/*
* vm_map_pmap_enter:
*
* Preload the mappings for the given object into the specified
@@ -1280,9 +1285,77 @@ void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
+ vm_offset_t tmpidx;
+ int psize;
+ vm_page_t p, mpte;
+ if (object == NULL)
+ return;
mtx_lock(&Giant);
- pmap_object_init_pt(map->pmap, addr, object, pindex, size, flags);
+ VM_OBJECT_LOCK(object);
+ if (object->type == OBJT_DEVICE) {
+ pmap_object_init_pt(map->pmap, addr, object, pindex, size);
+ goto unlock_return;
+ }
+
+ psize = atop(size);
+
+ if (object->type != OBJT_VNODE ||
+ ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
+ (object->resident_page_count > MAX_INIT_PT))) {
+ goto unlock_return;
+ }
+
+ if (psize + pindex > object->size) {
+ if (object->size < pindex)
+ goto unlock_return;
+ psize = object->size - pindex;
+ }
+
+ mpte = NULL;
+
+ if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
+ if (p->pindex < pindex) {
+ p = vm_page_splay(pindex, object->root);
+ if ((object->root = p)->pindex < pindex)
+ p = TAILQ_NEXT(p, listq);
+ }
+ }
+ /*
+ * Assert: the variable p is either (1) the page with the
+ * least pindex greater than or equal to the parameter pindex
+ * or (2) NULL.
+ */
+ for (;
+ p != NULL && (tmpidx = p->pindex - pindex) < psize;
+ p = TAILQ_NEXT(p, listq)) {
+ /*
+ * don't allow an madvise to blow away our really
+ * free pages allocating pv entries.
+ */
+ if ((flags & MAP_PREFAULT_MADVISE) &&
+ cnt.v_free_count < cnt.v_free_reserved) {
+ break;
+ }
+ vm_page_lock_queues();
+ if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
+ (p->busy == 0) &&
+ (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
+ if ((p->queue - p->pc) == PQ_CACHE)
+ vm_page_deactivate(p);
+ vm_page_busy(p);
+ vm_page_unlock_queues();
+ VM_OBJECT_UNLOCK(object);
+ mpte = pmap_enter_quick(map->pmap,
+ addr + ptoa(tmpidx), p, mpte);
+ VM_OBJECT_LOCK(object);
+ vm_page_lock_queues();
+ vm_page_wakeup(p);
+ }
+ vm_page_unlock_queues();
+ }
+unlock_return:
+ VM_OBJECT_UNLOCK(object);
mtx_unlock(&Giant);
}
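
The net effect on the machine-dependent interface is summarized by the alpha, ia64, and sparc64 hunks above: pmap_object_init_pt() now has a narrower contract. A minimal restatement of that contract, taken directly from those hunks:

/*
 * The caller (vm_map_pmap_enter()) holds the VM object lock and only
 * passes device-backed objects; platforms with no large-mapping shortcut
 * need nothing beyond these sanity checks.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("pmap_object_init_pt: non-device object"));
}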