author     alc <alc@FreeBSD.org>  2006-06-05 20:35:27 +0000
committer  alc <alc@FreeBSD.org>  2006-06-05 20:35:27 +0000
commit     ff4adb11fea6aec1b2e943f8d750e9b222b7c687 (patch)
tree       22cfa4dc7fcc450f872692f11ffb813adfd588ae /sys/vm
parent     2007942da5c954dc499909f31282b8b2f4f3b360 (diff)
Introduce the function pmap_enter_object(). It maps a sequence of resident
pages from the same object. Use it in vm_map_pmap_enter() to reduce the
locking overhead of premapping objects.

Reviewed by:	tegge@
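
With this change, vm_map_pmap_enter() stops calling pmap_enter_quick() once per
page. Instead it remembers where a run of consecutive resident, fully valid,
unbusied pages begins and hands the whole [start, end) run to
pmap_enter_object() in a single call, so the pmap-level work can be amortized
per run rather than per page. Below is a rough user-space model of that
batching loop, using stand-in types rather than the kernel's vm_page/pmap
structures; the names are illustrative only.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL

/* Stand-in for a resident page: its index within the object, plus one
 * flag standing in for the valid/busy/fictitious checks. */
struct page {
	size_t	pindex;
	int	mappable;
};

/* Stand-in for pmap_enter_object(): one call per run of pages. */
static void
enter_object(unsigned long start, unsigned long end, const struct page *p_start)
{
	printf("map run [%#lx, %#lx) starting at pindex %zu\n",
	    start, end, p_start->pindex);
}

/*
 * Model of the batching loop: walk the pages in pindex order, remember
 * where a run of mappable pages begins, and emit one enter_object()
 * call when the run ends (or after the loop for a trailing run).
 */
static void
premap(unsigned long addr, const struct page *pages, size_t npages)
{
	const struct page *p_start = NULL;
	unsigned long start = 0;
	size_t i;

	for (i = 0; i < npages; i++) {
		if (pages[i].mappable) {
			if (p_start == NULL) {
				start = addr + pages[i].pindex * PAGE_SIZE;
				p_start = &pages[i];
			}
		} else if (p_start != NULL) {
			/* Run ended before this page; map it in one call. */
			enter_object(start,
			    addr + pages[i].pindex * PAGE_SIZE, p_start);
			p_start = NULL;
		}
	}
	if (p_start != NULL)	/* trailing run reaches the end of the range */
		enter_object(start, addr + npages * PAGE_SIZE, p_start);
}

int
main(void)
{
	const struct page pages[] = {
		{ 0, 1 }, { 1, 1 }, { 2, 0 }, { 3, 1 }, { 4, 1 }, { 5, 1 }
	};

	premap(0x100000UL, pages, sizeof(pages) / sizeof(pages[0]));
	return (0);
}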
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/pmap.h   |  2
-rw-r--r--  sys/vm/vm_map.c | 20
2 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 2bddd7b..937fc71 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -99,6 +99,8 @@ void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
 		    boolean_t);
 vm_page_t	 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		    vm_prot_t prot, vm_page_t mpte);
+void		 pmap_enter_object(pmap_t pmap, vm_offset_t start,
+		    vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
 vm_paddr_t	 pmap_extract(pmap_t pmap, vm_offset_t va);
 vm_page_t	 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va,
 		    vm_prot_t prot);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 93a9863..5fa5673 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1437,9 +1437,9 @@ void
 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
 {
-	vm_offset_t tmpidx;
+	vm_offset_t start, tmpidx;
 	int psize;
-	vm_page_t p, mpte;
+	vm_page_t p, p_start;
 	boolean_t are_queues_locked;
 
 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
@@ -1465,7 +1465,8 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
 	}
 
 	are_queues_locked = FALSE;
-	mpte = NULL;
+	start = 0;
+	p_start = NULL;
 
 	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
 		if (p->pindex < pindex) {
@@ -1493,16 +1494,25 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
 		if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
 		    (p->busy == 0) &&
 		    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
+			if (p_start == NULL) {
+				start = addr + ptoa(tmpidx);
+				p_start = p;
+			}
 			if (!are_queues_locked) {
 				are_queues_locked = TRUE;
 				vm_page_lock_queues();
 			}
 			if (VM_PAGE_INQUEUE1(p, PQ_CACHE))
 				vm_page_deactivate(p);
-			mpte = pmap_enter_quick(map->pmap,
-			    addr + ptoa(tmpidx), p, prot, mpte);
+		} else if (p_start != NULL) {
+			pmap_enter_object(map->pmap, start, addr +
+			    ptoa(tmpidx), p_start, prot);
+			p_start = NULL;
 		}
 	}
+	if (p_start != NULL)
+		pmap_enter_object(map->pmap, start, addr + size, p_start,
+		    prot);
 	if (are_queues_locked)
 		vm_page_unlock_queues();
 unlock_return:
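
The machine-dependent half is not part of this sys/vm-only diff; each
architecture's pmap supplies its own pmap_enter_object(). Broadly, the callee
is expected to walk the object's page list starting at m_start and map each
page at start plus its pindex offset, stopping once the offset reaches end;
pages absent from the list simply leave holes. A rough user-space model of
that contract follows (struct and helper names are illustrative, not the
kernel's):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL

/* Illustrative stand-in for a vm_page linked into an object's page list,
 * kept sorted by pindex. */
struct page {
	size_t		 pindex;
	struct page	*next;
};

/* Stand-in for the per-page mapping primitive (pmap_enter_quick-style). */
static void
enter_one(unsigned long va, const struct page *m)
{
	printf("map va %#lx -> pindex %zu\n", va, m->pindex);
}

/*
 * Model of the pmap_enter_object() contract: map m_start and its
 * successors at start, start + PAGE_SIZE, ... for as long as their
 * pindex offset from m_start still falls inside [start, end).
 */
static void
enter_object(unsigned long start, unsigned long end, const struct page *m_start)
{
	size_t psize = (end - start) / PAGE_SIZE;
	const struct page *m;
	size_t diff;

	for (m = m_start; m != NULL; m = m->next) {
		diff = m->pindex - m_start->pindex;
		if (diff >= psize)
			break;
		enter_one(start + diff * PAGE_SIZE, m);
	}
}

int
main(void)
{
	/* Three resident pages at pindex 4, 6 and 7; pindex 5 is absent. */
	struct page p2 = { 7, NULL }, p1 = { 6, &p2 }, p0 = { 4, &p1 };

	/* Cover four pages' worth of address space starting at pindex 4. */
	enter_object(0x200000UL, 0x200000UL + 4 * PAGE_SIZE, &p0);
	return (0);
}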