summaryrefslogtreecommitdiffstats
path: root/sys/alpha
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2004-12-15 19:55:05 +0000
committeralc <alc@FreeBSD.org>2004-12-15 19:55:05 +0000
commitede2fb9751ccd8ce53c764a80e9c92cf19817e80 (patch)
tree04c4387210403e2f15960fa9ce72542103f52ac1 /sys/alpha
parentb9999a1836b74c6d5710f20cd8f2e3e6e51ef007 (diff)
downloadFreeBSD-src-ede2fb9751ccd8ce53c764a80e9c92cf19817e80.zip
FreeBSD-src-ede2fb9751ccd8ce53c764a80e9c92cf19817e80.tar.gz
In the common case, pmap_enter_quick() completes without sleeping.
In such cases, the busying of the page and the unlocking of the containing object by vm_map_pmap_enter() and vm_fault_prefault() is unnecessary overhead. To eliminate this overhead, this change modifies pmap_enter_quick() so that it expects the object to be locked on entry and it assumes the responsibility for busying the page and unlocking the object if it must sleep. Note: alpha, amd64, i386 and ia64 are the only implementations optimized by this change; arm, powerpc, and sparc64 still conservatively busy the page and unlock the object within every pmap_enter_quick() call. Additionally, this change is the first case where we synchronize access to the page's PG_BUSY flag and busy field using the containing object's lock rather than the global page queues lock. (Modifications to the page's PG_BUSY flag and busy field have asserted both locks for several weeks, enabling an incremental transition.)
Diffstat (limited to 'sys/alpha')
-rw-r--r--sys/alpha/alpha/pmap.c43
1 file changed, 30 insertions, 13 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 789a93a..41a5558 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -153,6 +153,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
+#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
@@ -334,7 +335,7 @@ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);
-static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
+static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
#ifdef SMP
@@ -1038,24 +1039,28 @@ pmap_pinit(pmap)
* mapped correctly.
*/
static vm_page_t
-_pmap_allocpte(pmap, ptepindex)
- pmap_t pmap;
- unsigned ptepindex;
+_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
{
pt_entry_t* pte;
vm_offset_t ptepa;
vm_page_t m;
+ KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
+ (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
+ ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
+
/*
* Find or fabricate a new pagetable page
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
- PMAP_UNLOCK(pmap);
- vm_page_unlock_queues();
- VM_WAIT;
- vm_page_lock_queues();
- PMAP_LOCK(pmap);
+ if (flags & M_WAITOK) {
+ PMAP_UNLOCK(pmap);
+ vm_page_unlock_queues();
+ VM_WAIT;
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+ }
/*
* Indicate the need to retry. While waiting, the page table
@@ -1082,7 +1087,8 @@ _pmap_allocpte(pmap, ptepindex)
pt_entry_t* l1pte = &pmap->pm_lev1[l1index];
pt_entry_t* l2map;
if (!pmap_pte_v(l1pte)) {
- if (_pmap_allocpte(pmap, NUSERLEV3MAPS + l1index) == NULL) {
+ if (_pmap_allocpte(pmap, NUSERLEV3MAPS + l1index,
+ flags) == NULL) {
--m->wire_count;
vm_page_free(m);
return (NULL);
@@ -1146,7 +1152,7 @@ retry:
* Here if the pte page isn't mapped, or if it has been
* deallocated.
*/
- m = _pmap_allocpte(pmap, ptepindex);
+ m = _pmap_allocpte(pmap, ptepindex, M_WAITOK);
if (m == NULL)
goto retry;
}
@@ -1842,9 +1848,20 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
}
mpte->wire_count++;
} else {
- mpte = _pmap_allocpte(pmap, ptepindex);
- if (mpte == NULL)
+ mpte = _pmap_allocpte(pmap, ptepindex,
+ M_NOWAIT);
+ if (mpte == NULL) {
+ PMAP_UNLOCK(pmap);
+ vm_page_busy(m);
+ vm_page_unlock_queues();
+ VM_OBJECT_UNLOCK(m->object);
+ VM_WAIT;
+ VM_OBJECT_LOCK(m->object);
+ vm_page_lock_queues();
+ vm_page_wakeup(m);
+ PMAP_LOCK(pmap);
goto retry;
+ }
}
}
} else {
OpenPOWER on IntegriCloud