author     kib <kib@FreeBSD.org>    2014-08-25 21:21:29 +0000
committer  kib <kib@FreeBSD.org>    2014-08-25 21:21:29 +0000
commit     4e198665c508b72fb221139ec91629bc98646b9c (patch)
tree       fa9db04e68e8e2aac37c5bb9cdba140527f7dcdf /sys/vm
parent     35c4352f4cf19313edd692b31a703fbb51b80021 (diff)
MFC r270011:
Implement 'fast path' for the vm page fault handler.

MFC r270387 (by alc):
Relax one of the conditions for mapping a page on the fast path.

Approved by:  re (gjb)
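The fast path added in the diff below handles the case where the faulted page is already resident and fully valid in the top-level object: holding only the object's read lock, it looks the page up, enters it with pmap_enter(..., PMAP_ENTER_NOSLEEP, ...), and charges the fault to the thread as a minor fault via curthread->td_ru.ru_minflt. The following userspace sketch is not part of the commit; it only shows where that accounting surfaces, through getrusage(2). Whether a particular soft fault is resolved by the new fast path or by the existing slow path depends on the page already being resident and valid in the first object; both are counted in ru_minflt.

/*
 * Illustrative only, not from the commit: touch anonymous memory one page
 * at a time and report the minor/major fault counters exposed through
 * getrusage(2).  Soft faults (no I/O) show up in ru_minflt, hard faults
 * in ru_majflt.
 *
 * cc -o minflt minflt.c
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct rusage before, after;
	long pagesize = sysconf(_SC_PAGESIZE);
	size_t len = 64 * (size_t)pagesize;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
	    -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	getrusage(RUSAGE_SELF, &before);
	for (size_t off = 0; off < len; off += (size_t)pagesize)
		p[off] = 1;		/* first touch faults each page in */
	getrusage(RUSAGE_SELF, &after);

	printf("minor faults: %ld, major faults: %ld\n",
	    after.ru_minflt - before.ru_minflt,
	    after.ru_majflt - before.ru_majflt);
	munmap(p, len);
	return (0);
}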
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_fault.c | 55
1 file changed, 51 insertions, 4 deletions
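In the diff below, the fast path runs under the first object's read lock (VM_OBJECT_RLOCK); if any check disqualifies the page it jumps to fast_failed, tries VM_OBJECT_TRYUPGRADE, and otherwise drops the read lock and takes the write lock before falling through to the existing slow path. The sketch below is not from the commit and uses hypothetical names (struct cache, cache_get, compute_value); it shows the same "try the cheap case under a read lock, then drop, retake as a writer, and revalidate" pattern with POSIX rwlocks, which have no upgrade primitive.

/*
 * Userspace analogue of the fast-path locking, illustrative only.
 * Build with: cc -o cache cache.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cache {
	pthread_rwlock_t lock;
	bool		 valid;
	int		 value;
};

static int
compute_value(void)
{
	return (42);	/* stand-in for the expensive slow-path work */
}

static int
cache_get(struct cache *c)
{
	int v;

	/* Fast path: read lock only, succeed if the cached value is usable. */
	pthread_rwlock_rdlock(&c->lock);
	if (c->valid) {
		v = c->value;
		pthread_rwlock_unlock(&c->lock);
		return (v);
	}

	/*
	 * Slow path: POSIX rwlocks cannot be upgraded, so drop the read
	 * lock, take the write lock, and recheck, since another thread may
	 * have filled the cache in the window between the two locks.
	 */
	pthread_rwlock_unlock(&c->lock);
	pthread_rwlock_wrlock(&c->lock);
	if (!c->valid) {
		c->value = compute_value();
		c->valid = true;
	}
	v = c->value;
	pthread_rwlock_unlock(&c->lock);
	return (v);
}

int
main(void)
{
	struct cache c = { .valid = false };

	pthread_rwlock_init(&c.lock, NULL);
	printf("%d\n", cache_get(&c));
	pthread_rwlock_destroy(&c.lock);
	return (0);
}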
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 19c0eee..09f7423 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -237,6 +237,7 @@ vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int hardfault;
struct faultstate fs;
struct vnode *vp;
+ vm_page_t m;
int locked, error;
hardfault = 0;
@@ -290,6 +291,56 @@ RetryFault:;
goto RetryFault;
}
+ if (wired)
+ fault_type = prot | (fault_type & VM_PROT_COPY);
+
+ if (fs.vp == NULL /* avoid locked vnode leak */ &&
+ (fault_flags & (VM_FAULT_CHANGE_WIRING | VM_FAULT_DIRTY)) == 0 &&
+ /* avoid calling vm_object_set_writeable_dirty() */
+ ((prot & VM_PROT_WRITE) == 0 ||
+ fs.first_object->type != OBJT_VNODE ||
+ (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
+ VM_OBJECT_RLOCK(fs.first_object);
+ if ((prot & VM_PROT_WRITE) != 0 &&
+ fs.first_object->type == OBJT_VNODE &&
+ (fs.first_object->flags & OBJ_MIGHTBEDIRTY) == 0)
+ goto fast_failed;
+ m = vm_page_lookup(fs.first_object, fs.first_pindex);
+ /* A busy page can be mapped for read|execute access. */
+ if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
+ vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL)
+ goto fast_failed;
+ result = pmap_enter(fs.map->pmap, vaddr, m, prot,
+ fault_type | PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED :
+ 0), 0);
+ if (result != KERN_SUCCESS)
+ goto fast_failed;
+ if (m_hold != NULL) {
+ *m_hold = m;
+ vm_page_lock(m);
+ vm_page_hold(m);
+ vm_page_unlock(m);
+ }
+ if ((fault_type & VM_PROT_WRITE) != 0 &&
+ (m->oflags & VPO_UNMANAGED) == 0) {
+ vm_page_dirty(m);
+ vm_pager_page_unswapped(m);
+ }
+ VM_OBJECT_RUNLOCK(fs.first_object);
+ if (!wired)
+ vm_fault_prefault(&fs, vaddr, 0, 0);
+ vm_map_lookup_done(fs.map, fs.entry);
+ curthread->td_ru.ru_minflt++;
+ return (KERN_SUCCESS);
+fast_failed:
+ if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
+ VM_OBJECT_RUNLOCK(fs.first_object);
+ VM_OBJECT_WLOCK(fs.first_object);
+ }
+ } else {
+ VM_OBJECT_WLOCK(fs.first_object);
+ }
+
/*
* Make a reference to this object to prevent its disposal while we
* are messing with it. Once we have the reference, the map is free
@@ -300,15 +351,11 @@ RetryFault:;
* truncation operations) during I/O. This must be done after
* obtaining the vnode lock in order to avoid possible deadlocks.
*/
- VM_OBJECT_WLOCK(fs.first_object);
vm_object_reference_locked(fs.first_object);
vm_object_pip_add(fs.first_object, 1);
fs.lookup_still_valid = TRUE;
- if (wired)
- fault_type = prot | (fault_type & VM_PROT_COPY);
-
fs.first_m = NULL;
/*