summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_fault.c
diff options
context:
space:
mode:
authorattilio <attilio@FreeBSD.org>2013-08-04 21:07:24 +0000
committerattilio <attilio@FreeBSD.org>2013-08-04 21:07:24 +0000
commit19b2ea9f815db5f4ef1071a79ee4f27a2a444a3f (patch)
tree66d78aa520f99833b11e6eca180f8fa7216b21f8 /sys/vm/vm_fault.c
parenta56cdf0d3470151f45842530ddf3cadef1d2819c (diff)
downloadFreeBSD-src-19b2ea9f815db5f4ef1071a79ee4f27a2a444a3f.zip
FreeBSD-src-19b2ea9f815db5f4ef1071a79ee4f27a2a444a3f.tar.gz
The page hold mechanism is fast but it has a couple of fallouts:
- It does not let pages respect the LRU policy
- It bloats the active/inactive queues of a few pages

Try to avoid it as much as possible, with the long-term target of completely removing it. Use the soft-busy mechanism to protect page content accesses during short-term operations (like uiomove_fromphys()). After this change only vm_fault_quick_hold_pages() is still using the hold mechanism for page content access. There is an additional complexity there, as the quick path cannot immediately access the page object to busy the page, and the slow path cannot, however, busy more than one page at a time (to avoid deadlocks). Fixing this primitive can lead to the complete removal of the page hold mechanism.

Sponsored by: EMC / Isilon storage division
Discussed with: alc
Reviewed by: jeff
Tested by: pho
Diffstat (limited to 'sys/vm/vm_fault.c')
-rw-r--r--sys/vm/vm_fault.c13
1 files changed, 8 insertions, 5 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index f7f1889..b482709 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -221,8 +221,8 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
if (map != kernel_map && KTRPOINT(td, KTR_FAULT))
ktrfault(vaddr, fault_type);
#endif
- result = vm_fault_hold(map, trunc_page(vaddr), fault_type, fault_flags,
- NULL);
+ result = vm_fault_handle(map, trunc_page(vaddr), fault_type,
+ fault_flags, NULL);
#ifdef KTRACE
if (map != kernel_map && KTRPOINT(td, KTR_FAULTEND))
ktrfaultend(result);
@@ -231,7 +231,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
}
int
-vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
+vm_fault_handle(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int fault_flags, vm_page_t *m_hold)
{
vm_prot_t prot;
@@ -943,7 +943,10 @@ vnode_locked:
vm_page_activate(fs.m);
if (m_hold != NULL) {
*m_hold = fs.m;
- vm_page_hold(fs.m);
+ if (fault_flags & VM_FAULT_IOBUSY)
+ vm_page_io_start(fs.m);
+ else
+ vm_page_hold(fs.m);
}
vm_page_unlock(fs.m);
vm_page_wakeup(fs.m);
@@ -1145,7 +1148,7 @@ vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
* and hold these pages.
*/
for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
- if (*mp == NULL && vm_fault_hold(map, va, prot,
+ if (*mp == NULL && vm_fault_handle(map, va, prot,
VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
goto error;
}
OpenPOWER on IntegriCloud