summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_fault.c
diff options
context:
space:
mode:
author    alc <alc@FreeBSD.org>    2010-12-20 22:49:31 +0000
committer alc <alc@FreeBSD.org>    2010-12-20 22:49:31 +0000
commitbe5201b0d1c733f2bfb8fa4bef0225259f9acb2c (patch)
treef6dd1a0aa7e3213a5adcdd9bdd3123f868ee9dea /sys/vm/vm_fault.c
parent04f9658f5ccf72df9c57e4e218f11574121fc70f (diff)
downloadFreeBSD-src-be5201b0d1c733f2bfb8fa4bef0225259f9acb2c.zip
FreeBSD-src-be5201b0d1c733f2bfb8fa4bef0225259f9acb2c.tar.gz
Introduce vm_fault_hold() and use it to (1) eliminate a long-standing race
condition in proc_rwmem() and to (2) simplify the implementation of the cxgb driver's vm_fault_hold_user_pages(). Specifically, in proc_rwmem() the requested read or write could fail because the targeted page could be reclaimed between the calls to vm_fault() and vm_page_hold().

In collaboration with: kib@
MFC after: 6 weeks
Diffstat (limited to 'sys/vm/vm_fault.c')
-rw-r--r--  sys/vm/vm_fault.c  23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 11489a8..57b72e5 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -201,13 +201,20 @@ unlock_and_deallocate(struct faultstate *fs)
* KERN_SUCCESS is returned if the page fault is handled; otherwise,
* a standard error specifying why the fault is fatal is returned.
*
- *
* The map in question must be referenced, and remains so.
* Caller may hold no locks.
*/
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
- int fault_flags)
+ int fault_flags)
+{
+
+ return (vm_fault_hold(map, vaddr, fault_type, fault_flags, NULL));
+}
+
+int
+vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
+ int fault_flags, vm_page_t *m_hold)
{
vm_prot_t prot;
int is_first_object_locked, result;
@@ -880,7 +887,8 @@ vnode_locked:
if (hardfault)
fs.entry->lastr = fs.pindex + faultcount - behind;
- if (prot & VM_PROT_WRITE) {
+ if ((prot & VM_PROT_WRITE) != 0 ||
+ (fault_flags & VM_FAULT_DIRTY) != 0) {
vm_object_set_writeable_dirty(fs.object);
/*
@@ -906,8 +914,9 @@ vnode_locked:
* Also tell the backing pager, if any, that it should remove
* any swap backing since the page is now dirty.
*/
- if ((fault_type & VM_PROT_WRITE) != 0 &&
- (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) {
+ if (((fault_type & VM_PROT_WRITE) != 0 &&
+ (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) ||
+ (fault_flags & VM_FAULT_DIRTY) != 0) {
vm_page_dirty(fs.m);
vm_pager_page_unswapped(fs.m);
}
@@ -949,6 +958,10 @@ vnode_locked:
vm_page_unwire(fs.m, 1);
} else
vm_page_activate(fs.m);
+ if (m_hold != NULL) {
+ *m_hold = fs.m;
+ vm_page_hold(fs.m);
+ }
vm_page_unlock(fs.m);
vm_page_wakeup(fs.m);
OpenPOWER on IntegriCloud