Diffstat (limited to 'sys/vm/vm_fault.c')
-rw-r--r--    sys/vm/vm_fault.c    79
1 file changed, 59 insertions(+), 20 deletions(-)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index f409856..2978f7f 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -137,9 +137,11 @@
 release_page(struct faultstate *fs)
 {

         vm_page_wakeup(fs->m);
+        vm_page_lock(fs->m);
         vm_page_lock_queues();
         vm_page_deactivate(fs->m);
         vm_page_unlock_queues();
+        vm_page_unlock(fs->m);
         fs->m = NULL;
 }
@@ -161,9 +163,11 @@ unlock_and_deallocate(struct faultstate *fs)
         VM_OBJECT_UNLOCK(fs->object);
         if (fs->object != fs->first_object) {
                 VM_OBJECT_LOCK(fs->first_object);
+                vm_page_lock(fs->first_m);
                 vm_page_lock_queues();
                 vm_page_free(fs->first_m);
                 vm_page_unlock_queues();
+                vm_page_unlock(fs->first_m);
                 vm_object_pip_wakeup(fs->first_object);
                 VM_OBJECT_UNLOCK(fs->first_object);
                 fs->first_m = NULL;
@@ -211,7 +215,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
 {
         vm_prot_t prot;
         int is_first_object_locked, result;
-        boolean_t are_queues_locked, growstack, wired;
+        boolean_t growstack, wired;
         int map_generation;
         vm_object_t next_object;
         vm_page_t marray[VM_FAULT_READ];
@@ -305,12 +309,12 @@ RetryFault:;
                          * removes the page from the backing object,
                          * which is not what we want.
                          */
+                        vm_page_lock(fs.m);
                        vm_page_lock_queues();
                        if ((fs.m->cow) &&
                            (fault_type & VM_PROT_WRITE) &&
                            (fs.object == fs.first_object)) {
                                vm_page_cowfault(fs.m);
-                                vm_page_unlock_queues();
                                unlock_and_deallocate(&fs);
                                goto RetryFault;
                        }
@@ -332,13 +336,22 @@ RetryFault:;
                          * to pmap it.
                          */
                        if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) {
+                                /*
+                                 * Reference the page before unlocking and
+                                 * sleeping so that the page daemon is less
+                                 * likely to reclaim it.
+                                 */
+                                vm_page_flag_set(fs.m, PG_REFERENCED);
                                vm_page_unlock_queues();
+                                vm_page_unlock(fs.m);
                                VM_OBJECT_UNLOCK(fs.object);
                                if (fs.object != fs.first_object) {
                                        VM_OBJECT_LOCK(fs.first_object);
+                                        vm_page_lock(fs.first_m);
                                        vm_page_lock_queues();
                                        vm_page_free(fs.first_m);
                                        vm_page_unlock_queues();
+                                        vm_page_unlock(fs.first_m);
                                        vm_object_pip_wakeup(fs.first_object);
                                        VM_OBJECT_UNLOCK(fs.first_object);
                                        fs.first_m = NULL;
@@ -358,6 +371,7 @@ RetryFault:;
                        }
                        vm_pageq_remove(fs.m);
                        vm_page_unlock_queues();
+                        vm_page_unlock(fs.m);

                        /*
                         * Mark page busy for other processes, and the
@@ -462,7 +476,6 @@ readrest:
                        else
                                firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;

-                        are_queues_locked = FALSE;
                        /*
                         * note: partially valid pages cannot be
                         * included in the lookahead - NFS piecemeal
@@ -479,22 +492,23 @@ readrest:
                                if (mt->busy ||
                                    (mt->oflags & VPO_BUSY))
                                        continue;
-                                if (!are_queues_locked) {
-                                        are_queues_locked = TRUE;
-                                        vm_page_lock_queues();
-                                }
+                                vm_page_lock(mt);
+                                vm_page_lock_queues();
                                if (mt->hold_count ||
-                                    mt->wire_count)
+                                    mt->wire_count) {
+                                        vm_page_unlock_queues();
+                                        vm_page_unlock(mt);
                                        continue;
+                                }
                                pmap_remove_all(mt);
                                if (mt->dirty) {
                                        vm_page_deactivate(mt);
                                } else {
                                        vm_page_cache(mt);
                                }
-                        }
-                        if (are_queues_locked)
                                vm_page_unlock_queues();
+                                vm_page_unlock(mt);
+                        }
                        ahead += behind;
                        behind = 0;
                }
@@ -623,17 +637,21 @@ vnode_locked:
                         */
                        if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
                            (rv == VM_PAGER_BAD)) {
+                                vm_page_lock(fs.m);
                                vm_page_lock_queues();
                                vm_page_free(fs.m);
                                vm_page_unlock_queues();
+                                vm_page_unlock(fs.m);
                                fs.m = NULL;
                                unlock_and_deallocate(&fs);
                                return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
                        }
                        if (fs.object != fs.first_object) {
+                                vm_page_lock(fs.m);
                                vm_page_lock_queues();
                                vm_page_free(fs.m);
                                vm_page_unlock_queues();
+                                vm_page_unlock(fs.m);
                                fs.m = NULL;
                                /*
                                 * XXX - we cannot just fall out at this
@@ -746,18 +764,22 @@ vnode_locked:
                                     * We don't chase down the shadow chain
                                     */
                                    fs.object == fs.first_object->backing_object) {
+                                        vm_page_lock(fs.first_m);
                                        vm_page_lock_queues();
                                        /*
                                         * get rid of the unnecessary page
                                         */
                                        vm_page_free(fs.first_m);
+                                        vm_page_unlock_queues();
+                                        vm_page_unlock(fs.first_m);
                                        /*
                                         * grab the page and put it into the
                                         * process'es object.  The page is
                                         * automatically made dirty.
                                         */
+                                        vm_page_lock(fs.m);
                                        vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
-                                        vm_page_unlock_queues();
+                                        vm_page_unlock(fs.m);
                                        vm_page_busy(fs.m);
                                        fs.first_m = fs.m;
                                        fs.m = NULL;
@@ -770,10 +792,13 @@ vnode_locked:
                        fs.first_m->valid = VM_PAGE_BITS_ALL;
                        if (wired && (fault_flags &
                            VM_FAULT_CHANGE_WIRING) == 0) {
-                                vm_page_lock_queues();
+                                vm_page_lock(fs.first_m);
                                vm_page_wire(fs.first_m);
+                                vm_page_unlock(fs.first_m);
+
+                                vm_page_lock(fs.m);
                                vm_page_unwire(fs.m, FALSE);
-                                vm_page_unlock_queues();
+                                vm_page_unlock(fs.m);
                        }
                        /*
                         * We no longer need the old page or object.
@@ -923,8 +948,8 @@ vnode_locked:
         if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
                vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
         VM_OBJECT_LOCK(fs.object);
+        vm_page_lock(fs.m);
         vm_page_lock_queues();
-        vm_page_flag_set(fs.m, PG_REFERENCED);

         /*
          * If the page is not wired down, then put it where the pageout daemon
@@ -939,6 +964,7 @@ vnode_locked:
                vm_page_activate(fs.m);
         }
         vm_page_unlock_queues();
+        vm_page_unlock(fs.m);
         vm_page_wakeup(fs.m);

         /*
@@ -1015,9 +1041,11 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
                }
                if (m->valid == VM_PAGE_BITS_ALL &&
                    (m->flags & PG_FICTITIOUS) == 0) {
+                        vm_page_lock(m);
                        vm_page_lock_queues();
                        pmap_enter_quick(pmap, addr, m, entry->protection);
                        vm_page_unlock_queues();
+                        vm_page_unlock(m);
                }
                VM_OBJECT_UNLOCK(lobject);
        }
@@ -1080,6 +1108,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 {
         vm_paddr_t pa;
         vm_offset_t va;
+        vm_page_t m;
         pmap_t pmap;

         pmap = vm_map_pmap(map);
@@ -1093,9 +1122,10 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
                if (pa != 0) {
                        pmap_change_wiring(pmap, va, FALSE);
                        if (!fictitious) {
-                                vm_page_lock_queues();
-                                vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
-                                vm_page_unlock_queues();
+                                m = PHYS_TO_VM_PAGE(pa);
+                                vm_page_lock(m);
+                                vm_page_unwire(m, TRUE);
+                                vm_page_unlock(m);
                        }
                }
        }
@@ -1238,13 +1268,22 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
                 * Mark it no longer busy, and put it on the active list.
                 */
                VM_OBJECT_LOCK(dst_object);
-                vm_page_lock_queues();
+
                if (upgrade) {
+                        vm_page_lock(src_m);
                        vm_page_unwire(src_m, 0);
+                        vm_page_unlock(src_m);
+
+                        vm_page_lock(dst_m);
                        vm_page_wire(dst_m);
-                } else
+                        vm_page_unlock(dst_m);
+                } else {
+                        vm_page_lock(dst_m);
+                        vm_page_lock_queues();
                        vm_page_activate(dst_m);
-                vm_page_unlock_queues();
+                        vm_page_unlock_queues();
+                        vm_page_unlock(dst_m);
+                }
                vm_page_wakeup(dst_m);
        }
        VM_OBJECT_UNLOCK(dst_object);
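
Every hunk above applies the same conversion: page-queue, wiring, and flag
operations that previously relied solely on the global page queues mutex are
now bracketed by the new per-page lock (vm_page_lock()/vm_page_unlock()), and
where both locks are held, the page lock is always acquired before
vm_page_lock_queues(). A minimal sketch of the before/after pattern, using
only the interfaces that appear in this diff (the two wrapper functions are
hypothetical, for illustration only):

        #include <sys/param.h>
        #include <sys/lock.h>
        #include <sys/mutex.h>

        #include <vm/vm.h>
        #include <vm/vm_page.h>

        /*
         * Old pattern: the global page queues mutex serializes every
         * page state change.
         */
        static void
        page_deactivate_old(vm_page_t m)
        {

                vm_page_lock_queues();
                vm_page_deactivate(m);
                vm_page_unlock_queues();
        }

        /*
         * New pattern: take the per-page lock first, then hold the
         * queues lock only around the queue manipulation itself.
         * Purely per-page operations (e.g. the vm_page_wire() and
         * vm_page_unwire() calls in the hunks above) now run under
         * the page lock alone.
         */
        static void
        page_deactivate_new(vm_page_t m)
        {

                vm_page_lock(m);
                vm_page_lock_queues();
                vm_page_deactivate(m);
                vm_page_unlock_queues();
                vm_page_unlock(m);
        }

The page-before-queues acquisition order shown here matches every hunk in the
diff; keeping that order uniform is what allows the patch to narrow or drop
the global queues lock without introducing a lock-order deadlock.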