Diffstat (limited to 'sys/vm/vm_object.c')
-rw-r--r-- | sys/vm/vm_object.c | 207
1 file changed, 105 insertions, 102 deletions
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index c219069..7c499ff 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$"); #include <sys/proc.h> /* for curproc, pageproc */ #include <sys/socket.h> #include <sys/resourcevar.h> +#include <sys/rwlock.h> #include <sys/vnode.h> #include <sys/vmmeter.h> #include <sys/sx.h> @@ -176,7 +177,7 @@ vm_object_zdtor(void *mem, int size, void *arg) ("object %p has reservations", object)); #endif - KASSERT(object->cached_page_count == 0, + KASSERT(vm_object_cache_is_empty(object), ("object %p has cached pages", object)); KASSERT(object->paging_in_progress == 0, @@ -194,8 +195,8 @@ vm_object_zinit(void *mem, int size, int flags) vm_object_t object; object = (vm_object_t)mem; - bzero(&object->mtx, sizeof(object->mtx)); - mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK); + bzero(&object->lock, sizeof(object->lock)); + rw_init_flags(&object->lock, "vm object", RW_DUPOK); /* These are true for any object that has been freed */ object->root = NULL; @@ -266,7 +267,7 @@ vm_object_init(void) TAILQ_INIT(&vm_object_list); mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF); - mtx_init(&kernel_object->mtx, "vm object", "kernel object", MTX_DEF); + rw_init(&kernel_object->lock, "kernel vm object"); _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), kernel_object); #if VM_NRESERVLEVEL > 0 @@ -274,7 +275,7 @@ vm_object_init(void) kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); #endif - mtx_init(&kmem_object->mtx, "vm object", "kmem object", MTX_DEF); + rw_init(&kmem_object->lock, "kmem vm object"); _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), kmem_object); #if VM_NRESERVLEVEL > 0 @@ -300,7 +301,7 @@ void vm_object_clear_flag(vm_object_t object, u_short bits) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); object->flags &= ~bits; } @@ -317,7 +318,7 @@ int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); switch (object->type) { case OBJT_DEFAULT: case OBJT_DEVICE: @@ -343,7 +344,7 @@ void vm_object_pip_add(vm_object_t object, short i) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); object->paging_in_progress += i; } @@ -351,7 +352,7 @@ void vm_object_pip_subtract(vm_object_t object, short i) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); object->paging_in_progress -= i; } @@ -359,7 +360,7 @@ void vm_object_pip_wakeup(vm_object_t object) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); object->paging_in_progress--; if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { vm_object_clear_flag(object, OBJ_PIPWNT); @@ -371,7 +372,7 @@ void vm_object_pip_wakeupn(vm_object_t object, short i) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); if (i) object->paging_in_progress -= i; if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { @@ -384,7 +385,7 @@ void vm_object_pip_wait(vm_object_t object, char *waitid) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); while (object->paging_in_progress) { object->flags |= OBJ_PIPWNT; VM_OBJECT_SLEEP(object, object, PVM, waitid, 0); @@ -418,9 +419,9 @@ vm_object_reference(vm_object_t object) { if (object == NULL) return; - VM_OBJECT_LOCK(object); + 
VM_OBJECT_WLOCK(object); vm_object_reference_locked(object); - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); } /* @@ -435,7 +436,7 @@ vm_object_reference_locked(vm_object_t object) { struct vnode *vp; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); object->ref_count++; if (object->type == OBJT_VNODE) { vp = object->handle; @@ -451,7 +452,7 @@ vm_object_vndeallocate(vm_object_t object) { struct vnode *vp = (struct vnode *) object->handle; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_VNODE, ("vm_object_vndeallocate: not a vnode object")); KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp")); @@ -464,23 +465,23 @@ vm_object_vndeallocate(vm_object_t object) if (object->ref_count > 1) { object->ref_count--; - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); /* vrele may need the vnode lock. */ vrele(vp); } else { vhold(vp); - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vdrop(vp); - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); object->ref_count--; if (object->type == OBJT_DEAD) { - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); VOP_UNLOCK(vp, 0); } else { if (object->ref_count == 0) VOP_UNSET_TEXT(vp); - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); vput(vp); } } @@ -503,7 +504,7 @@ vm_object_deallocate(vm_object_t object) vm_object_t temp; while (object != NULL) { - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); if (object->type == OBJT_VNODE) { vm_object_vndeallocate(object); return; @@ -520,7 +521,7 @@ vm_object_deallocate(vm_object_t object) */ object->ref_count--; if (object->ref_count > 1) { - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); return; } else if (object->ref_count == 1) { if (object->shadow_count == 0 && @@ -539,12 +540,12 @@ vm_object_deallocate(vm_object_t object) ("vm_object_deallocate: ref_count: %d, shadow_count: %d", object->ref_count, object->shadow_count)); - if (!VM_OBJECT_TRYLOCK(robject)) { + if (!VM_OBJECT_TRYWLOCK(robject)) { /* * Avoid a potential deadlock. 
*/ object->ref_count++; - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); /* * More likely than not the thread * holding robject's lock has lower @@ -568,27 +569,27 @@ vm_object_deallocate(vm_object_t object) robject->ref_count++; retry: if (robject->paging_in_progress) { - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); vm_object_pip_wait(robject, "objde1"); temp = robject->backing_object; if (object == temp) { - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); goto retry; } } else if (object->paging_in_progress) { - VM_OBJECT_UNLOCK(robject); + VM_OBJECT_WUNLOCK(robject); object->flags |= OBJ_PIPWNT; VM_OBJECT_SLEEP(object, object, PDROP | PVM, "objde2", 0); - VM_OBJECT_LOCK(robject); + VM_OBJECT_WLOCK(robject); temp = robject->backing_object; if (object == temp) { - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); goto retry; } } else - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); if (robject->ref_count == 1) { robject->ref_count--; @@ -597,21 +598,21 @@ retry: } object = robject; vm_object_collapse(object); - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); continue; } - VM_OBJECT_UNLOCK(robject); + VM_OBJECT_WUNLOCK(robject); } - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); return; } doterm: temp = object->backing_object; if (temp != NULL) { - VM_OBJECT_LOCK(temp); + VM_OBJECT_WLOCK(temp); LIST_REMOVE(object, shadow_list); temp->shadow_count--; - VM_OBJECT_UNLOCK(temp); + VM_OBJECT_WUNLOCK(temp); object->backing_object = NULL; } /* @@ -622,7 +623,7 @@ doterm: if ((object->flags & OBJ_DEAD) == 0) vm_object_terminate(object); else - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); object = temp; } } @@ -678,7 +679,7 @@ vm_object_terminate(vm_object_t object) u_int exhausted; int n, i; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); /* * Make sure no one uses us. @@ -704,11 +705,11 @@ vm_object_terminate(vm_object_t object) * Clean pages and flush buffers. */ vm_object_page_clean(object, 0, 0, OBJPC_SYNC); - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); vinvalbuf(vp, V_SAVE, 0, 0); - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); } KASSERT(object->ref_count == 0, @@ -793,12 +794,14 @@ vm_object_terminate(vm_object_t object) if (__predict_false(!LIST_EMPTY(&object->rvq))) vm_reserv_break_all(object); #endif + if (__predict_false(!vm_object_cache_is_empty(object))) + vm_page_cache_free(object, 0, 0); /* * Let the pager know object is dead. 
*/ vm_pager_deallocate(object); - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); vm_object_destroy(object); } @@ -854,7 +857,7 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, int curgeneration, n, pagerflags; boolean_t clearobjflags, eio, res; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_VNODE, ("Not a vnode object")); if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 || object->resident_page_count == 0) @@ -940,7 +943,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, int count, i, mreq, runlen; vm_page_lock_assert(p, MA_NOTOWNED); - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); count = 1; mreq = 0; @@ -998,11 +1001,11 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, return (TRUE); res = TRUE; error = 0; - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); while ((backing_object = object->backing_object) != NULL) { - VM_OBJECT_LOCK(backing_object); + VM_OBJECT_WLOCK(backing_object); offset += object->backing_object_offset; - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); object = backing_object; if (object->size < OFF_TO_IDX(offset + size)) size = IDX_TO_OFF(object->size) - offset; @@ -1022,7 +1025,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, if (object->type == OBJT_VNODE && (object->flags & OBJ_MIGHTBEDIRTY) != 0) { vp = object->handle; - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); (void) vn_start_write(vp, &mp, V_WAIT); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (syncio && !invalidate && offset == 0 && @@ -1040,17 +1043,17 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, flags |= invalidate ? 
(OBJPC_SYNC | OBJPC_INVAL) : 0; fsync_after = FALSE; } - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); res = vm_object_page_clean(object, offset, offset + size, flags); - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); if (fsync_after) error = VOP_FSYNC(vp, MNT_WAIT, curthread); VOP_UNLOCK(vp, 0); vn_finished_write(mp); if (error != 0) res = FALSE; - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); } if ((object->type == OBJT_VNODE || object->type == OBJT_DEVICE) && invalidate) { @@ -1068,7 +1071,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, vm_object_page_remove(object, OFF_TO_IDX(offset), OFF_TO_IDX(offset + size + PAGE_MASK), flags); } - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); return (res); } @@ -1103,7 +1106,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, if (object == NULL) return; - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); /* * Locate and adjust resident pages */ @@ -1144,10 +1147,10 @@ shadowlookup: backing_object = tobject->backing_object; if (backing_object == NULL) goto unlock_tobject; - VM_OBJECT_LOCK(backing_object); + VM_OBJECT_WLOCK(backing_object); tpindex += OFF_TO_IDX(tobject->backing_object_offset); if (tobject != object) - VM_OBJECT_UNLOCK(tobject); + VM_OBJECT_WUNLOCK(tobject); tobject = backing_object; goto shadowlookup; } else if (m->valid != VM_PAGE_BITS_ALL) @@ -1175,10 +1178,10 @@ shadowlookup: } vm_page_unlock(m); if (object != tobject) - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); m->oflags |= VPO_WANTED; VM_OBJECT_SLEEP(tobject, m, PDROP | PVM, "madvpo", 0); - VM_OBJECT_LOCK(object); + VM_OBJECT_WLOCK(object); goto relookup; } if (advise == MADV_WILLNEED) { @@ -1211,9 +1214,9 @@ shadowlookup: swap_pager_freespace(tobject, tpindex, 1); unlock_tobject: if (tobject != object) - VM_OBJECT_UNLOCK(tobject); + VM_OBJECT_WUNLOCK(tobject); } - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); } /* @@ -1241,15 +1244,15 @@ vm_object_shadow( * Don't create the new object if the old object isn't shared. */ if (source != NULL) { - VM_OBJECT_LOCK(source); + VM_OBJECT_WLOCK(source); if (source->ref_count == 1 && source->handle == NULL && (source->type == OBJT_DEFAULT || source->type == OBJT_SWAP)) { - VM_OBJECT_UNLOCK(source); + VM_OBJECT_WUNLOCK(source); return; } - VM_OBJECT_UNLOCK(source); + VM_OBJECT_WUNLOCK(source); } /* @@ -1274,7 +1277,7 @@ vm_object_shadow( */ result->backing_object_offset = *offset; if (source != NULL) { - VM_OBJECT_LOCK(source); + VM_OBJECT_WLOCK(source); LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list); source->shadow_count++; #if VM_NRESERVLEVEL > 0 @@ -1282,7 +1285,7 @@ vm_object_shadow( result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER - 1)) - 1); #endif - VM_OBJECT_UNLOCK(source); + VM_OBJECT_WUNLOCK(source); } @@ -1316,7 +1319,7 @@ vm_object_split(vm_map_entry_t entry) return; if (orig_object->ref_count <= 1) return; - VM_OBJECT_UNLOCK(orig_object); + VM_OBJECT_WUNLOCK(orig_object); offidxstart = OFF_TO_IDX(entry->offset); size = atop(entry->end - entry->start); @@ -1331,17 +1334,17 @@ vm_object_split(vm_map_entry_t entry) * At this point, the new object is still private, so the order in * which the original and new objects are locked does not matter. 
*/ - VM_OBJECT_LOCK(new_object); - VM_OBJECT_LOCK(orig_object); + VM_OBJECT_WLOCK(new_object); + VM_OBJECT_WLOCK(orig_object); source = orig_object->backing_object; if (source != NULL) { - VM_OBJECT_LOCK(source); + VM_OBJECT_WLOCK(source); if ((source->flags & OBJ_DEAD) != 0) { - VM_OBJECT_UNLOCK(source); - VM_OBJECT_UNLOCK(orig_object); - VM_OBJECT_UNLOCK(new_object); + VM_OBJECT_WUNLOCK(source); + VM_OBJECT_WUNLOCK(orig_object); + VM_OBJECT_WUNLOCK(new_object); vm_object_deallocate(new_object); - VM_OBJECT_LOCK(orig_object); + VM_OBJECT_WLOCK(orig_object); return; } LIST_INSERT_HEAD(&source->shadow_head, @@ -1349,7 +1352,7 @@ vm_object_split(vm_map_entry_t entry) source->shadow_count++; vm_object_reference_locked(source); /* for new_object */ vm_object_clear_flag(source, OBJ_ONEMAPPING); - VM_OBJECT_UNLOCK(source); + VM_OBJECT_WUNLOCK(source); new_object->backing_object_offset = orig_object->backing_object_offset + entry->offset; new_object->backing_object = source; @@ -1429,14 +1432,14 @@ retry: */ swap_pager_copy(orig_object, new_object, offidxstart, 0); } - VM_OBJECT_UNLOCK(orig_object); + VM_OBJECT_WUNLOCK(orig_object); TAILQ_FOREACH(m, &new_object->memq, listq) vm_page_wakeup(m); - VM_OBJECT_UNLOCK(new_object); + VM_OBJECT_WUNLOCK(new_object); entry->object.vm_object = new_object; entry->offset = 0LL; vm_object_deallocate(orig_object); - VM_OBJECT_LOCK(new_object); + VM_OBJECT_WLOCK(new_object); } #define OBSC_TEST_ALL_SHADOWED 0x0001 @@ -1455,8 +1458,8 @@ vm_object_backing_scan(vm_object_t object, int op) int color, i, n; int r = 1; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); - VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); + VM_OBJECT_ASSERT_WLOCKED(object->backing_object); backing_object = object->backing_object; backing_offset_index = OFF_TO_IDX(object->backing_object_offset); @@ -1563,12 +1566,12 @@ restart: continue; } else if (op & OBSC_COLLAPSE_WAIT) { if ((p->oflags & VPO_BUSY) || p->busy) { - VM_OBJECT_UNLOCK(object); + VM_OBJECT_WUNLOCK(object); p->oflags |= VPO_WANTED; VM_OBJECT_SLEEP(backing_object, p, PDROP | PVM, "vmocol", 0); - VM_OBJECT_LOCK(object); - VM_OBJECT_LOCK(backing_object); + VM_OBJECT_WLOCK(object); + VM_OBJECT_WLOCK(backing_object); /* * If we slept, anything could have * happened. Since the object is @@ -1688,8 +1691,8 @@ vm_object_qcollapse(vm_object_t object) { vm_object_t backing_object = object->backing_object; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); - VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); + VM_OBJECT_ASSERT_WLOCKED(backing_object); if (backing_object->ref_count != 1) return; @@ -1707,7 +1710,7 @@ vm_object_qcollapse(vm_object_t object) void vm_object_collapse(vm_object_t object) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); while (TRUE) { vm_object_t backing_object; @@ -1724,7 +1727,7 @@ vm_object_collapse(vm_object_t object) * we check the backing object first, because it is most likely * not collapsable. 
*/ - VM_OBJECT_LOCK(backing_object); + VM_OBJECT_WLOCK(backing_object); if (backing_object->handle != NULL || (backing_object->type != OBJT_DEFAULT && backing_object->type != OBJT_SWAP) || @@ -1733,7 +1736,7 @@ vm_object_collapse(vm_object_t object) (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) || (object->flags & OBJ_DEAD)) { - VM_OBJECT_UNLOCK(backing_object); + VM_OBJECT_WUNLOCK(backing_object); break; } @@ -1742,7 +1745,7 @@ vm_object_collapse(vm_object_t object) backing_object->paging_in_progress != 0 ) { vm_object_qcollapse(object); - VM_OBJECT_UNLOCK(backing_object); + VM_OBJECT_WUNLOCK(backing_object); break; } /* @@ -1796,7 +1799,7 @@ vm_object_collapse(vm_object_t object) LIST_REMOVE(object, shadow_list); backing_object->shadow_count--; if (backing_object->backing_object) { - VM_OBJECT_LOCK(backing_object->backing_object); + VM_OBJECT_WLOCK(backing_object->backing_object); LIST_REMOVE(backing_object, shadow_list); LIST_INSERT_HEAD( &backing_object->backing_object->shadow_head, @@ -1804,7 +1807,7 @@ vm_object_collapse(vm_object_t object) /* * The shadow_count has not changed. */ - VM_OBJECT_UNLOCK(backing_object->backing_object); + VM_OBJECT_WUNLOCK(backing_object->backing_object); } object->backing_object = backing_object->backing_object; object->backing_object_offset += @@ -1820,7 +1823,7 @@ vm_object_collapse(vm_object_t object) KASSERT(backing_object->ref_count == 1, ( "backing_object %p was somehow re-referenced during collapse!", backing_object)); - VM_OBJECT_UNLOCK(backing_object); + VM_OBJECT_WUNLOCK(backing_object); vm_object_destroy(backing_object); object_collapses++; @@ -1834,7 +1837,7 @@ vm_object_collapse(vm_object_t object) if (object->resident_page_count != object->size && vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) { - VM_OBJECT_UNLOCK(backing_object); + VM_OBJECT_WUNLOCK(backing_object); break; } @@ -1848,7 +1851,7 @@ vm_object_collapse(vm_object_t object) new_backing_object = backing_object->backing_object; if ((object->backing_object = new_backing_object) != NULL) { - VM_OBJECT_LOCK(new_backing_object); + VM_OBJECT_WLOCK(new_backing_object); LIST_INSERT_HEAD( &new_backing_object->shadow_head, object, @@ -1856,7 +1859,7 @@ vm_object_collapse(vm_object_t object) ); new_backing_object->shadow_count++; vm_object_reference_locked(new_backing_object); - VM_OBJECT_UNLOCK(new_backing_object); + VM_OBJECT_WUNLOCK(new_backing_object); object->backing_object_offset += backing_object->backing_object_offset; } @@ -1866,7 +1869,7 @@ vm_object_collapse(vm_object_t object) * its ref_count was at least 2, it will not vanish. 
*/ backing_object->ref_count--; - VM_OBJECT_UNLOCK(backing_object); + VM_OBJECT_WUNLOCK(backing_object); object_bypasses++; } @@ -1913,7 +1916,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int i, n; int wirings; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); KASSERT((object->flags & OBJ_UNMANAGED) == 0 || (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED, ("vm_object_page_remove: illegal options for object %p", object)); @@ -2036,7 +2039,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end) struct mtx *mtx, *new_mtx; vm_page_t p, next; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, ("vm_object_page_cache: illegal object %p", object)); if (object->resident_page_count == 0) @@ -2084,7 +2087,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) vm_pindex_t pindex; int rv; - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); for (pindex = start; pindex < end; pindex++) { m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); @@ -2145,10 +2148,10 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, if (prev_object == NULL) return (TRUE); - VM_OBJECT_LOCK(prev_object); + VM_OBJECT_WLOCK(prev_object); if (prev_object->type != OBJT_DEFAULT && prev_object->type != OBJT_SWAP) { - VM_OBJECT_UNLOCK(prev_object); + VM_OBJECT_WUNLOCK(prev_object); return (FALSE); } @@ -2163,7 +2166,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, * pages not mapped to prev_entry may be in use anyway) */ if (prev_object->backing_object != NULL) { - VM_OBJECT_UNLOCK(prev_object); + VM_OBJECT_WUNLOCK(prev_object); return (FALSE); } @@ -2173,7 +2176,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, if ((prev_object->ref_count > 1) && (prev_object->size != next_pindex)) { - VM_OBJECT_UNLOCK(prev_object); + VM_OBJECT_WUNLOCK(prev_object); return (FALSE); } @@ -2227,7 +2230,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, if (next_pindex + next_size > prev_object->size) prev_object->size = next_pindex + next_size; - VM_OBJECT_UNLOCK(prev_object); + VM_OBJECT_WUNLOCK(prev_object); return (TRUE); } @@ -2235,7 +2238,7 @@ void vm_object_set_writeable_dirty(vm_object_t object) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); if (object->type != OBJT_VNODE) return; object->generation++; |
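
Note: the diffstat above is limited to sys/vm/vm_object.c, so the VM_OBJECT_WLOCK/WUNLOCK/TRYWLOCK/ASSERT_WLOCKED macros that this change switches to are not visible in the diff itself; their definitions live in sys/vm/vm_object.h. A minimal sketch of how those macros are assumed to wrap the rwlock(9) API (illustrative only, since that header is not part of this diff):

/*
 * Assumed rwlock-based definitions of the VM object lock macros
 * used throughout the diff above.  The object's lock field is the
 * "struct rwlock lock" initialized with rw_init()/rw_init_flags()
 * in vm_object_init() and vm_object_zinit().
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

#define	VM_OBJECT_WLOCK(object)						\
	rw_wlock(&(object)->lock)
#define	VM_OBJECT_WUNLOCK(object)					\
	rw_wunlock(&(object)->lock)
#define	VM_OBJECT_TRYWLOCK(object)					\
	rw_try_wlock(&(object)->lock)
#define	VM_OBJECT_ASSERT_WLOCKED(object)				\
	rw_assert(&(object)->lock, RA_WLOCKED)
#define	VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo)		\
	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))

Every VM_OBJECT_LOCK()/UNLOCK() call in this file is converted to the write variant, so the change stays mechanical; nothing in this diff acquires the object lock in read mode.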