author     attilio <attilio@FreeBSD.org>  2013-03-09 03:19:53 +0000
committer  attilio <attilio@FreeBSD.org>  2013-03-09 03:19:53 +0000
commit     76954ad68a25c559c6a8b2911674760afd4962f6 (patch)
tree       80cdb7116c19e2e4f42aeed31a65f76a54db11df /sys/vm/vm_object.c
parent     993799493c64eb0b9faeab971fbe4ecfe0214278 (diff)
parent     16a80466e5837ad617b6b144297fd6069188b9b3 (diff)
Merge from vmcontention.
Diffstat (limited to 'sys/vm/vm_object.c')
 -rw-r--r--  sys/vm/vm_object.c  |  207
 1 file changed, 104 insertions, 103 deletions
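The diff below replaces the per-object mutex (object->mtx) with a read/write lock (object->lock) and renames the locking macros accordingly: VM_OBJECT_LOCK/UNLOCK become VM_OBJECT_WLOCK/WUNLOCK, VM_OBJECT_TRYLOCK becomes VM_OBJECT_TRYWLOCK, and VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED) becomes VM_OBJECT_ASSERT_WLOCKED(obj). The wrapper macros themselves are defined in sys/vm/vm_object.h, which is not part of this diff; the sketch below shows how they plausibly expand onto the rwlock(9) API and is illustrative only.

/*
 * Illustrative sketch only -- the real definitions live in
 * sys/vm/vm_object.h, not in this diff.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

#define	VM_OBJECT_WLOCK(object)		rw_wlock(&(object)->lock)
#define	VM_OBJECT_WUNLOCK(object)	rw_wunlock(&(object)->lock)
#define	VM_OBJECT_TRYWLOCK(object)	rw_try_wlock(&(object)->lock)
#define	VM_OBJECT_ASSERT_WLOCKED(object)				\
	rw_assert(&(object)->lock, RA_WLOCKED)
#define	VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo)		\
	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))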
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 606b605..255d919 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h> /* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
@@ -195,8 +196,8 @@ vm_object_zinit(void *mem, int size, int flags)
vm_object_t object;
object = (vm_object_t)mem;
- bzero(&object->mtx, sizeof(object->mtx));
- mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK);
+ bzero(&object->lock, sizeof(object->lock));
+ rw_init_flags(&object->lock, "vm object", RW_DUPOK);
/* These are true for any object that has been freed */
object->rtree.rt_root = 0;
@@ -267,7 +268,7 @@ vm_object_init(void)
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
- mtx_init(&kernel_object->mtx, "vm object", "kernel object", MTX_DEF);
+ rw_init(&kernel_object->lock, "kernel vm object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
#if VM_NRESERVLEVEL > 0
@@ -275,7 +276,7 @@ vm_object_init(void)
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif
- mtx_init(&kmem_object->mtx, "vm object", "kmem object", MTX_DEF);
+ rw_init(&kmem_object->lock, "kmem vm object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
#if VM_NRESERVLEVEL > 0
@@ -303,7 +304,7 @@ void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->flags &= ~bits;
}
@@ -320,7 +321,7 @@ int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
switch (object->type) {
case OBJT_DEFAULT:
case OBJT_DEVICE:
@@ -346,7 +347,7 @@ void
vm_object_pip_add(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress += i;
}
@@ -354,7 +355,7 @@ void
vm_object_pip_subtract(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress -= i;
}
@@ -362,7 +363,7 @@ void
vm_object_pip_wakeup(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress--;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
@@ -374,7 +375,7 @@ void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (i)
object->paging_in_progress -= i;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
@@ -387,7 +388,7 @@ void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
while (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
@@ -421,9 +422,9 @@ vm_object_reference(vm_object_t object)
{
if (object == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -438,7 +439,7 @@ vm_object_reference_locked(vm_object_t object)
{
struct vnode *vp;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->ref_count++;
if (object->type == OBJT_VNODE) {
vp = object->handle;
@@ -454,7 +455,7 @@ vm_object_vndeallocate(vm_object_t object)
{
struct vnode *vp = (struct vnode *) object->handle;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@@ -467,23 +468,23 @@ vm_object_vndeallocate(vm_object_t object)
if (object->ref_count > 1) {
object->ref_count--;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/* vrele may need the vnode lock. */
vrele(vp);
} else {
vhold(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vdrop(vp);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->ref_count--;
if (object->type == OBJT_DEAD) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
} else {
if (object->ref_count == 0)
VOP_UNSET_TEXT(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vput(vp);
}
}
@@ -506,7 +507,7 @@ vm_object_deallocate(vm_object_t object)
vm_object_t temp;
while (object != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_VNODE) {
vm_object_vndeallocate(object);
return;
@@ -523,7 +524,7 @@ vm_object_deallocate(vm_object_t object)
*/
object->ref_count--;
if (object->ref_count > 1) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
} else if (object->ref_count == 1) {
if (object->shadow_count == 0 &&
@@ -542,12 +543,12 @@ vm_object_deallocate(vm_object_t object)
("vm_object_deallocate: ref_count: %d, shadow_count: %d",
object->ref_count,
object->shadow_count));
- if (!VM_OBJECT_TRYLOCK(robject)) {
+ if (!VM_OBJECT_TRYWLOCK(robject)) {
/*
* Avoid a potential deadlock.
*/
object->ref_count++;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* More likely than not the thread
* holding robject's lock has lower
@@ -571,27 +572,27 @@ vm_object_deallocate(vm_object_t object)
robject->ref_count++;
retry:
if (robject->paging_in_progress) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_object_pip_wait(robject,
"objde1");
temp = robject->backing_object;
if (object == temp) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
} else if (object->paging_in_progress) {
- VM_OBJECT_UNLOCK(robject);
+ VM_OBJECT_WUNLOCK(robject);
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object,
PDROP | PVM, "objde2", 0);
- VM_OBJECT_LOCK(robject);
+ VM_OBJECT_WLOCK(robject);
temp = robject->backing_object;
if (object == temp) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
} else
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (robject->ref_count == 1) {
robject->ref_count--;
@@ -600,21 +601,21 @@ retry:
}
object = robject;
vm_object_collapse(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
- VM_OBJECT_UNLOCK(robject);
+ VM_OBJECT_WUNLOCK(robject);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
doterm:
temp = object->backing_object;
if (temp != NULL) {
- VM_OBJECT_LOCK(temp);
+ VM_OBJECT_WLOCK(temp);
LIST_REMOVE(object, shadow_list);
temp->shadow_count--;
- VM_OBJECT_UNLOCK(temp);
+ VM_OBJECT_WUNLOCK(temp);
object->backing_object = NULL;
}
/*
@@ -625,7 +626,7 @@ doterm:
if ((object->flags & OBJ_DEAD) == 0)
vm_object_terminate(object);
else
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = temp;
}
}
@@ -677,7 +678,7 @@ vm_object_terminate(vm_object_t object)
{
vm_page_t p, p_next;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Make sure no one uses us.
@@ -703,11 +704,11 @@ vm_object_terminate(vm_object_t object)
* Clean pages and flush buffers.
*/
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vinvalbuf(vp, V_SAVE, 0, 0);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
KASSERT(object->ref_count == 0,
@@ -762,7 +763,7 @@ vm_object_terminate(vm_object_t object)
* Let the pager know object is dead.
*/
vm_pager_deallocate(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_object_destroy(object);
}
@@ -818,7 +819,7 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
int curgeneration, n, pagerflags;
boolean_t clearobjflags, eio, res;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
object->resident_page_count == 0)
@@ -904,7 +905,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
int count, i, mreq, runlen;
vm_page_lock_assert(p, MA_NOTOWNED);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
count = 1;
mreq = 0;
@@ -962,11 +963,11 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
return (TRUE);
res = TRUE;
error = 0;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
offset += object->backing_object_offset;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = backing_object;
if (object->size < OFF_TO_IDX(offset + size))
size = IDX_TO_OFF(object->size) - offset;
@@ -986,7 +987,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
if (object->type == OBJT_VNODE &&
(object->flags & OBJ_MIGHTBEDIRTY) != 0) {
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (syncio && !invalidate && offset == 0 &&
@@ -1004,17 +1005,17 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
fsync_after = FALSE;
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
res = vm_object_page_clean(object, offset, offset + size,
flags);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (fsync_after)
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
if (error != 0)
res = FALSE;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
if ((object->type == OBJT_VNODE ||
object->type == OBJT_DEVICE) && invalidate) {
@@ -1032,7 +1033,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
vm_object_page_remove(object, OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK), flags);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (res);
}
@@ -1067,7 +1068,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
if (object == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
/*
* Locate and adjust resident pages
*/
@@ -1108,10 +1109,10 @@ shadowlookup:
backing_object = tobject->backing_object;
if (backing_object == NULL)
goto unlock_tobject;
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
tpindex += OFF_TO_IDX(tobject->backing_object_offset);
if (tobject != object)
- VM_OBJECT_UNLOCK(tobject);
+ VM_OBJECT_WUNLOCK(tobject);
tobject = backing_object;
goto shadowlookup;
} else if (m->valid != VM_PAGE_BITS_ALL)
@@ -1139,10 +1140,10 @@ shadowlookup:
}
vm_page_unlock(m);
if (object != tobject)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(tobject, m, PDROP | PVM, "madvpo", 0);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto relookup;
}
if (advise == MADV_WILLNEED) {
@@ -1175,9 +1176,9 @@ shadowlookup:
swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
if (tobject != object)
- VM_OBJECT_UNLOCK(tobject);
+ VM_OBJECT_WUNLOCK(tobject);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -1205,15 +1206,15 @@ vm_object_shadow(
* Don't create the new object if the old object isn't shared.
*/
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
if (source->ref_count == 1 &&
source->handle == NULL &&
(source->type == OBJT_DEFAULT ||
source->type == OBJT_SWAP)) {
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
return;
}
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
}
/*
@@ -1238,7 +1239,7 @@ vm_object_shadow(
*/
result->backing_object_offset = *offset;
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
source->shadow_count++;
#if VM_NRESERVLEVEL > 0
@@ -1246,7 +1247,7 @@ vm_object_shadow(
result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
((1 << (VM_NFREEORDER - 1)) - 1);
#endif
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
}
@@ -1277,7 +1278,7 @@ vm_object_split(vm_map_entry_t entry)
return;
if (orig_object->ref_count <= 1)
return;
- VM_OBJECT_UNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(orig_object);
offidxstart = OFF_TO_IDX(entry->offset);
size = atop(entry->end - entry->start);
@@ -1292,17 +1293,17 @@ vm_object_split(vm_map_entry_t entry)
* At this point, the new object is still private, so the order in
* which the original and new objects are locked does not matter.
*/
- VM_OBJECT_LOCK(new_object);
- VM_OBJECT_LOCK(orig_object);
+ VM_OBJECT_WLOCK(new_object);
+ VM_OBJECT_WLOCK(orig_object);
source = orig_object->backing_object;
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
if ((source->flags & OBJ_DEAD) != 0) {
- VM_OBJECT_UNLOCK(source);
- VM_OBJECT_UNLOCK(orig_object);
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(source);
+ VM_OBJECT_WUNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(new_object);
vm_object_deallocate(new_object);
- VM_OBJECT_LOCK(orig_object);
+ VM_OBJECT_WLOCK(orig_object);
return;
}
LIST_INSERT_HEAD(&source->shadow_head,
@@ -1310,7 +1311,7 @@ vm_object_split(vm_map_entry_t entry)
source->shadow_count++;
vm_object_reference_locked(source); /* for new_object */
vm_object_clear_flag(source, OBJ_ONEMAPPING);
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
new_object->backing_object_offset =
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
@@ -1337,10 +1338,10 @@ retry:
* not be changed by this operation.
*/
if ((m->oflags & VPO_BUSY) || m->busy) {
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(new_object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(orig_object, m, PVM, "spltwt", 0);
- VM_OBJECT_LOCK(new_object);
+ VM_OBJECT_WLOCK(new_object);
goto retry;
}
#if VM_NRESERVLEVEL > 0
@@ -1384,14 +1385,14 @@ retry:
vm_page_cache_transfer(orig_object, offidxstart,
new_object);
}
- VM_OBJECT_UNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(orig_object);
TAILQ_FOREACH(m, &new_object->memq, listq)
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(new_object);
entry->object.vm_object = new_object;
entry->offset = 0LL;
vm_object_deallocate(orig_object);
- VM_OBJECT_LOCK(new_object);
+ VM_OBJECT_WLOCK(new_object);
}
#define OBSC_TEST_ALL_SHADOWED 0x0001
@@ -1406,8 +1407,8 @@ vm_object_backing_scan(vm_object_t object, int op)
vm_object_t backing_object;
vm_pindex_t backing_offset_index;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
backing_object = object->backing_object;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1495,12 +1496,12 @@ vm_object_backing_scan(vm_object_t object, int op)
}
} else if (op & OBSC_COLLAPSE_WAIT) {
if ((p->oflags & VPO_BUSY) || p->busy) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
p->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(backing_object, p,
PDROP | PVM, "vmocol", 0);
- VM_OBJECT_LOCK(object);
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(object);
+ VM_OBJECT_WLOCK(backing_object);
/*
* If we slept, anything could have
* happened. Since the object is
@@ -1627,8 +1628,8 @@ vm_object_qcollapse(vm_object_t object)
{
vm_object_t backing_object = object->backing_object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ VM_OBJECT_ASSERT_WLOCKED(backing_object);
if (backing_object->ref_count != 1)
return;
@@ -1646,7 +1647,7 @@ vm_object_qcollapse(vm_object_t object)
void
vm_object_collapse(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
while (TRUE) {
vm_object_t backing_object;
@@ -1663,7 +1664,7 @@ vm_object_collapse(vm_object_t object)
* we check the backing object first, because it is most likely
* not collapsable.
*/
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
if (backing_object->handle != NULL ||
(backing_object->type != OBJT_DEFAULT &&
backing_object->type != OBJT_SWAP) ||
@@ -1672,7 +1673,7 @@ vm_object_collapse(vm_object_t object)
(object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP) ||
(object->flags & OBJ_DEAD)) {
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
@@ -1681,7 +1682,7 @@ vm_object_collapse(vm_object_t object)
backing_object->paging_in_progress != 0
) {
vm_object_qcollapse(object);
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
/*
@@ -1742,7 +1743,7 @@ vm_object_collapse(vm_object_t object)
LIST_REMOVE(object, shadow_list);
backing_object->shadow_count--;
if (backing_object->backing_object) {
- VM_OBJECT_LOCK(backing_object->backing_object);
+ VM_OBJECT_WLOCK(backing_object->backing_object);
LIST_REMOVE(backing_object, shadow_list);
LIST_INSERT_HEAD(
&backing_object->backing_object->shadow_head,
@@ -1750,7 +1751,7 @@ vm_object_collapse(vm_object_t object)
/*
* The shadow_count has not changed.
*/
- VM_OBJECT_UNLOCK(backing_object->backing_object);
+ VM_OBJECT_WUNLOCK(backing_object->backing_object);
}
object->backing_object = backing_object->backing_object;
object->backing_object_offset +=
@@ -1766,7 +1767,7 @@ vm_object_collapse(vm_object_t object)
KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
backing_object));
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
vm_object_destroy(backing_object);
object_collapses++;
@@ -1780,7 +1781,7 @@ vm_object_collapse(vm_object_t object)
if (object->resident_page_count != object->size &&
vm_object_backing_scan(object,
OBSC_TEST_ALL_SHADOWED) == 0) {
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
@@ -1794,7 +1795,7 @@ vm_object_collapse(vm_object_t object)
new_backing_object = backing_object->backing_object;
if ((object->backing_object = new_backing_object) != NULL) {
- VM_OBJECT_LOCK(new_backing_object);
+ VM_OBJECT_WLOCK(new_backing_object);
LIST_INSERT_HEAD(
&new_backing_object->shadow_head,
object,
@@ -1802,7 +1803,7 @@ vm_object_collapse(vm_object_t object)
);
new_backing_object->shadow_count++;
vm_object_reference_locked(new_backing_object);
- VM_OBJECT_UNLOCK(new_backing_object);
+ VM_OBJECT_WUNLOCK(new_backing_object);
object->backing_object_offset +=
backing_object->backing_object_offset;
}
@@ -1812,7 +1813,7 @@ vm_object_collapse(vm_object_t object)
* its ref_count was at least 2, it will not vanish.
*/
backing_object->ref_count--;
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
object_bypasses++;
}
@@ -1855,7 +1856,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
vm_page_t p, next;
int wirings;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
(options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
("vm_object_page_remove: illegal options for object %p", object));
@@ -1950,7 +1951,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
struct mtx *mtx, *new_mtx;
vm_page_t p, next;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
("vm_object_page_cache: illegal object %p", object));
if (object->resident_page_count == 0)
@@ -1998,7 +1999,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
vm_pindex_t pindex;
int rv;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (pindex = start; pindex < end; pindex++) {
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
VM_ALLOC_RETRY);
@@ -2059,10 +2060,10 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (prev_object == NULL)
return (TRUE);
- VM_OBJECT_LOCK(prev_object);
+ VM_OBJECT_WLOCK(prev_object);
if (prev_object->type != OBJT_DEFAULT &&
prev_object->type != OBJT_SWAP) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2077,7 +2078,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
* pages not mapped to prev_entry may be in use anyway)
*/
if (prev_object->backing_object != NULL) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2087,7 +2088,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if ((prev_object->ref_count > 1) &&
(prev_object->size != next_pindex)) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2141,7 +2142,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (next_pindex + next_size > prev_object->size)
prev_object->size = next_pindex + next_size;
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (TRUE);
}
@@ -2149,7 +2150,7 @@ void
vm_object_set_writeable_dirty(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_VNODE)
return;
object->generation++;