author     attilio <attilio@FreeBSD.org>  2013-02-21 21:54:53 +0000
committer  attilio <attilio@FreeBSD.org>  2013-02-21 21:54:53 +0000
commit     905e648d428b4f43651b120c5e7f4a9f46074308
tree       5ec5c4181bdee409366e113e2fb2016d8daa172b /sys
parent     81a3802a87e56d02e5acb78c627a9c3125941921
Hide the details for the assertion for VM_OBJECT_LOCK operations.
Rename current VM_OBJECT_LOCK_ASSERT(foo, RA_WLOCKED) into
VM_OBJECT_ASSERT_WLOCKED(foo)

Sponsored by:	EMC / Isilon storage division
Requested by:	alc
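[Editor's note] The heart of the change is the macro definition in sys/vm/vm_object.h (shown in full in the diff below); every other hunk is the same mechanical substitution at a call site. A minimal before/after sketch, where rw_assert() and RA_WLOCKED come from the kernel's rwlock(9) interface:

/* Before: every caller must know the object lock is an rwlock
 * and name the assertion flavor itself. */
#define VM_OBJECT_LOCK_ASSERT(object, type) \
	rw_assert(&(object)->lock, (type))

/* After: the flavor is baked into the macro name, so the choice of
 * lock primitive no longer leaks into the callers. */
#define VM_OBJECT_ASSERT_WLOCKED(object) \
	rw_assert(&(object)->lock, RA_WLOCKED)

A typical call site accordingly shrinks from VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED) to VM_OBJECT_ASSERT_WLOCKED(m->object), the pattern repeated throughout the diff; a later change of the underlying lock type then only touches vm_object.h.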
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c                             | 12
-rw-r--r--  sys/arm/arm/pmap-v6.c                              |  6
-rw-r--r--  sys/arm/arm/pmap.c                                 |  6
-rw-r--r--  sys/cddl/compat/opensolaris/kern/opensolaris_vm.c  |  2
-rw-r--r--  sys/dev/agp/agp.c                                  |  2
-rw-r--r--  sys/dev/drm2/i915/i915_gem.c                       |  2
-rw-r--r--  sys/i386/i386/pmap.c                               | 12
-rw-r--r--  sys/i386/xen/pmap.c                                | 12
-rw-r--r--  sys/ia64/ia64/pmap.c                               | 10
-rw-r--r--  sys/kern/vfs_bio.c                                 | 10
-rw-r--r--  sys/kern/vfs_cluster.c                             |  5
-rw-r--r--  sys/mips/mips/pmap.c                               | 10
-rw-r--r--  sys/powerpc/aim/mmu_oea.c                          |  8
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c                        |  8
-rw-r--r--  sys/powerpc/booke/pmap.c                           | 10
-rw-r--r--  sys/sparc64/sparc64/pmap.c                         | 10
-rw-r--r--  sys/vm/device_pager.c                              |  8
-rw-r--r--  sys/vm/phys_pager.c                                |  2
-rw-r--r--  sys/vm/sg_pager.c                                  |  2
-rw-r--r--  sys/vm/swap_pager.c                                | 22
-rw-r--r--  sys/vm/vm_fault.c                                  |  4
-rw-r--r--  sys/vm/vm_object.c                                 | 42
-rw-r--r--  sys/vm/vm_object.h                                 |  4
-rw-r--r--  sys/vm/vm_page.c                                   | 62
-rw-r--r--  sys/vm/vm_pageout.c                                |  8
-rw-r--r--  sys/vm/vm_pager.c                                  |  2
-rw-r--r--  sys/vm/vm_pager.h                                  |  8
-rw-r--r--  sys/vm/vm_reserv.c                                 |  6
-rw-r--r--  sys/vm/vnode_pager.c                               |  6
29 files changed, 150 insertions(+), 151 deletions(-)
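[Editor's note] Since every hunk below is the same one-line substitution, the bulk of such a sweep could be scripted; a hypothetical one-liner for a FreeBSD src checkout (assuming BSD sed; the wrapped two-line assertion in sys/kern/vfs_cluster.c would still need joining by hand, and the vm_object.h definition is edited separately — this is not necessarily how the commit was produced):

grep -rl 'VM_OBJECT_LOCK_ASSERT' sys | xargs sed -i '' \
	-e 's/VM_OBJECT_LOCK_ASSERT(\(.*\), RA_WLOCKED)/VM_OBJECT_ASSERT_WLOCKED(\1)/'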
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c1ffe5c..7d63f0c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3493,7 +3493,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
va >= kmi.clean_eva,
("pmap_enter: managed mapping within the clean submap"));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
newpte = (pt_entry_t)(pa | PG_A | PG_V);
if ((access & VM_PROT_WRITE) != 0)
@@ -3760,7 +3760,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
- VM_OBJECT_LOCK_ASSERT(m_start->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@@ -3942,7 +3942,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p, pdpg;
int pat_mode;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
@@ -4556,7 +4556,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -4687,7 +4687,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -4831,7 +4831,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index bfebcef..8650515 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -2213,7 +2213,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -3429,7 +3429,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@@ -3476,7 +3476,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) != 0 ||
(m->aflags & PGA_WRITEABLE) != 0)
pmap_clearbit(m, PVF_WRITE);
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 84a9cfd..7e0bc5a 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -3007,7 +3007,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -4462,7 +4462,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@@ -4524,7 +4524,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) != 0 ||
(m->aflags & PGA_WRITEABLE) != 0)
pmap_clearbit(m, PVF_WRITE);
diff --git a/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c b/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
index a97e0bb..0851dbd 100644
--- a/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
+++ b/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
@@ -66,7 +66,7 @@ zfs_vmobject_assert_wlocked(vm_object_t object)
* be too helpful, but it must be an hard function for
* compatibility reasons.
*/
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
}
void
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index 9c943cc..2c3e4b9 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -623,7 +623,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
return 0;
bad:
mtx_unlock(&sc->as_lock);
- VM_OBJECT_LOCK_ASSERT(mem->am_obj, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
if (k >= i)
diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c
index 99eab7c..f3f82e7 100644
--- a/sys/dev/drm2/i915/i915_gem.c
+++ b/sys/dev/drm2/i915/i915_gem.c
@@ -2488,7 +2488,7 @@ i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
vm_page_t m;
int rv;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
if (vm_pager_has_page(object, pindex, NULL, NULL)) {
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index e4567df..764219f 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3457,7 +3457,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
mpte = NULL;
@@ -3711,7 +3711,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
- VM_OBJECT_LOCK_ASSERT(m_start->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@@ -3889,7 +3889,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p;
int pat_mode;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if (pseflag &&
@@ -4508,7 +4508,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -4643,7 +4643,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -4795,7 +4795,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index a662eaf..9a5aec0 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2667,7 +2667,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
mpte = NULL;
@@ -2870,7 +2870,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
multicall_entry_t *mclp = mcl;
int error, count = 0;
- VM_OBJECT_LOCK_ASSERT(m_start->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@@ -3110,7 +3110,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p;
int pat_mode;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if (pseflag &&
@@ -3656,7 +3656,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@@ -3787,7 +3787,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -3888,7 +3888,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index f4bdb3f..c8aa9901 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1802,7 +1802,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m;
vm_pindex_t diff, psize;
- VM_OBJECT_LOCK_ASSERT(m_start->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
m = m_start;
rw_wlock(&pvh_global_lock);
@@ -1893,7 +1893,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -2211,7 +2211,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be dirty.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@@ -2295,7 +2295,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@@ -2373,7 +2373,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 12217e4..66da0d0 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -459,7 +459,7 @@ vfs_buf_test_cache(struct buf *bp,
vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (bp->b_flags & B_CACHE) {
int base = (foff + off) & PAGE_MASK;
if (vm_page_is_valid(m, base, size) == 0)
@@ -2534,7 +2534,7 @@ vfs_setdirty_locked_object(struct buf *bp)
int i;
object = bp->b_bufobj->bo_object;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* We qualify the scan for modified pages on whether the
@@ -3567,7 +3567,7 @@ vfs_drain_busy_pages(struct buf *bp)
vm_page_t m;
int i, last_busied;
- VM_OBJECT_LOCK_ASSERT(bp->b_bufobj->bo_object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
last_busied = 0;
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
@@ -3720,7 +3720,7 @@ vfs_bio_clrbuf(struct buf *bp)
if (bp->b_pages[0] == bogus_page)
goto unlock;
mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
- VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
if ((bp->b_pages[0]->valid & mask) == mask)
goto unlock;
if ((bp->b_pages[0]->valid & mask) == 0) {
@@ -3739,7 +3739,7 @@ vfs_bio_clrbuf(struct buf *bp)
continue;
j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
- VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
if ((bp->b_pages[i]->valid & mask) == mask)
continue;
if ((bp->b_pages[i]->valid & mask) == 0)
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 1660cb4..70937a2 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -413,8 +413,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
tinc = tsize;
if (toff + tinc > PAGE_SIZE)
tinc = PAGE_SIZE - toff;
- VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
- RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
if ((tbp->b_pages[j]->valid &
vm_page_bits(toff, tinc)) != 0)
break;
@@ -490,7 +489,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
*/
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (j = 0; j < bp->b_npages; j++) {
- VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
bp->b_pages[j] = bogus_page;
}
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index d1eaea7..9e1b812 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2399,7 +2399,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
- VM_OBJECT_LOCK_ASSERT(m_start->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@@ -2423,7 +2423,7 @@ void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -2768,7 +2768,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -2834,7 +2834,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_D set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -2882,7 +2882,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index aad9bd5..6a339ca 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1122,7 +1122,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
/* XXX change the pvo head for fake pages */
if ((m->oflags & VPO_UNMANAGED) != 0) {
@@ -1291,7 +1291,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_CHG set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -1331,7 +1331,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("moea_clear_modify: page %p is busy", m));
@@ -1366,7 +1366,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 5db1ac9..14b88f0 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1184,7 +1184,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
}
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
/* XXX change the pvo head for fake pages */
if ((m->oflags & VPO_UNMANAGED) != 0) {
@@ -1447,7 +1447,7 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have LPTE_CHG set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -1482,7 +1482,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea64_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("moea64_clear_modify: page %p is busy", m));
@@ -1515,7 +1515,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index c3c18da..0785e42 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -1562,7 +1562,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
("mmu_booke_enter_locked: user pmap, non user va"));
}
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1959,7 +1959,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -2174,7 +2174,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be modified.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@@ -2246,7 +2246,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("mmu_booke_clear_modify: page %p is busy", m));
@@ -2661,7 +2661,7 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("mmu_booke_object_init_pt: non-device object"));
}
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 399e53f..8bfc454 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1495,7 +1495,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
rw_assert(&tte_list_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pm, MA_OWNED);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
PMAP_STATS_INC(pmap_nenter);
pa = VM_PAGE_TO_PHYS(m);
@@ -1662,7 +1662,7 @@ pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -2060,7 +2060,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no TTEs can have TD_W set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@@ -2128,7 +2128,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@@ -2183,7 +2183,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index fd38add..fd20664 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -207,7 +207,7 @@ void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type == OBJT_MGTDEVICE) {
KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
pmap_remove_all(m);
@@ -222,7 +222,7 @@ static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->type == OBJT_DEVICE &&
(m->oflags & VPO_UNMANAGED) != 0),
("Managed device or page obj %p m %p", object, m));
@@ -259,11 +259,11 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
{
int error, i;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
error = object->un_pager.devp.ops->cdev_pg_fault(object,
IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (i = 0; i < count; i++) {
if (i != reqpage) {
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index 0dabac7..7b9f7b2 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -140,7 +140,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
int i;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (i = 0; i < count; i++) {
if (m[i]->valid == 0) {
if ((m[i]->flags & PG_ZERO) == 0)
diff --git a/sys/vm/sg_pager.c b/sys/vm/sg_pager.c
index ae2542a..76cae68 100644
--- a/sys/vm/sg_pager.c
+++ b/sys/vm/sg_pager.c
@@ -143,7 +143,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
size_t space;
int i;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
sg = object->handle;
memattr = object->memattr;
VM_OBJECT_WUNLOCK(object);
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 8d8660a..4505536 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -676,7 +676,7 @@ swap_pager_dealloc(vm_object_t object)
mtx_unlock(&sw_alloc_mtx);
}
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
vm_object_pip_wait(object, "swpdea");
/*
@@ -817,7 +817,7 @@ void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
swp_pager_meta_free(object, start, size);
}
@@ -885,8 +885,8 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
{
vm_pindex_t i;
- VM_OBJECT_LOCK_ASSERT(srcobject, RA_WLOCKED);
- VM_OBJECT_LOCK_ASSERT(dstobject, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(srcobject);
+ VM_OBJECT_ASSERT_WLOCKED(dstobject);
/*
* If destroysource is set, we remove the source object from the
@@ -989,7 +989,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *aft
{
daddr_t blk0;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* do we have good backing store at the requested index ?
*/
@@ -1060,7 +1060,7 @@ static void
swap_pager_unswapped(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
@@ -1654,7 +1654,7 @@ swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
int bcount;
int i;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return (0);
@@ -1810,7 +1810,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
struct swblock **pswap;
int idx;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Convert default object to swap object if necessary
*/
@@ -1908,7 +1908,7 @@ static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return;
@@ -1954,7 +1954,7 @@ swp_pager_meta_free_all(vm_object_t object)
{
daddr_t index = 0;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return;
@@ -2013,7 +2013,7 @@ swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
daddr_t r1;
int idx;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* The meta data only exists of the object is OBJT_SWAP
* and even then might not be allocated yet.
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 23a0d6d..6c41c07 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -960,7 +960,7 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
vm_pindex_t pindex;
object = fs->object;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
first_object = fs->first_object;
if (first_object != object) {
if (!VM_OBJECT_TRYWLOCK(first_object)) {
@@ -1403,7 +1403,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t rtm;
int cbehind, cahead;
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
object = m->object;
pindex = m->pindex;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 46c30b8..5fd0e64 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -301,7 +301,7 @@ void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->flags &= ~bits;
}
@@ -318,7 +318,7 @@ int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
switch (object->type) {
case OBJT_DEFAULT:
case OBJT_DEVICE:
@@ -344,7 +344,7 @@ void
vm_object_pip_add(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress += i;
}
@@ -352,7 +352,7 @@ void
vm_object_pip_subtract(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress -= i;
}
@@ -360,7 +360,7 @@ void
vm_object_pip_wakeup(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress--;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
@@ -372,7 +372,7 @@ void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (i)
object->paging_in_progress -= i;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
@@ -385,7 +385,7 @@ void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
while (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
@@ -436,7 +436,7 @@ vm_object_reference_locked(vm_object_t object)
{
struct vnode *vp;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->ref_count++;
if (object->type == OBJT_VNODE) {
vp = object->handle;
@@ -452,7 +452,7 @@ vm_object_vndeallocate(vm_object_t object)
{
struct vnode *vp = (struct vnode *) object->handle;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@@ -675,7 +675,7 @@ vm_object_terminate(vm_object_t object)
{
vm_page_t p, p_next;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Make sure no one uses us.
@@ -816,7 +816,7 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
int curgeneration, n, pagerflags;
boolean_t clearobjflags, eio, res;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
object->resident_page_count == 0)
@@ -902,7 +902,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
int count, i, mreq, runlen;
vm_page_lock_assert(p, MA_NOTOWNED);
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
count = 1;
mreq = 0;
@@ -1404,8 +1404,8 @@ vm_object_backing_scan(vm_object_t object, int op)
vm_object_t backing_object;
vm_pindex_t backing_offset_index;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
- VM_OBJECT_LOCK_ASSERT(object->backing_object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
backing_object = object->backing_object;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1625,8 +1625,8 @@ vm_object_qcollapse(vm_object_t object)
{
vm_object_t backing_object = object->backing_object;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
- VM_OBJECT_LOCK_ASSERT(backing_object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ VM_OBJECT_ASSERT_WLOCKED(backing_object);
if (backing_object->ref_count != 1)
return;
@@ -1644,7 +1644,7 @@ vm_object_qcollapse(vm_object_t object)
void
vm_object_collapse(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
while (TRUE) {
vm_object_t backing_object;
@@ -1852,7 +1852,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
vm_page_t p, next;
int wirings;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
(options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
("vm_object_page_remove: illegal options for object %p", object));
@@ -1947,7 +1947,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
struct mtx *mtx, *new_mtx;
vm_page_t p, next;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
("vm_object_page_cache: illegal object %p", object));
if (object->resident_page_count == 0)
@@ -1995,7 +1995,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
vm_pindex_t pindex;
int rv;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (pindex = start; pindex < end; pindex++) {
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
VM_ALLOC_RETRY);
@@ -2146,7 +2146,7 @@ void
vm_object_set_writeable_dirty(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_VNODE)
return;
object->generation++;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 5440fd8..a40d360 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -204,8 +204,8 @@ extern struct vm_object kmem_object_store;
#define kernel_object (&kernel_object_store)
#define kmem_object (&kmem_object_store)
-#define VM_OBJECT_LOCK_ASSERT(object, type) \
- rw_assert(&(object)->lock, (type))
+#define VM_OBJECT_ASSERT_WLOCKED(object) \
+ rw_assert(&(object)->lock, RA_WLOCKED)
#define VM_OBJECT_LOCK_INIT(object, name) \
rw_init_flags(&(object)->lock, (name), RW_DUPOK)
#define VM_OBJECT_RLOCK(object) \
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 2ed65b6..18f1918 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -469,7 +469,7 @@ void
vm_page_busy(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("vm_page_busy: page already busy!!!"));
m->oflags |= VPO_BUSY;
@@ -484,7 +484,7 @@ void
vm_page_flash(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->oflags & VPO_WANTED) {
m->oflags &= ~VPO_WANTED;
wakeup(m);
@@ -502,7 +502,7 @@ void
vm_page_wakeup(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
m->oflags &= ~VPO_BUSY;
vm_page_flash(m);
@@ -512,7 +512,7 @@ void
vm_page_io_start(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
m->busy++;
}
@@ -520,7 +520,7 @@ void
vm_page_io_finish(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
m->busy--;
if (m->busy == 0)
@@ -752,7 +752,7 @@ void
vm_page_sleep(vm_page_t m, const char *msg)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (mtx_owned(vm_page_lockptr(m)))
vm_page_unlock(m);
@@ -867,7 +867,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
vm_page_t root;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (m->object != NULL)
panic("vm_page_insert: page already inserted");
@@ -943,7 +943,7 @@ vm_page_remove(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if ((object = m->object) == NULL)
return;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (m->oflags & VPO_BUSY) {
m->oflags &= ~VPO_BUSY;
vm_page_flash(m);
@@ -1017,7 +1017,7 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
vm_page_t m;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((m = object->root) != NULL && m->pindex != pindex) {
m = vm_page_splay(pindex, m);
if ((object->root = m)->pindex != pindex)
@@ -1039,7 +1039,7 @@ vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
vm_page_t m;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
if (m->pindex < pindex) {
m = vm_page_splay(pindex, object->root);
@@ -1061,7 +1061,7 @@ vm_page_next(vm_page_t m)
{
vm_page_t next;
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((next = TAILQ_NEXT(m, listq)) != NULL &&
next->pindex != m->pindex + 1)
next = NULL;
@@ -1079,7 +1079,7 @@ vm_page_prev(vm_page_t m)
{
vm_page_t prev;
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
prev->pindex != m->pindex - 1)
prev = NULL;
@@ -1257,7 +1257,7 @@ vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
* requires the object to be locked. In contrast, removal does
* not.
*/
- VM_OBJECT_LOCK_ASSERT(new_object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(new_object);
KASSERT(new_object->cache == NULL,
("vm_page_cache_transfer: object %p has cached pages",
new_object));
@@ -1327,7 +1327,7 @@ vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
* page queues lock in order to prove that the specified page doesn't
* exist.
*/
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (__predict_true(object->cache == NULL))
return (FALSE);
mtx_lock(&vm_page_queue_free_mtx);
@@ -1376,7 +1376,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
("vm_page_alloc: inconsistent object/req"));
if (object != NULL)
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
req_class = req & VM_ALLOC_CLASS_MASK;
@@ -1583,7 +1583,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
("vm_page_alloc_contig: inconsistent object/req"));
if (object != NULL) {
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_PHYS,
("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
object));
@@ -1993,7 +1993,7 @@ vm_page_activate(vm_page_t m)
int queue;
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((queue = m->queue) != PQ_ACTIVE) {
if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@@ -2277,7 +2277,7 @@ vm_page_try_to_cache(vm_page_t m)
{
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
@@ -2300,7 +2300,7 @@ vm_page_try_to_free(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if (m->object != NULL)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
@@ -2326,7 +2326,7 @@ vm_page_cache(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
m->hold_count || m->wire_count)
panic("vm_page_cache: attempting to cache busy page");
@@ -2483,7 +2483,7 @@ vm_page_dontneed(vm_page_t m)
int head;
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
dnw = PCPU_GET(dnweight);
PCPU_INC(dnweight);
@@ -2548,7 +2548,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
vm_page_t m;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
("vm_page_grab: VM_ALLOC_RETRY is required"));
retrylookup:
@@ -2629,7 +2629,7 @@ vm_page_set_valid_range(vm_page_t m, int base, int size)
{
int endoff, frag;
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (size == 0) /* handle degenerate case */
return;
@@ -2682,7 +2682,7 @@ vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
* write mapped, then the page's dirty field cannot possibly be
* set by a concurrent pmap operation.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
m->dirty &= ~pagebits;
else {
@@ -2736,7 +2736,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
vm_page_bits_t oldvalid, pagebits;
int endoff, frag;
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (size == 0) /* handle degenerate case */
return;
@@ -2826,7 +2826,7 @@ vm_page_set_invalid(vm_page_t m, int base, int size)
{
vm_page_bits_t bits;
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("vm_page_set_invalid: page %p is busy", m));
bits = vm_page_bits(base, size);
@@ -2855,7 +2855,7 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
int b;
int i;
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
/*
* Scan the valid bits looking for invalid sections that
* must be zerod. Invalid sub-DEV_BSIZE'd areas ( where the
@@ -2894,7 +2894,7 @@ vm_page_is_valid(vm_page_t m, int base, int size)
{
vm_page_bits_t bits;
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
bits = vm_page_bits(base, size);
if (m->valid && ((m->valid & bits) == bits))
return 1;
@@ -2909,7 +2909,7 @@ void
vm_page_test_dirty(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
vm_page_dirty(m);
}
@@ -2963,7 +2963,7 @@ vm_page_cowfault(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->paging_in_progress != 0,
("vm_page_cowfault: object %p's paging-in-progress count is zero.",
object));
@@ -3056,7 +3056,7 @@ vm_page_object_lock_assert(vm_page_t m)
* here.
*/
if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
}
#endif
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index e87bc3a..c0a0da4 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -347,7 +347,7 @@ vm_pageout_clean(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
@@ -485,7 +485,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
int numpagedout = 0;
int i, runlen;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Initiate I/O. Bump the vm_page_t->busy counter and
@@ -714,13 +714,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
vm_page_t p;
int actcount, remove_mode;
- VM_OBJECT_LOCK_ASSERT(first_object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(first_object);
if ((first_object->flags & OBJ_FICTITIOUS) != 0)
return;
for (object = first_object;; object = backing_object) {
if (pmap_resident_count(pmap) <= desired)
goto unlock_return;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((object->flags & OBJ_UNMANAGED) != 0 ||
object->paging_in_progress != 0)
goto unlock_return;
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 9e4d76f..a991e41 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -249,7 +249,7 @@ vm_pager_deallocate(object)
vm_object_t object;
{
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
(*pagertab[object->type]->pgo_dealloc) (object);
}
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index eaf45a07..b5d923c 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -124,7 +124,7 @@ vm_pager_get_pages(
) {
int r;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
vm_page_zero_invalid(m[reqpage], TRUE);
@@ -141,7 +141,7 @@ vm_pager_put_pages(
int *rtvals
) {
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
(*pagertab[object->type]->pgo_putpages)
(object, m, count, flags, rtvals);
}
@@ -165,7 +165,7 @@ vm_pager_has_page(
) {
boolean_t ret;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
ret = (*pagertab[object->type]->pgo_haspage)
(object, offset, before, after);
return (ret);
@@ -188,7 +188,7 @@ static __inline void
vm_pager_page_unswapped(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (pagertab[m->object->type]->pgo_pageunswapped)
(*pagertab[m->object->type]->pgo_pageunswapped)(m);
}
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index 869742e..98b0de2 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -312,7 +312,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
int i, index, n;
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
/*
@@ -496,7 +496,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
vm_reserv_t rv;
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Is a reservation fundamentally impossible?
@@ -871,7 +871,7 @@ vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
{
vm_reserv_t rv;
- VM_OBJECT_LOCK_ASSERT(new_object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(new_object);
rv = vm_reserv_from_page(m);
if (rv->object == old_object) {
mtx_lock(&vm_page_queue_free_mtx);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 7160cf4..99c4023 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -260,7 +260,7 @@ vnode_pager_dealloc(object)
if (vp == NULL)
panic("vnode_pager_dealloc: pager already dealloced");
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
vm_object_pip_wait(object, "vnpdea");
refs = object->ref_count;
@@ -300,7 +300,7 @@ vnode_pager_haspage(object, pindex, before, after)
int bsize;
int pagesperblock, blocksperpage;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
@@ -595,7 +595,7 @@ vnode_pager_input_old(object, m)
struct sf_buf *sf;
struct vnode *vp;
- VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
error = 0;
/*