From 658534ed5a02db4fef5b0630008502474d6c26d6 Mon Sep 17 00:00:00 2001
From: attilio
Date: Wed, 20 Feb 2013 10:38:34 +0000
Subject: Switch vm_object lock to be a rwlock.

* VM_OBJECT_LOCK and VM_OBJECT_UNLOCK are mapped to write operations.
* VM_OBJECT_SLEEP() is introduced as a general-purpose primitive to get
  a sleep operation using VM_OBJECT_LOCK() as protection.
* The approach must bear with vm_pager.h namespace pollution, so many
  files need to include rwlock.h directly.
---
 sys/vm/default_pager.c |  2 +-
 sys/vm/device_pager.c  |  9 +++----
 sys/vm/phys_pager.c    |  3 ++-
 sys/vm/sg_pager.c      |  3 ++-
 sys/vm/swap_pager.c    | 25 +++++++++----------
 sys/vm/uma_core.c      |  3 ++-
 sys/vm/vm_fault.c      |  6 ++---
 sys/vm/vm_glue.c       |  1 +
 sys/vm/vm_init.c       |  2 +-
 sys/vm/vm_kern.c       |  2 +-
 sys/vm/vm_map.c        |  1 +
 sys/vm/vm_meter.c      |  1 +
 sys/vm/vm_mmap.c       |  1 +
 sys/vm/vm_object.c     | 65 +++++++++++++++++++++++++-------------------------
 sys/vm/vm_object.h     | 35 ++++++++++++++++-----------
 sys/vm/vm_page.c       | 65 +++++++++++++++++++++++++-------------------------
 sys/vm/vm_pageout.c    |  9 +++----
 sys/vm/vm_pager.c      |  3 ++-
 sys/vm/vm_pager.h      |  8 +++----
 sys/vm/vm_reserv.c     |  7 +++---
 sys/vm/vnode_pager.c   | 11 +++++----
 21 files changed, 141 insertions(+), 121 deletions(-)

diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index 12dc823..16ee405 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
-#include <sys/mutex.h>
+#include <sys/rwlock.h>
 #include
 #include

diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 809c32c..ae68d15 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
@@ -206,7 +207,7 @@ void
 cdev_pager_free_page(vm_object_t object, vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (object->type == OBJT_MGTDEVICE) {
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
 		pmap_remove_all(m);
@@ -221,7 +222,7 @@ static void
 dev_pager_free_page(vm_object_t object, vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	KASSERT((object->type == OBJT_DEVICE &&
 	    (m->oflags & VPO_UNMANAGED) != 0),
 	    ("Managed device or page obj %p m %p", object, m));
@@ -258,11 +259,11 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
 {
 	int error, i;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	error = object->un_pager.devp.ops->cdev_pg_fault(object,
 	    IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);

 	for (i = 0; i < count; i++) {
 		if (i != reqpage) {
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index 0ffafea..f81a7d1 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
@@ -139,7 +140,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 {
 	int i;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	for (i = 0; i < count; i++) {
 		if (m[i]->valid == 0) {
 			if ((m[i]->flags & PG_ZERO) == 0)
diff --git a/sys/vm/sg_pager.c b/sys/vm/sg_pager.c
index 097039e..b78b032 100644
--- a/sys/vm/sg_pager.c
+++ b/sys/vm/sg_pager.c
@@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -142,7 +143,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	size_t space;
 	int i;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	sg = object->handle;
 	memattr = object->memattr;
 	VM_OBJECT_UNLOCK(object);
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 44bff25..fcfefb5 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -89,6 +89,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -675,7 +676,7 @@ swap_pager_dealloc(vm_object_t object)
 		mtx_unlock(&sw_alloc_mtx);
 	}

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	vm_object_pip_wait(object, "swpdea");

 	/*
@@ -816,7 +817,7 @@ void
 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	swp_pager_meta_free(object, start, size);
 }
@@ -884,8 +885,8 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
 {
 	vm_pindex_t i;

-	VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(srcobject, RA_WLOCKED);
+	VM_OBJECT_LOCK_ASSERT(dstobject, RA_WLOCKED);

 	/*
 	 * If destroysource is set, we remove the source object from the
@@ -988,7 +989,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *aft
 {
 	daddr_t blk0;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	/*
 	 * do we have good backing store at the requested index ?
 	 */
@@ -1059,7 +1060,7 @@ static void
 swap_pager_unswapped(vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
 }
@@ -1213,7 +1214,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
 		mreq->oflags |= VPO_WANTED;
 		PCPU_INC(cnt.v_intrans);
-		if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
+		if (VM_OBJECT_SLEEP(mreq, object, PSWP, "swread", hz * 20)) {
 			printf(
 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
 			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
@@ -1653,7 +1654,7 @@ swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
 	int bcount;
 	int i;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (object->type != OBJT_SWAP)
 		return (0);
@@ -1809,7 +1810,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
 	struct swblock **pswap;
 	int idx;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	/*
 	 * Convert default object to swap object if necessary
 	 */
@@ -1907,7 +1908,7 @@ static void
 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (object->type != OBJT_SWAP)
 		return;
@@ -1953,7 +1954,7 @@ swp_pager_meta_free_all(vm_object_t object)
 {
 	daddr_t index = 0;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (object->type != OBJT_SWAP)
 		return;
@@ -2012,7 +2013,7 @@ swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
 	daddr_t r1;
 	int idx;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	/*
 	 * The meta data only exists of the object is OBJT_SWAP
 	 * and even then might not be allocated yet.
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 2d5b555..aeeec39 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -3031,7 +3032,7 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
 	if (obj == NULL)
 		obj = vm_object_allocate(OBJT_PHYS, pages);
 	else {
-		VM_OBJECT_LOCK_INIT(obj, "uma object");
+		VM_OBJECT_LOCK_INIT(obj, "uma vm object");
 		_vm_object_allocate(OBJT_PHYS, pages, obj);
 	}
 	ZONE_LOCK(zone);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index b79b3f5..9517cd3 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -81,9 +81,9 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
-#include <sys/mutex.h>
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -960,7 +960,7 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
 	vm_pindex_t pindex;

 	object = fs->object;
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	first_object = fs->first_object;
 	if (first_object != object) {
 		if (!VM_OBJECT_TRYLOCK(first_object)) {
@@ -1403,7 +1403,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
 	vm_page_t rtm;
 	int cbehind, cahead;

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);

 	object = m->object;
 	pindex = m->pindex;
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index f44f04c..a1f5db5 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index c507691..c265e30 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -68,8 +68,8 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
-#include <sys/mutex.h>
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index ad9aa0d..593a37d 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -70,9 +70,9 @@ __FBSDID("$FreeBSD$");
 #include		/* for ticks and hz */
 #include
 #include
-#include <sys/mutex.h>
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 26de826..e915079 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -77,6 +77,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 05174e9..2e1d11c 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 05bb8ae..af271f1 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 32b0779..5b9415b 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$");
 #include		/* for curproc, pageproc */
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -193,8 +194,8 @@ vm_object_zinit(void *mem, int size, int flags)
 	vm_object_t object;

 	object = (vm_object_t)mem;
-	bzero(&object->mtx, sizeof(object->mtx));
-	VM_OBJECT_LOCK_INIT(object, "standard object");
+	bzero(&object->lock, sizeof(object->lock));
+	VM_OBJECT_LOCK_INIT(object, "standard vm object");

 	/* These are true for any object that has been freed */
 	object->paging_in_progress = 0;
@@ -266,7 +267,7 @@ vm_object_init(void)
 	TAILQ_INIT(&vm_object_list);
 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

-	VM_OBJECT_LOCK_INIT(kernel_object, "kernel object");
+	VM_OBJECT_LOCK_INIT(kernel_object, "kernel vm object");
 	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
 	    VM_MIN_KERNEL_ADDRESS), kernel_object);
 #if VM_NRESERVLEVEL > 0
@@ -274,7 +275,7 @@ vm_object_init(void)
 	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
 #endif

-	VM_OBJECT_LOCK_INIT(kmem_object, "kmem object");
+	VM_OBJECT_LOCK_INIT(kmem_object, "kmem vm object");
 	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
 	    VM_MIN_KERNEL_ADDRESS), kmem_object);
 #if VM_NRESERVLEVEL > 0
@@ -300,7 +301,7 @@ void
 vm_object_clear_flag(vm_object_t object, u_short bits)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	object->flags &= ~bits;
 }
@@ -317,7 +318,7 @@ int
 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	switch (object->type) {
 	case OBJT_DEFAULT:
 	case OBJT_DEVICE:
@@ -343,7 +344,7 @@ void
 vm_object_pip_add(vm_object_t object, short i)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	object->paging_in_progress += i;
 }
@@ -351,7 +352,7 @@ void
 vm_object_pip_subtract(vm_object_t object, short i)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	object->paging_in_progress -= i;
 }
@@ -359,7 +360,7 @@ void
 vm_object_pip_wakeup(vm_object_t object)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	object->paging_in_progress--;
 	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
 		vm_object_clear_flag(object, OBJ_PIPWNT);
@@ -371,7 +372,7 @@ void
 vm_object_pip_wakeupn(vm_object_t object, short i)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (i)
 		object->paging_in_progress -= i;
 	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
@@ -384,10 +385,10 @@ void
 vm_object_pip_wait(vm_object_t object, char *waitid)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	while (object->paging_in_progress) {
 		object->flags |= OBJ_PIPWNT;
-		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
+		VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
 	}
 }
@@ -435,7 +436,7 @@ vm_object_reference_locked(vm_object_t object)
 {
 	struct vnode *vp;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	object->ref_count++;
 	if (object->type == OBJT_VNODE) {
 		vp = object->handle;
@@ -451,7 +452,7 @@ vm_object_vndeallocate(vm_object_t object)
 {
 	struct vnode *vp = (struct vnode *) object->handle;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	KASSERT(object->type == OBJT_VNODE,
 	    ("vm_object_vndeallocate: not a vnode object"));
 	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@@ -579,9 +580,8 @@ retry:
 		} else if (object->paging_in_progress) {
 			VM_OBJECT_UNLOCK(robject);
 			object->flags |= OBJ_PIPWNT;
-			msleep(object,
-			    VM_OBJECT_MTX(object),
-			    PDROP | PVM, "objde2", 0);
+			VM_OBJECT_SLEEP(object, object,
+			    PDROP | PVM, "objde2", 0);
 			VM_OBJECT_LOCK(robject);
 			temp = robject->backing_object;
 			if (object == temp) {
@@ -675,7 +675,7 @@ vm_object_terminate(vm_object_t object)
 {
 	vm_page_t p, p_next;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);

 	/*
 	 * Make sure no one uses us.
@@ -816,7 +816,7 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
 	int curgeneration, n, pagerflags;
 	boolean_t clearobjflags, eio, res;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
 	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
 	    object->resident_page_count == 0)
@@ -902,7 +902,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
 	int count, i, mreq, runlen;

 	vm_page_lock_assert(p, MA_NOTOWNED);
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);

 	count = 1;
 	mreq = 0;
@@ -1139,8 +1139,7 @@ shadowlookup:
 			if (object != tobject)
 				VM_OBJECT_UNLOCK(object);
 			m->oflags |= VPO_WANTED;
-			msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo",
-			    0);
+			VM_OBJECT_SLEEP(m, tobject, PDROP | PVM, "madvpo", 0);
 			VM_OBJECT_LOCK(object);
 			goto relookup;
 		}
@@ -1338,7 +1337,7 @@ retry:
 		if ((m->oflags & VPO_BUSY) || m->busy) {
 			VM_OBJECT_UNLOCK(new_object);
 			m->oflags |= VPO_WANTED;
-			msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
+			VM_OBJECT_SLEEP(m, orig_object, PVM, "spltwt", 0);
 			VM_OBJECT_LOCK(new_object);
 			goto retry;
 		}
@@ -1405,8 +1404,8 @@ vm_object_backing_scan(vm_object_t object, int op)
 	vm_object_t backing_object;
 	vm_pindex_t backing_offset_index;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+	VM_OBJECT_LOCK_ASSERT(object->backing_object, RA_WLOCKED);

 	backing_object = object->backing_object;
 	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1496,7 +1495,7 @@ vm_object_backing_scan(vm_object_t object, int op)
 			if ((p->oflags & VPO_BUSY) || p->busy) {
 				VM_OBJECT_UNLOCK(object);
 				p->oflags |= VPO_WANTED;
-				msleep(p, VM_OBJECT_MTX(backing_object),
+				VM_OBJECT_SLEEP(p, backing_object,
 				    PDROP | PVM, "vmocol", 0);
 				VM_OBJECT_LOCK(object);
 				VM_OBJECT_LOCK(backing_object);
@@ -1626,8 +1625,8 @@ vm_object_qcollapse(vm_object_t object)
 {
 	vm_object_t backing_object = object->backing_object;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
+	VM_OBJECT_LOCK_ASSERT(backing_object, RA_WLOCKED);

 	if (backing_object->ref_count != 1)
 		return;
@@ -1645,7 +1644,7 @@ vm_object_qcollapse(vm_object_t object)
 void
 vm_object_collapse(vm_object_t object)
 {
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);

 	while (TRUE) {
 		vm_object_t backing_object;
@@ -1853,7 +1852,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
 	vm_page_t p, next;
 	int wirings;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
 	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
 	    ("vm_object_page_remove: illegal options for object %p", object));
@@ -1948,7 +1947,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 	struct mtx *mtx, *new_mtx;
 	vm_page_t p, next;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
 	    ("vm_object_page_cache: illegal object %p", object));
 	if (object->resident_page_count == 0)
@@ -1996,7 +1995,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 	vm_pindex_t pindex;
 	int rv;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	for (pindex = start; pindex < end; pindex++) {
 		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
 		    VM_ALLOC_RETRY);
@@ -2147,7 +2146,7 @@ void
 vm_object_set_writeable_dirty(vm_object_t object)
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (object->type != OBJT_VNODE)
 		return;
 	object->generation++;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 8134752..c01c638 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -70,15 +70,16 @@
 #include
 #include
 #include
+#include <sys/rwlock.h>

 /*
  *	Types defined:
  *
  *	vm_object_t		Virtual memory object.
  *
- *	The root of cached pages pool is protected by both the per-object mutex
+ *	The root of cached pages pool is protected by both the per-object lock
  *	and the free pages queue mutex.
- *	On insert in the cache splay tree, the per-object mutex is expected
+ *	On insert in the cache splay tree, the per-object lock is expected
  *	to be already held and the free pages queue mutex will be
  *	acquired during the operation too.
  *	On remove and lookup from the cache splay tree, only the free
@@ -89,13 +90,13 @@
  *
  *	List of locks
  *	(c)	const until freed
- *	(o)	per-object mutex
+ *	(o)	per-object lock
  *	(f)	free pages queue mutex
  *
  */

 struct vm_object {
-	struct mtx mtx;
+	struct rwlock lock;
 	TAILQ_ENTRY(vm_object) object_list;	/* list of all objects */
 	LIST_HEAD(, vm_object) shadow_head;	/* objects that this is a shadow for */
 	LIST_ENTRY(vm_object) shadow_list;	/* chain of shadow objects */
@@ -203,16 +204,22 @@ extern struct vm_object kmem_object_store;
 #define	kernel_object	(&kernel_object_store)
 #define	kmem_object	(&kmem_object_store)

-#define	VM_OBJECT_LOCK(object)		mtx_lock(&(object)->mtx)
-#define	VM_OBJECT_LOCK_ASSERT(object, type) \
-					mtx_assert(&(object)->mtx, (type))
-#define	VM_OBJECT_LOCK_INIT(object, type) \
-					mtx_init(&(object)->mtx, "vm object", \
-					    (type), MTX_DEF | MTX_DUPOK)
-#define	VM_OBJECT_LOCKED(object)	mtx_owned(&(object)->mtx)
-#define	VM_OBJECT_MTX(object)		(&(object)->mtx)
-#define	VM_OBJECT_TRYLOCK(object)	mtx_trylock(&(object)->mtx)
-#define	VM_OBJECT_UNLOCK(object)	mtx_unlock(&(object)->mtx)
+#define	VM_OBJECT_LOCK(object) \
+	rw_wlock(&(object)->lock)
+#define	VM_OBJECT_LOCK_ASSERT(object, type) \
+	rw_assert(&(object)->lock, (type))
+#define	VM_OBJECT_LOCK_INIT(object, name) \
+	rw_init_flags(&(object)->lock, (name), RW_DUPOK)
+#define	VM_OBJECT_LOCKED(object) \
+	rw_wowned(&(object)->lock)
+#define	VM_OBJECT_LOCKPTR(object) \
+	(&(object)->lock)
+#define	VM_OBJECT_SLEEP(wchan, object, pri, wmesg, timo) \
+	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
+#define	VM_OBJECT_TRYLOCK(object) \
+	rw_try_wlock(&(object)->lock)
+#define	VM_OBJECT_UNLOCK(object) \
+	rw_wunlock(&(object)->lock)

 /*
  *	The object must be locked or thread private.
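
A note for readers following the conversion: the vm_object.h hunk above is the
heart of the change. Every consumer that previously slept with
msleep(..., VM_OBJECT_MTX(object), ...) now goes through VM_OBJECT_SLEEP(),
which wraps rw_sleep() and therefore atomically releases the object's rwlock
while sleeping. Below is a minimal caller-side sketch of the new pattern; it
is illustrative only and not part of this commit, and the function name and
wait-message string are invented:

	/* Hypothetical example, not from this patch. */
	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/rwlock.h>

	#include <vm/vm.h>
	#include <vm/vm_object.h>
	#include <vm/vm_page.h>

	static void
	example_wait_for_page(vm_object_t object, vm_page_t m)
	{

		VM_OBJECT_LOCK(object);		/* rw_wlock(&object->lock) */
		VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
		while ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
			m->oflags |= VPO_WANTED;
			/*
			 * Drops the object lock, sleeps on m, and reacquires
			 * the lock before returning (no PDROP given here).
			 */
			VM_OBJECT_SLEEP(m, object, PVM, "expgwt", 0);
		}
		VM_OBJECT_UNLOCK(object);	/* rw_wunlock(&object->lock) */
	}

This is also why every VM_OBJECT_LOCK_ASSERT() call site in the patch changes
even though the macro name stays the same: the assertion constant moves from
mtx_assert()'s MA_OWNED to rw_assert()'s RA_WLOCKED.
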
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index c952c05..2908d11 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -96,6 +96,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -468,7 +469,7 @@ void
 vm_page_busy(vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
 	    ("vm_page_busy: page already busy!!!"));
 	m->oflags |= VPO_BUSY;
@@ -483,7 +484,7 @@ void
 vm_page_flash(vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if (m->oflags & VPO_WANTED) {
 		m->oflags &= ~VPO_WANTED;
 		wakeup(m);
@@ -501,7 +502,7 @@ void
 vm_page_wakeup(vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
 	m->oflags &= ~VPO_BUSY;
 	vm_page_flash(m);
@@ -511,7 +512,7 @@ void
 vm_page_io_start(vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	m->busy++;
 }
@@ -519,7 +520,7 @@ void
 vm_page_io_finish(vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
 	m->busy--;
 	if (m->busy == 0)
@@ -751,7 +752,7 @@ void
 vm_page_sleep(vm_page_t m, const char *msg)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if (mtx_owned(vm_page_lockptr(m)))
 		vm_page_unlock(m);
@@ -763,7 +764,7 @@ vm_page_sleep(vm_page_t m, const char *msg)
 	 * it.
 	 */
 	m->oflags |= VPO_WANTED;
-	msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
+	VM_OBJECT_SLEEP(m, m->object, PVM, msg, 0);
 }

 /*
@@ -866,7 +867,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t root;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (m->object != NULL)
 		panic("vm_page_insert: page already inserted");
@@ -942,7 +943,7 @@ vm_page_remove(vm_page_t m)
 		vm_page_lock_assert(m, MA_OWNED);
 	if ((object = m->object) == NULL)
 		return;
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (m->oflags & VPO_BUSY) {
 		m->oflags &= ~VPO_BUSY;
 		vm_page_flash(m);
@@ -1016,7 +1017,7 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if ((m = object->root) != NULL && m->pindex != pindex) {
 		m = vm_page_splay(pindex, m);
 		if ((object->root = m)->pindex != pindex)
@@ -1038,7 +1039,7 @@ vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
 		if (m->pindex < pindex) {
 			m = vm_page_splay(pindex, object->root);
@@ -1060,7 +1061,7 @@ vm_page_next(vm_page_t m)
 {
 	vm_page_t next;

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
 	    next->pindex != m->pindex + 1)
 		next = NULL;
@@ -1078,7 +1079,7 @@ vm_page_prev(vm_page_t m)
 {
 	vm_page_t prev;

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
 	    prev->pindex != m->pindex - 1)
 		prev = NULL;
@@ -1256,7 +1257,7 @@ vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
 	 * requires the object to be locked.  In contrast, removal does
 	 * not.
 	 */
-	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(new_object, RA_WLOCKED);
 	KASSERT(new_object->cache == NULL,
 	    ("vm_page_cache_transfer: object %p has cached pages",
 	    new_object));
@@ -1326,7 +1327,7 @@ vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
 	 * page queues lock in order to prove that the specified page doesn't
 	 * exist.
 	 */
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if (__predict_true(object->cache == NULL))
 		return (FALSE);
 	mtx_lock(&vm_page_queue_free_mtx);
@@ -1375,7 +1376,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
 	    ("vm_page_alloc: inconsistent object/req"));
 	if (object != NULL)
-		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+		VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);

 	req_class = req & VM_ALLOC_CLASS_MASK;
@@ -1582,7 +1583,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
 	    ("vm_page_alloc_contig: inconsistent object/req"));
 	if (object != NULL) {
-		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+		VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 		KASSERT(object->type == OBJT_PHYS,
 		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
 		    object));
@@ -1992,7 +1993,7 @@ vm_page_activate(vm_page_t m)
 	int queue;

 	vm_page_lock_assert(m, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if ((queue = m->queue) != PQ_ACTIVE) {
 		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
 			if (m->act_count < ACT_INIT)
@@ -2276,7 +2277,7 @@ vm_page_try_to_cache(vm_page_t m)
 {

 	vm_page_lock_assert(m, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
 	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
 		return (0);
@@ -2299,7 +2300,7 @@ vm_page_try_to_free(vm_page_t m)

 	vm_page_lock_assert(m, MA_OWNED);
 	if (m->object != NULL)
-		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
 	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
 		return (0);
@@ -2325,7 +2326,7 @@ vm_page_cache(vm_page_t m)

 	vm_page_lock_assert(m, MA_OWNED);
 	object = m->object;
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
 	    m->hold_count || m->wire_count)
 		panic("vm_page_cache: attempting to cache busy page");
@@ -2482,7 +2483,7 @@ vm_page_dontneed(vm_page_t m)
 	int head;

 	vm_page_lock_assert(m, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	dnw = PCPU_GET(dnweight);
 	PCPU_INC(dnweight);
@@ -2547,7 +2548,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
 	    ("vm_page_grab: VM_ALLOC_RETRY is required"));
 retrylookup:
@@ -2628,7 +2629,7 @@ vm_page_set_valid_range(vm_page_t m, int base, int size)
 {
 	int endoff, frag;

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if (size == 0)	/* handle degenerate case */
 		return;
@@ -2681,7 +2682,7 @@ vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
 	 * write mapped, then the page's dirty field cannot possibly be
 	 * set by a concurrent pmap operation.
 	 */
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
 		m->dirty &= ~pagebits;
 	else {
@@ -2735,7 +2736,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	vm_page_bits_t oldvalid, pagebits;
 	int endoff, frag;

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if (size == 0)	/* handle degenerate case */
 		return;
@@ -2825,7 +2826,7 @@ vm_page_set_invalid(vm_page_t m, int base, int size)
 {
 	vm_page_bits_t bits;

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
 	    ("vm_page_set_invalid: page %p is busy", m));
 	bits = vm_page_bits(base, size);
@@ -2854,7 +2855,7 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 	int b;
 	int i;

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	/*
 	 * Scan the valid bits looking for invalid sections that
 	 * must be zerod.  Invalid sub-DEV_BSIZE'd areas ( where the
@@ -2893,7 +2894,7 @@ vm_page_is_valid(vm_page_t m, int base, int size)
 {
 	vm_page_bits_t bits;

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	bits = vm_page_bits(base, size);
 	if (m->valid && ((m->valid & bits) == bits))
 		return 1;
@@ -2908,7 +2909,7 @@ void
 vm_page_test_dirty(vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
 		vm_page_dirty(m);
 }
@@ -2962,7 +2963,7 @@ vm_page_cowfault(vm_page_t m)

 	vm_page_lock_assert(m, MA_OWNED);
 	object = m->object;
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	KASSERT(object->paging_in_progress != 0,
 	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
 	    object));
@@ -3055,7 +3056,7 @@ vm_page_object_lock_assert(vm_page_t m)
 	 * here.
 	 */
 	if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
-		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 }
 #endif
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index ac593a4..f6fe303 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -92,6 +92,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
@@ -346,7 +347,7 @@ vm_pageout_clean(vm_page_t m)

 	vm_page_lock_assert(m, MA_OWNED);
 	object = m->object;
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);

 	/*
 	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
@@ -484,7 +485,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
 	int numpagedout = 0;
 	int i, runlen;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);

 	/*
 	 * Initiate I/O.  Bump the vm_page_t->busy counter and
@@ -713,13 +714,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
 	vm_page_t p;
 	int actcount, remove_mode;

-	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(first_object, RA_WLOCKED);
 	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
 		return;
 	for (object = first_object;; object = backing_object) {
 		if (pmap_resident_count(pmap) <= desired)
 			goto unlock_return;
-		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+		VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 		if ((object->flags & OBJ_UNMANAGED) != 0 ||
 		    object->paging_in_progress != 0)
 			goto unlock_return;
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 6ed64ea..d2c114f 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -74,6 +74,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
@@ -248,7 +249,7 @@ vm_pager_deallocate(object)
 	vm_object_t object;
 {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	(*pagertab[object->type]->pgo_dealloc) (object);
 }
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index bb7a5ec..eaf45a07 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -124,7 +124,7 @@ vm_pager_get_pages(
 ) {
 	int r;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
 	if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
 		vm_page_zero_invalid(m[reqpage], TRUE);
@@ -141,7 +141,7 @@ vm_pager_put_pages(
 	int *rtvals
 ) {

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	(*pagertab[object->type]->pgo_putpages)
 	    (object, m, count, flags, rtvals);
 }
@@ -165,7 +165,7 @@ vm_pager_has_page(
 ) {
 	boolean_t ret;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	ret = (*pagertab[object->type]->pgo_haspage)
 	    (object, offset, before, after);
 	return (ret);
@@ -188,7 +188,7 @@ static __inline void
 vm_pager_page_unswapped(vm_page_t m)
 {

-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	if (pagertab[m->object->type]->pgo_pageunswapped)
 		(*pagertab[m->object->type]->pgo_pageunswapped)(m);
 }
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index e5ac1a5..869742e 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -311,7 +312,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
 	int i, index, n;

 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

 	/*
@@ -495,7 +496,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
 	vm_reserv_t rv;

 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);

 	/*
 	 * Is a reservation fundamentally impossible?
@@ -870,7 +871,7 @@ vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
 {
 	vm_reserv_t rv;

-	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(new_object, RA_WLOCKED);
 	rv = vm_reserv_from_page(m);
 	if (rv->object == old_object) {
 		mtx_lock(&vm_page_queue_free_mtx);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index a6d78f4..597c6a7 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
@@ -116,7 +117,7 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
 		}
 		VOP_UNLOCK(vp, 0);
 		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
-		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
+		VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vodead", 0);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 	}
@@ -210,7 +211,7 @@ retry:
 		if ((object->flags & OBJ_DEAD) == 0)
 			break;
 		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
-		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
+		VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vadead", 0);
 	}

 	if (vp->v_usecount == 0)
@@ -259,7 +260,7 @@ vnode_pager_dealloc(object)
 	if (vp == NULL)
 		panic("vnode_pager_dealloc: pager already dealloced");

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	vm_object_pip_wait(object, "vnpdea");

 	refs = object->ref_count;
@@ -299,7 +300,7 @@ vnode_pager_haspage(object, pindex, before, after)
 	int bsize;
 	int pagesperblock, blocksperpage;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	/*
 	 * If no vp or vp is doomed or marked transparent to VM, we do not
 	 * have the page.
@@ -594,7 +595,7 @@ vnode_pager_input_old(object, m)
 	struct sf_buf *sf;
 	struct vnode *vp;

-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
 	error = 0;

 	/*
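
A closing note on one design choice visible in the vm_object.h hunk: the new
lock is initialized with RW_DUPOK, mirroring the old MTX_DUPOK, because
collapse and shadow-chain code acquires two vm object locks of the same lock
class at once, which witness(4) would otherwise flag as a duplicate-class
acquisition. The sketch below illustrates that pattern; it is hypothetical,
not code from this commit, and the function name is invented:

	/* Hypothetical example, not from this patch. */
	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/rwlock.h>

	#include <vm/vm.h>
	#include <vm/vm_object.h>

	static void
	example_shadow_chain_walk(vm_object_t object)
	{
		vm_object_t backing_object;

		VM_OBJECT_LOCK(object);
		backing_object = object->backing_object;
		if (backing_object != NULL) {
			/* Second lock of the same class: needs RW_DUPOK. */
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
			VM_OBJECT_LOCK_ASSERT(backing_object, RA_WLOCKED);
			/* ... scan or collapse work would go here ... */
			VM_OBJECT_UNLOCK(backing_object);
		}
		VM_OBJECT_UNLOCK(object);
	}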