author    attilio <attilio@FreeBSD.org>  2013-03-09 03:19:53 +0000
committer attilio <attilio@FreeBSD.org>  2013-03-09 03:19:53 +0000
commit    76954ad68a25c559c6a8b2911674760afd4962f6 (patch)
tree      80cdb7116c19e2e4f42aeed31a65f76a54db11df /sys/vm
parent    993799493c64eb0b9faeab971fbe4ecfe0214278 (diff)
parent    16a80466e5837ad617b6b144297fd6069188b9b3 (diff)
Merge from vmcontention.
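
This merge replaces the per-object sleep mutex with an rwlock(9) lock: the hunks below rename VM_OBJECT_LOCK/VM_OBJECT_UNLOCK/VM_OBJECT_TRYLOCK and VM_OBJECT_LOCK_ASSERT(..., MA_OWNED) to VM_OBJECT_WLOCK/VM_OBJECT_WUNLOCK/VM_OBJECT_TRYWLOCK/VM_OBJECT_ASSERT_WLOCKED, swap <sys/mutex.h> includes for <sys/rwlock.h> where the mutex header was only needed for the object lock, and initialize the new lock with rw_init()/rw_init_flags() in vm_object.c. As a minimal sketch of how the renamed macros plausibly map onto rwlock(9) primitives -- the real definitions live in sys/vm/vm_object.h, whose hunks are not part of this excerpt:

/*
 * Illustrative sketch only; assumes the object lock member is named
 * "lock", matching the rw_init_flags() call visible in vm_object.c below.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

#define	VM_OBJECT_WLOCK(object)		rw_wlock(&(object)->lock)
#define	VM_OBJECT_WUNLOCK(object)	rw_wunlock(&(object)->lock)
#define	VM_OBJECT_TRYWLOCK(object)	rw_try_wlock(&(object)->lock)
#define	VM_OBJECT_ASSERT_WLOCKED(object)				\
	rw_assert(&(object)->lock, RA_WLOCKED)
#define	VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo)		\
	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))

A write-only conversion like this preserves the old mutual-exclusion semantics unchanged while opening the door to read-locking object lookups later.
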
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/default_pager.c |   6
-rw-r--r--  sys/vm/device_pager.c  |  23
-rw-r--r--  sys/vm/phys_pager.c    |   7
-rw-r--r--  sys/vm/sg_pager.c      |   7
-rw-r--r--  sys/vm/swap_pager.c    |  73
-rw-r--r--  sys/vm/uma_core.c      |   1
-rw-r--r--  sys/vm/vm_fault.c      |  84
-rw-r--r--  sys/vm/vm_glue.c       |  21
-rw-r--r--  sys/vm/vm_init.c       |   9
-rw-r--r--  sys/vm/vm_kern.c       |  32
-rw-r--r--  sys/vm/vm_map.c        |  45
-rw-r--r--  sys/vm/vm_meter.c      |   9
-rw-r--r--  sys/vm/vm_mmap.c       |  13
-rw-r--r--  sys/vm/vm_object.c     | 207
-rw-r--r--  sys/vm/vm_object.h     |  37
-rw-r--r--  sys/vm/vm_page.c       |  75
-rw-r--r--  sys/vm/vm_pageout.c    |  87
-rw-r--r--  sys/vm/vm_pager.c      |   9
-rw-r--r--  sys/vm/vm_pager.h      |   8
-rw-r--r--  sys/vm/vm_reserv.c     |   7
-rw-r--r--  sys/vm/vnode_pager.c   | 117
21 files changed, 449 insertions, 428 deletions
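
The conversion in the hunks below is mechanical: every acquire/release pair and every ownership assertion is rewritten for the write side of the new lock. A hedged before/after sketch of the pattern, using a hypothetical helper obj_set_flags() purely for illustration:

/* Before: sleep mutex protecting the object. */
static void
obj_set_flags(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK(object);
	object->flags |= bits;
	VM_OBJECT_UNLOCK(object);
}

/* After: exclusive (write) acquisition of the rwlock. */
static void
obj_set_flags(vm_object_t object, u_short bits)
{

	VM_OBJECT_WLOCK(object);
	object->flags |= bits;
	VM_OBJECT_WUNLOCK(object);
}

Code paths that must not block, such as the deadlock-avoidance path in swap_pager.c, use VM_OBJECT_TRYWLOCK() in place of the old VM_OBJECT_TRYLOCK().
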
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index 12dc823..a71a22a 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
-#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@@ -91,10 +91,10 @@ default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(round_page(offset + size)));
if (cred != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->cred = cred;
object->charge = size;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
return (object);
}
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 809c32c..fd20664 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <vm/vm.h>
@@ -206,7 +207,7 @@ void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type == OBJT_MGTDEVICE) {
KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
pmap_remove_all(m);
@@ -221,7 +222,7 @@ static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->type == OBJT_DEVICE &&
(m->oflags & VPO_UNMANAGED) != 0),
("Managed device or page obj %p m %p", object, m));
@@ -235,13 +236,13 @@ dev_pager_dealloc(object)
{
vm_page_t m;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);
mtx_lock(&dev_pager_mtx);
TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
mtx_unlock(&dev_pager_mtx);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_DEVICE) {
/*
@@ -258,11 +259,11 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
{
int error, i;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
error = object->un_pager.devp.ops->cdev_pg_fault(object,
IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (i = 0; i < count; i++) {
if (i != reqpage) {
@@ -304,12 +305,12 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
pidx = OFF_TO_IDX(offset);
memattr = object->memattr;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
dev = object->handle;
csw = dev_refthread(dev, &ref);
if (csw == NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
return (VM_PAGER_FAIL);
}
td = curthread;
@@ -321,7 +322,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
if (ret != 0) {
printf(
"WARNING: dev_pager_getpage: map function returns error %d", ret);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
return (VM_PAGER_FAIL);
}
@@ -338,7 +339,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
* the new physical address.
*/
page = *mres;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_updatefake(page, paddr, memattr);
} else {
/*
@@ -346,7 +347,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
* free up all of the original pages.
*/
page = vm_page_getfake(paddr, memattr);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_lock(*mres);
vm_page_free(*mres);
vm_page_unlock(*mres);
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index 0ffafea..7b9f7b2 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -123,11 +124,11 @@ phys_pager_dealloc(vm_object_t object)
{
if (object->handle != NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
mtx_lock(&phys_pager_mtx);
TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
mtx_unlock(&phys_pager_mtx);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
}
@@ -139,7 +140,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
int i;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (i = 0; i < count; i++) {
if (m[i]->valid == 0) {
if ((m[i]->flags & PG_ZERO) == 0)
diff --git a/sys/vm/sg_pager.c b/sys/vm/sg_pager.c
index 097039e..76cae68 100644
--- a/sys/vm/sg_pager.c
+++ b/sys/vm/sg_pager.c
@@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -142,10 +143,10 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
size_t space;
int i;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
sg = object->handle;
memattr = object->memattr;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
offset = m[reqpage]->pindex;
/*
@@ -180,7 +181,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/* Construct a new fake page. */
page = vm_page_getfake(paddr, memattr);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);
/* Free the original pages and insert this fake page into the object. */
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 712fd83..2049996 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -89,6 +89,7 @@ __FBSDID("$FreeBSD$");
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
@@ -621,14 +622,14 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->handle = handle;
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
sx_xunlock(&sw_alloc_sx);
mtx_unlock(&Giant);
@@ -639,13 +640,13 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
return (object);
}
@@ -674,7 +675,7 @@ swap_pager_dealloc(vm_object_t object)
mtx_unlock(&sw_alloc_mtx);
}
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
vm_object_pip_wait(object, "swpdea");
/*
@@ -815,7 +816,7 @@ void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
swp_pager_meta_free(object, start, size);
}
@@ -834,7 +835,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
daddr_t blk = SWAPBLK_NONE;
vm_pindex_t beg = start; /* save start index */
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while (size) {
if (n == 0) {
n = BLIST_MAX_ALLOC;
@@ -842,7 +843,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
n >>= 1;
if (n == 0) {
swp_pager_meta_free(object, beg, start - beg);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (-1);
}
}
@@ -854,7 +855,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
--n;
}
swp_pager_meta_free(object, start, n);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
@@ -883,8 +884,8 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
{
vm_pindex_t i;
- VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(srcobject);
+ VM_OBJECT_ASSERT_WLOCKED(dstobject);
/*
* If destroysource is set, we remove the source object from the
@@ -934,11 +935,11 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
* swp_pager_meta_build() can sleep.
*/
vm_object_pip_add(srcobject, 1);
- VM_OBJECT_UNLOCK(srcobject);
+ VM_OBJECT_WUNLOCK(srcobject);
vm_object_pip_add(dstobject, 1);
swp_pager_meta_build(dstobject, i, srcaddr);
vm_object_pip_wakeup(dstobject);
- VM_OBJECT_LOCK(srcobject);
+ VM_OBJECT_WLOCK(srcobject);
vm_object_pip_wakeup(srcobject);
}
} else {
@@ -987,7 +988,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *aft
{
daddr_t blk0;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* do we have good backing store at the requested index ?
*/
@@ -1058,7 +1059,7 @@ static void
swap_pager_unswapped(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
@@ -1147,7 +1148,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/*
* Getpbuf() can sleep.
*/
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* Get a swap buffer header to perform the IO
*/
@@ -1168,7 +1169,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
bp->b_bufsize = PAGE_SIZE * (j - i);
bp->b_pager.pg_reqpage = reqpage - i;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
{
int k;
@@ -1187,7 +1188,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
* does not remove it.
*/
vm_object_pip_add(object, bp->b_npages);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* perform the I/O. NOTE!!! bp cannot be considered valid after
@@ -1208,7 +1209,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
* cleared on completion. If an I/O error occurs, SWAPBLK_NONE
* is set in the meta-data.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
mreq->oflags |= VPO_WANTED;
PCPU_INC(cnt.v_intrans);
@@ -1283,7 +1284,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
*/
if (object->type != OBJT_SWAP)
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (curproc != pageproc)
sync = TRUE;
@@ -1378,7 +1379,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
bp->b_bufsize = PAGE_SIZE * n;
bp->b_blkno = blk;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (j = 0; j < n; ++j) {
vm_page_t mreq = m[i+j];
@@ -1393,7 +1394,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
mreq->oflags |= VPO_SWAPINPROG;
bp->b_pages[j] = mreq;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
bp->b_npages = n;
/*
* Must set dirty range for NFS to work.
@@ -1443,7 +1444,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
*/
swp_pager_async_iodone(bp);
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
/*
@@ -1487,7 +1488,7 @@ swp_pager_async_iodone(struct buf *bp)
if (bp->b_npages) {
object = bp->b_pages[0]->object;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
/*
@@ -1611,7 +1612,7 @@ swp_pager_async_iodone(struct buf *bp)
*/
if (object != NULL) {
vm_object_pip_wakeupn(object, bp->b_npages);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -1652,7 +1653,7 @@ swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
int bcount;
int i;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return (0);
@@ -1746,13 +1747,13 @@ restart:
for (j = 0; j < SWAP_META_PAGES; ++j) {
if (swp_pager_isondev(swap->swb_pages[j], sp)) {
/* avoid deadlock */
- if (!VM_OBJECT_TRYLOCK(object)) {
+ if (!VM_OBJECT_TRYWLOCK(object)) {
break;
} else {
mtx_unlock(&swhash_mtx);
swp_pager_force_pagein(object,
pindex + j);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
mtx_lock(&swhash_mtx);
goto restart;
}
@@ -1808,7 +1809,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
struct swblock **pswap;
int idx;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Convert default object to swap object if necessary
*/
@@ -1845,7 +1846,7 @@ retry:
swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
if (swap == NULL) {
mtx_unlock(&swhash_mtx);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (uma_zone_exhausted(swap_zone)) {
if (atomic_cmpset_int(&exhausted, 0, 1))
printf("swap zone exhausted, "
@@ -1854,7 +1855,7 @@ retry:
pause("swzonex", 10);
} else
VM_WAIT;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
@@ -1906,7 +1907,7 @@ static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return;
@@ -1952,7 +1953,7 @@ swp_pager_meta_free_all(vm_object_t object)
{
daddr_t index = 0;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return;
@@ -2011,7 +2012,7 @@ swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
daddr_t r1;
int idx;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* The meta data only exists if the object is OBJT_SWAP
* and even then might not be allocated yet.
@@ -2464,14 +2465,14 @@ vmspace_swap_count(struct vmspace *vmspace)
for (cur = map->header.next; cur != &map->header; cur = cur->next) {
if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
(object = cur->object.vm_object) != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_SWAP &&
object->un_pager.swp.swp_bcount != 0) {
n = (cur->end - cur->start) / PAGE_SIZE;
count += object->un_pager.swp.swp_bcount *
SWAP_META_PAGES * n / object->size + 1;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
}
return (count);
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index d298064..1879b7e 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index b79b3f5..6c41c07 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -81,9 +81,9 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -163,14 +163,14 @@ unlock_and_deallocate(struct faultstate *fs)
{
vm_object_pip_wakeup(fs->object);
- VM_OBJECT_UNLOCK(fs->object);
+ VM_OBJECT_WUNLOCK(fs->object);
if (fs->object != fs->first_object) {
- VM_OBJECT_LOCK(fs->first_object);
+ VM_OBJECT_WLOCK(fs->first_object);
vm_page_lock(fs->first_m);
vm_page_free(fs->first_m);
vm_page_unlock(fs->first_m);
vm_object_pip_wakeup(fs->first_object);
- VM_OBJECT_UNLOCK(fs->first_object);
+ VM_OBJECT_WUNLOCK(fs->first_object);
fs->first_m = NULL;
}
vm_object_deallocate(fs->first_object);
@@ -290,7 +290,7 @@ RetryFault:;
* truncation operations) during I/O. This must be done after
* obtaining the vnode lock in order to avoid possible deadlocks.
*/
- VM_OBJECT_LOCK(fs.first_object);
+ VM_OBJECT_WLOCK(fs.first_object);
vm_object_reference_locked(fs.first_object);
vm_object_pip_add(fs.first_object, 1);
@@ -363,17 +363,17 @@ RetryFault:;
vm_page_aflag_set(fs.m, PGA_REFERENCED);
vm_page_unlock(fs.m);
if (fs.object != fs.first_object) {
- if (!VM_OBJECT_TRYLOCK(
+ if (!VM_OBJECT_TRYWLOCK(
fs.first_object)) {
- VM_OBJECT_UNLOCK(fs.object);
- VM_OBJECT_LOCK(fs.first_object);
- VM_OBJECT_LOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
+ VM_OBJECT_WLOCK(fs.first_object);
+ VM_OBJECT_WLOCK(fs.object);
}
vm_page_lock(fs.first_m);
vm_page_free(fs.first_m);
vm_page_unlock(fs.first_m);
vm_object_pip_wakeup(fs.first_object);
- VM_OBJECT_UNLOCK(fs.first_object);
+ VM_OBJECT_WUNLOCK(fs.first_object);
fs.first_m = NULL;
}
unlock_map(&fs);
@@ -383,7 +383,7 @@ RetryFault:;
"vmpfw");
}
vm_object_pip_wakeup(fs.object);
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
PCPU_INC(cnt.v_intrans);
vm_object_deallocate(fs.first_object);
goto RetryFault;
@@ -646,12 +646,12 @@ vnode_locked:
*/
if (fs.object != fs.first_object) {
vm_object_pip_wakeup(fs.object);
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
fs.object = fs.first_object;
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
- VM_OBJECT_LOCK(fs.object);
+ VM_OBJECT_WLOCK(fs.object);
}
fs.first_m = NULL;
@@ -669,11 +669,11 @@ vnode_locked:
} else {
KASSERT(fs.object != next_object,
("object loop %p", next_object));
- VM_OBJECT_LOCK(next_object);
+ VM_OBJECT_WLOCK(next_object);
vm_object_pip_add(next_object, 1);
if (fs.object != fs.first_object)
vm_object_pip_wakeup(fs.object);
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
fs.object = next_object;
}
}
@@ -725,7 +725,7 @@ vnode_locked:
*/
((fs.object->type == OBJT_DEFAULT) ||
(fs.object->type == OBJT_SWAP)) &&
- (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
+ (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
/*
* We don't chase down the shadow chain
*/
@@ -774,7 +774,7 @@ vnode_locked:
* conditional
*/
vm_object_pip_wakeup(fs.object);
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
/*
* Only use the new page below...
*/
@@ -782,7 +782,7 @@ vnode_locked:
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
if (!is_first_object_locked)
- VM_OBJECT_LOCK(fs.object);
+ VM_OBJECT_WLOCK(fs.object);
PCPU_INC(cnt.v_cow_faults);
curthread->td_cow++;
} else {
@@ -903,7 +903,7 @@ vnode_locked:
*/
KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
("vm_fault: page %p partially invalid", fs.m));
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
/*
* Put this page into the physical map. We had to do the unlock above
@@ -914,7 +914,7 @@ vnode_locked:
pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
- VM_OBJECT_LOCK(fs.object);
+ VM_OBJECT_WLOCK(fs.object);
vm_page_lock(fs.m);
/*
@@ -960,13 +960,13 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
vm_pindex_t pindex;
object = fs->object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
first_object = fs->first_object;
if (first_object != object) {
- if (!VM_OBJECT_TRYLOCK(first_object)) {
- VM_OBJECT_UNLOCK(object);
- VM_OBJECT_LOCK(first_object);
- VM_OBJECT_LOCK(object);
+ if (!VM_OBJECT_TRYWLOCK(first_object)) {
+ VM_OBJECT_WUNLOCK(object);
+ VM_OBJECT_WLOCK(first_object);
+ VM_OBJECT_WLOCK(object);
}
}
/* Neither fictitious nor unmanaged pages can be cached. */
@@ -999,7 +999,7 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
}
}
if (first_object != object)
- VM_OBJECT_UNLOCK(first_object);
+ VM_OBJECT_WUNLOCK(first_object);
}
/*
@@ -1044,28 +1044,28 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
lobject = object;
- VM_OBJECT_LOCK(lobject);
+ VM_OBJECT_WLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
lobject->type == OBJT_DEFAULT &&
(backing_object = lobject->backing_object) != NULL) {
KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
0, ("vm_fault_prefault: unaligned object offset"));
pindex += lobject->backing_object_offset >> PAGE_SHIFT;
- VM_OBJECT_LOCK(backing_object);
- VM_OBJECT_UNLOCK(lobject);
+ VM_OBJECT_WLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(lobject);
lobject = backing_object;
}
/*
* give-up when a page is not in memory
*/
if (m == NULL) {
- VM_OBJECT_UNLOCK(lobject);
+ VM_OBJECT_WUNLOCK(lobject);
break;
}
if (m->valid == VM_PAGE_BITS_ALL &&
(m->flags & PG_FICTITIOUS) == 0)
pmap_enter_quick(pmap, addr, m, entry->protection);
- VM_OBJECT_UNLOCK(lobject);
+ VM_OBJECT_WUNLOCK(lobject);
}
}
@@ -1257,7 +1257,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_object->pg_color = atop(dst_entry->start);
#endif
- VM_OBJECT_LOCK(dst_object);
+ VM_OBJECT_WLOCK(dst_object);
KASSERT(upgrade || dst_entry->object.vm_object == NULL,
("vm_fault_copy_entry: vm_object not NULL"));
dst_entry->object.vm_object = dst_object;
@@ -1307,9 +1307,9 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_m = vm_page_alloc(dst_object, dst_pindex,
VM_ALLOC_NORMAL);
if (dst_m == NULL) {
- VM_OBJECT_UNLOCK(dst_object);
+ VM_OBJECT_WUNLOCK(dst_object);
VM_WAIT;
- VM_OBJECT_LOCK(dst_object);
+ VM_OBJECT_WLOCK(dst_object);
}
} while (dst_m == NULL);
@@ -1318,7 +1318,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
* (Because the source is wired down, the page will be in
* memory.)
*/
- VM_OBJECT_LOCK(src_object);
+ VM_OBJECT_WLOCK(src_object);
object = src_object;
pindex = src_pindex + dst_pindex;
while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
@@ -1327,18 +1327,18 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
/*
* Allow fallback to backing objects if we are reading.
*/
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
pindex += OFF_TO_IDX(object->backing_object_offset);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
if (src_m == NULL)
panic("vm_fault_copy_wired: page missing");
pmap_copy_page(src_m, dst_m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
dst_m->valid = VM_PAGE_BITS_ALL;
dst_m->dirty = VM_PAGE_BITS_ALL;
- VM_OBJECT_UNLOCK(dst_object);
+ VM_OBJECT_WUNLOCK(dst_object);
/*
* Enter it in the pmap. If a wired, copy-on-write
@@ -1350,7 +1350,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
/*
* Mark it no longer busy, and put it on the active list.
*/
- VM_OBJECT_LOCK(dst_object);
+ VM_OBJECT_WLOCK(dst_object);
if (upgrade) {
vm_page_lock(src_m);
@@ -1367,7 +1367,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
}
vm_page_wakeup(dst_m);
}
- VM_OBJECT_UNLOCK(dst_object);
+ VM_OBJECT_WUNLOCK(dst_object);
if (upgrade) {
dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
vm_object_deallocate(src_object);
@@ -1403,7 +1403,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t rtm;
int cbehind, cahead;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
object = m->object;
pindex = m->pindex;
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index f44f04c..e0a8bf7 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
@@ -238,7 +239,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
vm_pindex_t pindex;
int rv;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
pindex = OFF_TO_IDX(offset);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@@ -260,7 +261,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
vm_page_unlock(m);
vm_page_wakeup(m);
out:
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (m);
}
@@ -394,7 +395,7 @@ vm_thread_new(struct thread *td, int pages)
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
- VM_OBJECT_LOCK(ksobj);
+ VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
@@ -404,7 +405,7 @@ vm_thread_new(struct thread *td, int pages)
ma[i] = m;
m->valid = VM_PAGE_BITS_ALL;
}
- VM_OBJECT_UNLOCK(ksobj);
+ VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
return (1);
}
@@ -417,7 +418,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
atomic_add_int(&kstacks, -1);
pmap_qremove(ks, pages);
- VM_OBJECT_LOCK(ksobj);
+ VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@@ -427,7 +428,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
vm_page_free(m);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(ksobj);
+ VM_OBJECT_WUNLOCK(ksobj);
vm_object_deallocate(ksobj);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
@@ -505,7 +506,7 @@ vm_thread_swapout(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
pmap_qremove(td->td_kstack, pages);
- VM_OBJECT_LOCK(ksobj);
+ VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@@ -515,7 +516,7 @@ vm_thread_swapout(struct thread *td)
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(ksobj);
+ VM_OBJECT_WUNLOCK(ksobj);
}
/*
@@ -530,7 +531,7 @@ vm_thread_swapin(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
- VM_OBJECT_LOCK(ksobj);
+ VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++)
ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
VM_ALLOC_WIRED);
@@ -557,7 +558,7 @@ vm_thread_swapin(struct thread *td)
} else if (ma[i]->oflags & VPO_BUSY)
vm_page_wakeup(ma[i]);
}
- VM_OBJECT_UNLOCK(ksobj);
+ VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(td->td_kstack, ma, pages);
cpu_thread_swapin(td);
}
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index c507691..08c9b03 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -68,8 +68,8 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/selinfo.h>
@@ -157,8 +157,6 @@ vm_ksubmap_init(struct kva_md_info *kmi)
again:
v = (caddr_t)firstaddr;
- v = kern_timeout_callwheel_alloc(v);
-
/*
* Discount the physical memory larger than the size of kernel_map
* to avoid eating up all of KVA space.
@@ -202,10 +200,5 @@ again:
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
-
- /*
- * Initialize the callouts we just allocated.
- */
- kern_timeout_callwheel_init();
}
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index ad9aa0d..9f602b7 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -70,9 +70,9 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h> /* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -234,7 +234,7 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
end_offset = offset + size;
for (; offset < end_offset; offset += PAGE_SIZE) {
tries = 0;
@@ -242,12 +242,12 @@ retry:
m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
vm_map_lock(map);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
@@ -266,7 +266,7 @@ retry:
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_map_unlock(map);
vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
VM_MAP_WIRE_NOHOLES);
@@ -303,18 +303,18 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
tries = 0;
retry:
m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
atop(size), low, high, alignment, boundary, memattr);
if (m == NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
vm_map_lock(map);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
@@ -328,7 +328,7 @@ retry:
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_map_unlock(map);
vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
VM_MAP_WIRE_NOHOLES);
@@ -488,7 +488,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
- VM_OBJECT_LOCK(kmem_object);
+ VM_OBJECT_WLOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
retry:
m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);
@@ -500,7 +500,7 @@ retry:
*/
if (m == NULL) {
if ((flags & M_NOWAIT) == 0) {
- VM_OBJECT_UNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(kmem_object);
entry->eflags |= MAP_ENTRY_IN_TRANSITION;
vm_map_unlock(map);
VM_WAIT;
@@ -510,7 +510,7 @@ retry:
MAP_ENTRY_IN_TRANSITION,
("kmem_back: volatile entry"));
entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
- VM_OBJECT_LOCK(kmem_object);
+ VM_OBJECT_WLOCK(kmem_object);
goto retry;
}
/*
@@ -526,7 +526,7 @@ retry:
vm_page_unwire(m, 0);
vm_page_free(m);
}
- VM_OBJECT_UNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(kmem_object);
vm_map_delete(map, addr, addr + size);
return (KERN_NO_SPACE);
}
@@ -536,7 +536,7 @@ retry:
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
}
- VM_OBJECT_UNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(kmem_object);
/*
* Mark map entry as non-pageable. Repeat the assert.
@@ -556,7 +556,7 @@ retry:
/*
* Loop thru pages, entering them in the pmap.
*/
- VM_OBJECT_LOCK(kmem_object);
+ VM_OBJECT_WLOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
/*
@@ -566,7 +566,7 @@ retry:
TRUE);
vm_page_wakeup(m);
}
- VM_OBJECT_UNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(kmem_object);
return (KERN_SUCCESS);
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 35ac468..72d3983 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -77,6 +77,7 @@ __FBSDID("$FreeBSD$");
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
@@ -1222,10 +1223,10 @@ charged:
* reference counting is insufficient to recognize
* aliases with precision.)
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->ref_count > 1 || object->shadow_count != 0)
vm_object_clear_flag(object, OBJ_ONEMAPPING);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
else if ((prev_entry != &map->header) &&
(prev_entry->eflags == protoeflags) &&
@@ -1623,12 +1624,12 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
- VM_OBJECT_LOCK(entry->object.vm_object);
+ VM_OBJECT_WLOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->cred == NULL,
("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
- VM_OBJECT_UNLOCK(entry->object.vm_object);
+ VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
@@ -1700,12 +1701,12 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
- VM_OBJECT_LOCK(entry->object.vm_object);
+ VM_OBJECT_WLOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->cred == NULL,
("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
- VM_OBJECT_UNLOCK(entry->object.vm_object);
+ VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
@@ -1805,7 +1806,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
pmap_object_init_pt(map->pmap, addr, object, pindex, size);
goto unlock_return;
@@ -1856,7 +1857,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
pmap_enter_object(map->pmap, start, addr + ptoa(psize),
p_start, prot);
unlock_return:
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -1932,9 +1933,9 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
continue;
}
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
continue;
}
@@ -1946,7 +1947,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
KASSERT(obj->charge == 0,
("vm_map_protect: object %p overcharged\n", obj));
if (!swap_reserve(ptoa(obj->size))) {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
@@ -1954,7 +1955,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
crhold(cred);
obj->cred = cred;
obj->charge = ptoa(obj->size);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
/*
@@ -2717,7 +2718,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
count = OFF_TO_IDX(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->ref_count != 1 &&
((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
object == kernel_object || object == kmem_object)) {
@@ -2746,7 +2747,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
}
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
} else
entry->object.vm_object = NULL;
if (map->system_map)
@@ -2954,7 +2955,7 @@ vm_map_copy_entry(
*/
size = src_entry->end - src_entry->start;
if ((src_object = src_entry->object.vm_object) != NULL) {
- VM_OBJECT_LOCK(src_object);
+ VM_OBJECT_WLOCK(src_object);
charged = ENTRY_CHARGED(src_entry);
if ((src_object->handle == NULL) &&
(src_object->type == OBJT_DEFAULT ||
@@ -2975,7 +2976,7 @@ vm_map_copy_entry(
src_object->cred = src_entry->cred;
src_object->charge = size;
}
- VM_OBJECT_UNLOCK(src_object);
+ VM_OBJECT_WUNLOCK(src_object);
dst_entry->object.vm_object = src_object;
if (charged) {
cred = curthread->td_ucred;
@@ -3151,7 +3152,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
vm_object_deallocate(object);
object = old_entry->object.vm_object;
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_clear_flag(object, OBJ_ONEMAPPING);
if (old_entry->cred != NULL) {
KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
@@ -3159,7 +3160,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
object->charge = old_entry->end - old_entry->start;
old_entry->cred = NULL;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* Clone the entry, referencing the shared object.
@@ -3845,10 +3846,10 @@ RetryLookup:;
crfree(entry->cred);
entry->cred = NULL;
} else if (entry->cred != NULL) {
- VM_OBJECT_LOCK(eobject);
+ VM_OBJECT_WLOCK(eobject);
eobject->cred = entry->cred;
eobject->charge = size;
- VM_OBJECT_UNLOCK(eobject);
+ VM_OBJECT_WUNLOCK(eobject);
entry->cred = NULL;
}
@@ -3873,10 +3874,10 @@ RetryLookup:;
atop(size));
entry->offset = 0;
if (entry->cred != NULL) {
- VM_OBJECT_LOCK(entry->object.vm_object);
+ VM_OBJECT_WLOCK(entry->object.vm_object);
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = size;
- VM_OBJECT_UNLOCK(entry->object.vm_object);
+ VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
vm_map_lock_downgrade(map);
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 05174e9..713a2be 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>
@@ -110,7 +111,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
*/
mtx_lock(&vm_object_list_mtx);
TAILQ_FOREACH(object, &vm_object_list, object_list) {
- if (!VM_OBJECT_TRYLOCK(object)) {
+ if (!VM_OBJECT_TRYWLOCK(object)) {
/*
* Avoid a lock-order reversal. Consequently,
* the reported number of active pages may be
@@ -119,7 +120,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
continue;
}
vm_object_clear_flag(object, OBJ_ACTIVE);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
mtx_unlock(&vm_object_list_mtx);
/*
@@ -178,10 +179,10 @@ vmtotal(SYSCTL_HANDLER_ARGS)
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
(object = entry->object.vm_object) == NULL)
continue;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_set_flag(object, OBJ_ACTIVE);
paging |= object->paging_in_progress;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
vm_map_unlock_read(map);
vmspace_free(vm);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index cf94fe5..248d9e8 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
@@ -880,12 +881,12 @@ RestartScan:
m = PHYS_TO_VM_PAGE(locked_pa);
if (m->object != object) {
if (object != NULL)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = m->object;
- locked = VM_OBJECT_TRYLOCK(object);
+ locked = VM_OBJECT_TRYWLOCK(object);
vm_page_unlock(m);
if (!locked) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_lock(m);
goto retry;
}
@@ -903,9 +904,9 @@ RestartScan:
*/
if (current->object.vm_object != object) {
if (object != NULL)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = current->object.vm_object;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
if (object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP ||
@@ -942,7 +943,7 @@ RestartScan:
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
if (object != NULL)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* subyte may page fault. In case it needs to modify
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 606b605..255d919 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h> /* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
@@ -195,8 +196,8 @@ vm_object_zinit(void *mem, int size, int flags)
vm_object_t object;
object = (vm_object_t)mem;
- bzero(&object->mtx, sizeof(object->mtx));
- mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK);
+ bzero(&object->lock, sizeof(object->lock));
+ rw_init_flags(&object->lock, "vm object", RW_DUPOK);
/* These are true for any object that has been freed */
object->rtree.rt_root = 0;
@@ -267,7 +268,7 @@ vm_object_init(void)
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
- mtx_init(&kernel_object->mtx, "vm object", "kernel object", MTX_DEF);
+ rw_init(&kernel_object->lock, "kernel vm object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
#if VM_NRESERVLEVEL > 0
@@ -275,7 +276,7 @@ vm_object_init(void)
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif
- mtx_init(&kmem_object->mtx, "vm object", "kmem object", MTX_DEF);
+ rw_init(&kmem_object->lock, "kmem vm object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
#if VM_NRESERVLEVEL > 0
@@ -303,7 +304,7 @@ void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->flags &= ~bits;
}
@@ -320,7 +321,7 @@ int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
switch (object->type) {
case OBJT_DEFAULT:
case OBJT_DEVICE:
@@ -346,7 +347,7 @@ void
vm_object_pip_add(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress += i;
}
@@ -354,7 +355,7 @@ void
vm_object_pip_subtract(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress -= i;
}
@@ -362,7 +363,7 @@ void
vm_object_pip_wakeup(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress--;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
@@ -374,7 +375,7 @@ void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (i)
object->paging_in_progress -= i;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
@@ -387,7 +388,7 @@ void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
while (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
@@ -421,9 +422,9 @@ vm_object_reference(vm_object_t object)
{
if (object == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -438,7 +439,7 @@ vm_object_reference_locked(vm_object_t object)
{
struct vnode *vp;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->ref_count++;
if (object->type == OBJT_VNODE) {
vp = object->handle;
@@ -454,7 +455,7 @@ vm_object_vndeallocate(vm_object_t object)
{
struct vnode *vp = (struct vnode *) object->handle;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@@ -467,23 +468,23 @@ vm_object_vndeallocate(vm_object_t object)
if (object->ref_count > 1) {
object->ref_count--;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/* vrele may need the vnode lock. */
vrele(vp);
} else {
vhold(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vdrop(vp);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->ref_count--;
if (object->type == OBJT_DEAD) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
} else {
if (object->ref_count == 0)
VOP_UNSET_TEXT(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vput(vp);
}
}
@@ -506,7 +507,7 @@ vm_object_deallocate(vm_object_t object)
vm_object_t temp;
while (object != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_VNODE) {
vm_object_vndeallocate(object);
return;
@@ -523,7 +524,7 @@ vm_object_deallocate(vm_object_t object)
*/
object->ref_count--;
if (object->ref_count > 1) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
} else if (object->ref_count == 1) {
if (object->shadow_count == 0 &&
@@ -542,12 +543,12 @@ vm_object_deallocate(vm_object_t object)
("vm_object_deallocate: ref_count: %d, shadow_count: %d",
object->ref_count,
object->shadow_count));
- if (!VM_OBJECT_TRYLOCK(robject)) {
+ if (!VM_OBJECT_TRYWLOCK(robject)) {
/*
* Avoid a potential deadlock.
*/
object->ref_count++;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* More likely than not the thread
* holding robject's lock has lower
@@ -571,27 +572,27 @@ vm_object_deallocate(vm_object_t object)
robject->ref_count++;
retry:
if (robject->paging_in_progress) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_object_pip_wait(robject,
"objde1");
temp = robject->backing_object;
if (object == temp) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
} else if (object->paging_in_progress) {
- VM_OBJECT_UNLOCK(robject);
+ VM_OBJECT_WUNLOCK(robject);
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object,
PDROP | PVM, "objde2", 0);
- VM_OBJECT_LOCK(robject);
+ VM_OBJECT_WLOCK(robject);
temp = robject->backing_object;
if (object == temp) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
} else
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (robject->ref_count == 1) {
robject->ref_count--;
@@ -600,21 +601,21 @@ retry:
}
object = robject;
vm_object_collapse(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
- VM_OBJECT_UNLOCK(robject);
+ VM_OBJECT_WUNLOCK(robject);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
doterm:
temp = object->backing_object;
if (temp != NULL) {
- VM_OBJECT_LOCK(temp);
+ VM_OBJECT_WLOCK(temp);
LIST_REMOVE(object, shadow_list);
temp->shadow_count--;
- VM_OBJECT_UNLOCK(temp);
+ VM_OBJECT_WUNLOCK(temp);
object->backing_object = NULL;
}
/*
@@ -625,7 +626,7 @@ doterm:
if ((object->flags & OBJ_DEAD) == 0)
vm_object_terminate(object);
else
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = temp;
}
}
@@ -677,7 +678,7 @@ vm_object_terminate(vm_object_t object)
{
vm_page_t p, p_next;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Make sure no one uses us.
@@ -703,11 +704,11 @@ vm_object_terminate(vm_object_t object)
* Clean pages and flush buffers.
*/
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vinvalbuf(vp, V_SAVE, 0, 0);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
KASSERT(object->ref_count == 0,
@@ -762,7 +763,7 @@ vm_object_terminate(vm_object_t object)
* Let the pager know object is dead.
*/
vm_pager_deallocate(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_object_destroy(object);
}
@@ -818,7 +819,7 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
int curgeneration, n, pagerflags;
boolean_t clearobjflags, eio, res;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
object->resident_page_count == 0)
@@ -904,7 +905,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
int count, i, mreq, runlen;
vm_page_lock_assert(p, MA_NOTOWNED);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
count = 1;
mreq = 0;
@@ -962,11 +963,11 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
return (TRUE);
res = TRUE;
error = 0;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
offset += object->backing_object_offset;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = backing_object;
if (object->size < OFF_TO_IDX(offset + size))
size = IDX_TO_OFF(object->size) - offset;
@@ -986,7 +987,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
if (object->type == OBJT_VNODE &&
(object->flags & OBJ_MIGHTBEDIRTY) != 0) {
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (syncio && !invalidate && offset == 0 &&
@@ -1004,17 +1005,17 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
fsync_after = FALSE;
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
res = vm_object_page_clean(object, offset, offset + size,
flags);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (fsync_after)
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
if (error != 0)
res = FALSE;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
if ((object->type == OBJT_VNODE ||
object->type == OBJT_DEVICE) && invalidate) {
@@ -1032,7 +1033,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
vm_object_page_remove(object, OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK), flags);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (res);
}
@@ -1067,7 +1068,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
if (object == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
/*
* Locate and adjust resident pages
*/
@@ -1108,10 +1109,10 @@ shadowlookup:
backing_object = tobject->backing_object;
if (backing_object == NULL)
goto unlock_tobject;
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
tpindex += OFF_TO_IDX(tobject->backing_object_offset);
if (tobject != object)
- VM_OBJECT_UNLOCK(tobject);
+ VM_OBJECT_WUNLOCK(tobject);
tobject = backing_object;
goto shadowlookup;
} else if (m->valid != VM_PAGE_BITS_ALL)
@@ -1139,10 +1140,10 @@ shadowlookup:
}
vm_page_unlock(m);
if (object != tobject)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(tobject, m, PDROP | PVM, "madvpo", 0);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto relookup;
}
if (advise == MADV_WILLNEED) {
@@ -1175,9 +1176,9 @@ shadowlookup:
swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
if (tobject != object)
- VM_OBJECT_UNLOCK(tobject);
+ VM_OBJECT_WUNLOCK(tobject);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -1205,15 +1206,15 @@ vm_object_shadow(
* Don't create the new object if the old object isn't shared.
*/
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
if (source->ref_count == 1 &&
source->handle == NULL &&
(source->type == OBJT_DEFAULT ||
source->type == OBJT_SWAP)) {
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
return;
}
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
}
/*
@@ -1238,7 +1239,7 @@ vm_object_shadow(
*/
result->backing_object_offset = *offset;
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
source->shadow_count++;
#if VM_NRESERVLEVEL > 0
@@ -1246,7 +1247,7 @@ vm_object_shadow(
result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
((1 << (VM_NFREEORDER - 1)) - 1);
#endif
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
}
@@ -1277,7 +1278,7 @@ vm_object_split(vm_map_entry_t entry)
return;
if (orig_object->ref_count <= 1)
return;
- VM_OBJECT_UNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(orig_object);
offidxstart = OFF_TO_IDX(entry->offset);
size = atop(entry->end - entry->start);
@@ -1292,17 +1293,17 @@ vm_object_split(vm_map_entry_t entry)
* At this point, the new object is still private, so the order in
* which the original and new objects are locked does not matter.
*/
- VM_OBJECT_LOCK(new_object);
- VM_OBJECT_LOCK(orig_object);
+ VM_OBJECT_WLOCK(new_object);
+ VM_OBJECT_WLOCK(orig_object);
source = orig_object->backing_object;
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
if ((source->flags & OBJ_DEAD) != 0) {
- VM_OBJECT_UNLOCK(source);
- VM_OBJECT_UNLOCK(orig_object);
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(source);
+ VM_OBJECT_WUNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(new_object);
vm_object_deallocate(new_object);
- VM_OBJECT_LOCK(orig_object);
+ VM_OBJECT_WLOCK(orig_object);
return;
}
LIST_INSERT_HEAD(&source->shadow_head,
@@ -1310,7 +1311,7 @@ vm_object_split(vm_map_entry_t entry)
source->shadow_count++;
vm_object_reference_locked(source); /* for new_object */
vm_object_clear_flag(source, OBJ_ONEMAPPING);
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
new_object->backing_object_offset =
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
@@ -1337,10 +1338,10 @@ retry:
* not be changed by this operation.
*/
if ((m->oflags & VPO_BUSY) || m->busy) {
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(new_object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(orig_object, m, PVM, "spltwt", 0);
- VM_OBJECT_LOCK(new_object);
+ VM_OBJECT_WLOCK(new_object);
goto retry;
}
#if VM_NRESERVLEVEL > 0
@@ -1384,14 +1385,14 @@ retry:
vm_page_cache_transfer(orig_object, offidxstart,
new_object);
}
- VM_OBJECT_UNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(orig_object);
TAILQ_FOREACH(m, &new_object->memq, listq)
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(new_object);
entry->object.vm_object = new_object;
entry->offset = 0LL;
vm_object_deallocate(orig_object);
- VM_OBJECT_LOCK(new_object);
+ VM_OBJECT_WLOCK(new_object);
}
#define OBSC_TEST_ALL_SHADOWED 0x0001
@@ -1406,8 +1407,8 @@ vm_object_backing_scan(vm_object_t object, int op)
vm_object_t backing_object;
vm_pindex_t backing_offset_index;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
backing_object = object->backing_object;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1495,12 +1496,12 @@ vm_object_backing_scan(vm_object_t object, int op)
}
} else if (op & OBSC_COLLAPSE_WAIT) {
if ((p->oflags & VPO_BUSY) || p->busy) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
p->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(backing_object, p,
PDROP | PVM, "vmocol", 0);
- VM_OBJECT_LOCK(object);
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(object);
+ VM_OBJECT_WLOCK(backing_object);
/*
* If we slept, anything could have
* happened. Since the object is
@@ -1627,8 +1628,8 @@ vm_object_qcollapse(vm_object_t object)
{
vm_object_t backing_object = object->backing_object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ VM_OBJECT_ASSERT_WLOCKED(backing_object);
if (backing_object->ref_count != 1)
return;
@@ -1646,7 +1647,7 @@ vm_object_qcollapse(vm_object_t object)
void
vm_object_collapse(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
while (TRUE) {
vm_object_t backing_object;
@@ -1663,7 +1664,7 @@ vm_object_collapse(vm_object_t object)
* we check the backing object first, because it is most likely
* not collapsable.
*/
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
if (backing_object->handle != NULL ||
(backing_object->type != OBJT_DEFAULT &&
backing_object->type != OBJT_SWAP) ||
@@ -1672,7 +1673,7 @@ vm_object_collapse(vm_object_t object)
(object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP) ||
(object->flags & OBJ_DEAD)) {
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
@@ -1681,7 +1682,7 @@ vm_object_collapse(vm_object_t object)
backing_object->paging_in_progress != 0
) {
vm_object_qcollapse(object);
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
/*
@@ -1742,7 +1743,7 @@ vm_object_collapse(vm_object_t object)
LIST_REMOVE(object, shadow_list);
backing_object->shadow_count--;
if (backing_object->backing_object) {
- VM_OBJECT_LOCK(backing_object->backing_object);
+ VM_OBJECT_WLOCK(backing_object->backing_object);
LIST_REMOVE(backing_object, shadow_list);
LIST_INSERT_HEAD(
&backing_object->backing_object->shadow_head,
@@ -1750,7 +1751,7 @@ vm_object_collapse(vm_object_t object)
/*
* The shadow_count has not changed.
*/
- VM_OBJECT_UNLOCK(backing_object->backing_object);
+ VM_OBJECT_WUNLOCK(backing_object->backing_object);
}
object->backing_object = backing_object->backing_object;
object->backing_object_offset +=
@@ -1766,7 +1767,7 @@ vm_object_collapse(vm_object_t object)
KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
backing_object));
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
vm_object_destroy(backing_object);
object_collapses++;
@@ -1780,7 +1781,7 @@ vm_object_collapse(vm_object_t object)
if (object->resident_page_count != object->size &&
vm_object_backing_scan(object,
OBSC_TEST_ALL_SHADOWED) == 0) {
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
@@ -1794,7 +1795,7 @@ vm_object_collapse(vm_object_t object)
new_backing_object = backing_object->backing_object;
if ((object->backing_object = new_backing_object) != NULL) {
- VM_OBJECT_LOCK(new_backing_object);
+ VM_OBJECT_WLOCK(new_backing_object);
LIST_INSERT_HEAD(
&new_backing_object->shadow_head,
object,
@@ -1802,7 +1803,7 @@ vm_object_collapse(vm_object_t object)
);
new_backing_object->shadow_count++;
vm_object_reference_locked(new_backing_object);
- VM_OBJECT_UNLOCK(new_backing_object);
+ VM_OBJECT_WUNLOCK(new_backing_object);
object->backing_object_offset +=
backing_object->backing_object_offset;
}
@@ -1812,7 +1813,7 @@ vm_object_collapse(vm_object_t object)
* its ref_count was at least 2, it will not vanish.
*/
backing_object->ref_count--;
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
object_bypasses++;
}
@@ -1855,7 +1856,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
vm_page_t p, next;
int wirings;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
(options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
("vm_object_page_remove: illegal options for object %p", object));
@@ -1950,7 +1951,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
struct mtx *mtx, *new_mtx;
vm_page_t p, next;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
("vm_object_page_cache: illegal object %p", object));
if (object->resident_page_count == 0)
@@ -1998,7 +1999,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
vm_pindex_t pindex;
int rv;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (pindex = start; pindex < end; pindex++) {
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
VM_ALLOC_RETRY);
@@ -2059,10 +2060,10 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (prev_object == NULL)
return (TRUE);
- VM_OBJECT_LOCK(prev_object);
+ VM_OBJECT_WLOCK(prev_object);
if (prev_object->type != OBJT_DEFAULT &&
prev_object->type != OBJT_SWAP) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2077,7 +2078,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
* pages not mapped to prev_entry may be in use anyway)
*/
if (prev_object->backing_object != NULL) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2087,7 +2088,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if ((prev_object->ref_count > 1) &&
(prev_object->size != next_pindex)) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2141,7 +2142,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (next_pindex + next_size > prev_object->size)
prev_object->size = next_pindex + next_size;
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (TRUE);
}
@@ -2149,7 +2150,7 @@ void
vm_object_set_writeable_dirty(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_VNODE)
return;
object->generation++;
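A minimal sketch (not part of the diff) of the busy-page wait idiom that vm_object_split() and vm_object_backing_scan() use above: the page is marked VPO_WANTED and the thread sleeps through VM_OBJECT_SLEEP(), which after this change maps onto rw_sleep() on the object's rwlock. The function name and wmesg below are illustrative only; real callers must revalidate the page after sleeping, since it may have been freed or renamed while the lock was dropped.

/*
 * Illustrative only: wait until a page stops being busy.  The sleep
 * drops and retakes the object's write lock, so callers that cannot
 * tolerate the page changing must look it up again afterwards.
 */
static void
example_wait_unbusy(vm_object_t object, vm_page_t m)
{
	VM_OBJECT_ASSERT_WLOCKED(object);
	while ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
		m->oflags |= VPO_WANTED;
		VM_OBJECT_SLEEP(object, m, PVM, "exwait", 0);
	}
}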
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index d69e679..7598ea3 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -70,6 +70,7 @@
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
+#include <sys/_rwlock.h>
#include <vm/_vm_radix.h>
@@ -78,9 +79,9 @@
*
* vm_object_t Virtual memory object.
*
- * The root of cached pages pool is protected by both the per-object mutex
+ * The root of cached pages pool is protected by both the per-object lock
* and the free pages queue mutex.
- * On insert in the cache radix trie, the per-object mutex is expected
+ * On insert in the cache radix trie, the per-object lock is expected
* to be already held and the free pages queue mutex will be
* acquired during the operation too.
* On remove and lookup from the cache radix trie, only the free
@@ -91,13 +92,13 @@
*
* List of locks
* (c) const until freed
- * (o) per-object mutex
+ * (o) per-object lock
* (f) free pages queue mutex
*
*/
struct vm_object {
- struct mtx mtx;
+ struct rwlock lock;
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
@@ -205,14 +206,26 @@ extern struct vm_object kmem_object_store;
#define kernel_object (&kernel_object_store)
#define kmem_object (&kmem_object_store)
-#define VM_OBJECT_LOCK(object) mtx_lock(&(object)->mtx)
-#define VM_OBJECT_LOCK_ASSERT(object, type) \
- mtx_assert(&(object)->mtx, (type))
-#define VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
- msleep((wchan), &(object)->mtx, (pri), \
- (wmesg), (timo))
-#define VM_OBJECT_TRYLOCK(object) mtx_trylock(&(object)->mtx)
-#define VM_OBJECT_UNLOCK(object) mtx_unlock(&(object)->mtx)
+#define VM_OBJECT_ASSERT_LOCKED(object) \
+ rw_assert(&(object)->lock, RA_LOCKED)
+#define VM_OBJECT_ASSERT_RLOCKED(object) \
+ rw_assert(&(object)->lock, RA_RLOCKED)
+#define VM_OBJECT_ASSERT_WLOCKED(object) \
+ rw_assert(&(object)->lock, RA_WLOCKED)
+#define VM_OBJECT_RLOCK(object) \
+ rw_rlock(&(object)->lock)
+#define VM_OBJECT_RUNLOCK(object) \
+ rw_runlock(&(object)->lock)
+#define VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
+ rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
+#define VM_OBJECT_TRYRLOCK(object) \
+ rw_try_rlock(&(object)->lock)
+#define VM_OBJECT_TRYWLOCK(object) \
+ rw_try_wlock(&(object)->lock)
+#define VM_OBJECT_WLOCK(object) \
+ rw_wlock(&(object)->lock)
+#define VM_OBJECT_WUNLOCK(object) \
+ rw_wunlock(&(object)->lock)
/*
* The object must be locked or thread private.
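The macro block above replaces the single mutex interface with an rwlock(9) family: writers take VM_OBJECT_WLOCK()/WUNLOCK(), readers may take VM_OBJECT_RLOCK()/RUNLOCK(), and assertions come in LOCKED, RLOCKED, and WLOCKED flavors. As a hedged sketch (every call site converted in this commit still takes the write lock; read mode is only made available for later work), a read-mostly query and a mutating update could look like the following, with illustrative function names:

/*
 * Sketch only: the commit itself converts all call sites to the
 * write-lock macros; the read-lock macros exist so that lookups
 * can later be converted to shared mode.
 */
static vm_page_t
example_query(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_RLOCK(object);	/* shared: no structural change */
	m = vm_radix_lookup(&object->rtree, pindex);
	VM_OBJECT_RUNLOCK(object);
	return (m);
}

static void
example_update(vm_object_t object)
{
	VM_OBJECT_WLOCK(object);	/* exclusive: mutates the object */
	vm_object_clear_flag(object, OBJ_ONEMAPPING);
	VM_OBJECT_WUNLOCK(object);
}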
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 47ffc31..e51a28d 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -96,6 +96,7 @@ __FBSDID("$FreeBSD$");
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -469,7 +470,7 @@ void
vm_page_busy(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("vm_page_busy: page already busy!!!"));
m->oflags |= VPO_BUSY;
@@ -484,7 +485,7 @@ void
vm_page_flash(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->oflags & VPO_WANTED) {
m->oflags &= ~VPO_WANTED;
wakeup(m);
@@ -502,7 +503,7 @@ void
vm_page_wakeup(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
m->oflags &= ~VPO_BUSY;
vm_page_flash(m);
@@ -512,7 +513,7 @@ void
vm_page_io_start(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
m->busy++;
}
@@ -520,7 +521,7 @@ void
vm_page_io_finish(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
m->busy--;
if (m->busy == 0)
@@ -752,7 +753,7 @@ void
vm_page_sleep(vm_page_t m, const char *msg)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (mtx_owned(vm_page_lockptr(m)))
vm_page_unlock(m);
@@ -810,7 +811,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
vm_page_t neighbor;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (m->object != NULL)
panic("vm_page_insert: page already inserted");
@@ -876,7 +877,7 @@ vm_page_remove(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if ((object = m->object) == NULL)
return;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (m->oflags & VPO_BUSY) {
m->oflags &= ~VPO_BUSY;
vm_page_flash(m);
@@ -914,7 +915,7 @@ vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
return (vm_radix_lookup(&object->rtree, pindex));
}
@@ -931,7 +932,7 @@ vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
vm_page_t m;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
m = vm_radix_lookup_ge(&object->rtree, pindex);
return (m);
@@ -948,7 +949,7 @@ vm_page_next(vm_page_t m)
{
vm_page_t next;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((next = TAILQ_NEXT(m, listq)) != NULL &&
next->pindex != m->pindex + 1)
next = NULL;
@@ -966,7 +967,7 @@ vm_page_prev(vm_page_t m)
{
vm_page_t prev;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
prev->pindex != m->pindex - 1)
prev = NULL;
@@ -1094,7 +1095,7 @@ vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
* requires the object to be locked. In contrast, removal does
* not.
*/
- VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(new_object);
KASSERT(vm_object_cache_is_empty(new_object),
("vm_page_cache_transfer: object %p has cached pages",
new_object));
@@ -1135,7 +1136,7 @@ vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
* page queues lock in order to prove that the specified page doesn't
* exist.
*/
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (__predict_true(vm_object_cache_is_empty(object)))
return (FALSE);
mtx_lock(&vm_page_queue_free_mtx);
@@ -1184,7 +1185,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
("vm_page_alloc: inconsistent object/req"));
if (object != NULL)
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
req_class = req & VM_ALLOC_CLASS_MASK;
@@ -1392,7 +1393,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
("vm_page_alloc_contig: inconsistent object/req"));
if (object != NULL) {
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_PHYS,
("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
object));
@@ -1803,7 +1804,7 @@ vm_page_activate(vm_page_t m)
int queue;
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((queue = m->queue) != PQ_ACTIVE) {
if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@@ -2087,7 +2088,7 @@ vm_page_try_to_cache(vm_page_t m)
{
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
@@ -2110,7 +2111,7 @@ vm_page_try_to_free(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if (m->object != NULL)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
@@ -2136,7 +2137,7 @@ vm_page_cache(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
m->hold_count || m->wire_count)
panic("vm_page_cache: attempting to cache busy page");
@@ -2241,7 +2242,7 @@ vm_page_dontneed(vm_page_t m)
int head;
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
dnw = PCPU_GET(dnweight);
PCPU_INC(dnweight);
@@ -2306,7 +2307,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
vm_page_t m;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
("vm_page_grab: VM_ALLOC_RETRY is required"));
retrylookup:
@@ -2335,9 +2336,9 @@ retrylookup:
m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
VM_ALLOC_IGN_SBUSY));
if (m == NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VM_WAIT;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retrylookup;
} else if (m->valid != 0)
return (m);
@@ -2387,7 +2388,7 @@ vm_page_set_valid_range(vm_page_t m, int base, int size)
{
int endoff, frag;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (size == 0) /* handle degenerate case */
return;
@@ -2440,7 +2441,7 @@ vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
* write mapped, then the page's dirty field cannot possibly be
* set by a concurrent pmap operation.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
m->dirty &= ~pagebits;
else {
@@ -2494,7 +2495,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
vm_page_bits_t oldvalid, pagebits;
int endoff, frag;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (size == 0) /* handle degenerate case */
return;
@@ -2584,7 +2585,7 @@ vm_page_set_invalid(vm_page_t m, int base, int size)
{
vm_page_bits_t bits;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("vm_page_set_invalid: page %p is busy", m));
bits = vm_page_bits(base, size);
@@ -2613,7 +2614,7 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
int b;
int i;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
/*
* Scan the valid bits looking for invalid sections that
* must be zerod. Invalid sub-DEV_BSIZE'd areas ( where the
@@ -2652,7 +2653,7 @@ vm_page_is_valid(vm_page_t m, int base, int size)
{
vm_page_bits_t bits;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
bits = vm_page_bits(base, size);
if (m->valid && ((m->valid & bits) == bits))
return 1;
@@ -2667,7 +2668,7 @@ void
vm_page_test_dirty(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
vm_page_dirty(m);
}
@@ -2721,7 +2722,7 @@ vm_page_cowfault(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->paging_in_progress != 0,
("vm_page_cowfault: object %p's paging-in-progress count is zero.",
object));
@@ -2734,9 +2735,9 @@ vm_page_cowfault(vm_page_t m)
if (mnew == NULL) {
vm_page_insert(m, object, pindex);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VM_WAIT;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (m == vm_page_lookup(object, pindex)) {
vm_page_lock(m);
goto retry_alloc;
@@ -2793,11 +2794,11 @@ vm_page_cowsetup(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->oflags & VPO_UNMANAGED) != 0 ||
- m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
+ m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYWLOCK(m->object))
return (EBUSY);
m->cow++;
pmap_remove_write(m);
- VM_OBJECT_UNLOCK(m->object);
+ VM_OBJECT_WUNLOCK(m->object);
return (0);
}
@@ -2814,7 +2815,7 @@ vm_page_object_lock_assert(vm_page_t m)
* here.
*/
if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
}
#endif
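Several hunks above (vm_page_grab() and vm_page_cowfault()) share one idiom: the object write lock cannot be held across VM_WAIT, so on allocation failure it is dropped, the thread waits for free pages, the lock is retaken, and the lookup is retried from the top. A hedged sketch with an illustrative function name:

/*
 * Sketch of the drop/wait/retry idiom.  Not from the commit; the
 * real functions also handle busy pages and allocation flags.
 */
static vm_page_t
example_grab(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
retry:
	if ((m = vm_page_lookup(object, pindex)) != NULL)
		return (m);
	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);	/* never sleep holding the lock */
		VM_WAIT;			/* wait for free pages */
		VM_OBJECT_WLOCK(object);
		goto retry;
	}
	return (m);
}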
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index ac593a4..c0a0da4 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -92,6 +92,7 @@ __FBSDID("$FreeBSD$");
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
@@ -248,7 +249,7 @@ vm_pageout_init_marker(vm_page_t marker, u_short queue)
/*
* vm_pageout_fallback_object_lock:
*
- * Lock vm object currently associated with `m'. VM_OBJECT_TRYLOCK is
+ * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
* known to have failed and page queue must be either PQ_ACTIVE or
* PQ_INACTIVE. To avoid lock order violation, unlock the page queues
* while locking the vm object. Use marker page to detect page queue
@@ -276,7 +277,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
vm_pagequeue_unlock(pq);
vm_page_unlock(m);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);
@@ -346,7 +347,7 @@ vm_pageout_clean(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
@@ -484,7 +485,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
int numpagedout = 0;
int i, runlen;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Initiate I/O. Bump the vm_page_t->busy counter and
@@ -595,12 +596,12 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
continue;
}
object = m->object;
- if ((!VM_OBJECT_TRYLOCK(object) &&
+ if ((!VM_OBJECT_TRYWLOCK(object) &&
(!vm_pageout_fallback_object_lock(m, &next) ||
m->hold_count != 0)) || (m->oflags & VPO_BUSY) != 0 ||
m->busy != 0) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
vm_page_test_dirty(m);
@@ -609,19 +610,19 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
if (m->dirty != 0) {
vm_page_unlock(m);
if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
if (object->type == OBJT_VNODE) {
vm_pagequeue_unlock(pq);
vp = object->handle;
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
(void)vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
vm_object_deallocate(object);
vn_finished_write(mp);
@@ -632,7 +633,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
m_tmp = m;
vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
0, NULL, NULL);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (TRUE);
}
} else {
@@ -644,7 +645,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
vm_page_cache(m);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
vm_pagequeue_unlock(pq);
return (FALSE);
@@ -713,13 +714,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
vm_page_t p;
int actcount, remove_mode;
- VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(first_object);
if ((first_object->flags & OBJ_FICTITIOUS) != 0)
return;
for (object = first_object;; object = backing_object) {
if (pmap_resident_count(pmap) <= desired)
goto unlock_return;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((object->flags & OBJ_UNMANAGED) != 0 ||
object->paging_in_progress != 0)
goto unlock_return;
@@ -775,13 +776,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
}
if ((backing_object = object->backing_object) == NULL)
goto unlock_return;
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
if (object != first_object)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
unlock_return:
if (object != first_object)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -811,15 +812,15 @@ vm_pageout_map_deactivate_pages(map, desired)
while (tmpe != &map->header) {
if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
obj = tmpe->object.vm_object;
- if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
+ if (obj != NULL && VM_OBJECT_TRYWLOCK(obj)) {
if (obj->shadow_count <= 1 &&
(bigobj == NULL ||
bigobj->resident_page_count < obj->resident_page_count)) {
if (bigobj != NULL)
- VM_OBJECT_UNLOCK(bigobj);
+ VM_OBJECT_WUNLOCK(bigobj);
bigobj = obj;
} else
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
}
if (tmpe->wired_count > 0)
@@ -829,7 +830,7 @@ vm_pageout_map_deactivate_pages(map, desired)
if (bigobj != NULL) {
vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
- VM_OBJECT_UNLOCK(bigobj);
+ VM_OBJECT_WUNLOCK(bigobj);
}
/*
* Next, hunt around for other pages to deactivate. We actually
@@ -842,9 +843,9 @@ vm_pageout_map_deactivate_pages(map, desired)
if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
obj = tmpe->object.vm_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
}
tmpe = tmpe->next;
@@ -963,10 +964,10 @@ vm_pageout_scan(int pass)
continue;
}
object = m->object;
- if (!VM_OBJECT_TRYLOCK(object) &&
+ if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
@@ -979,7 +980,7 @@ vm_pageout_scan(int pass)
*/
if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
addl_page_shortage++;
continue;
}
@@ -1016,7 +1017,7 @@ vm_pageout_scan(int pass)
vm_page_activate(m);
vm_page_unlock(m);
m->act_count += actcount + ACT_ADVANCE;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
goto relock_queues;
}
@@ -1032,13 +1033,13 @@ vm_pageout_scan(int pass)
vm_page_activate(m);
vm_page_unlock(m);
m->act_count += actcount + ACT_ADVANCE + 1;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
goto relock_queues;
}
if (m->hold_count != 0) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* Held pages are essentially stuck in the
@@ -1122,7 +1123,7 @@ vm_pageout_scan(int pass)
if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
vm_pagequeue_lock(pq);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
queues_locked = TRUE;
vm_page_requeue_locked(m);
goto relock_queues;
@@ -1165,17 +1166,17 @@ vm_pageout_scan(int pass)
KASSERT(mp != NULL,
("vp %p with NULL v_mount", vp));
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
curthread)) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
++pageout_lock_miss;
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
vp = NULL;
goto unlock_and_continue;
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);
queues_locked = TRUE;
@@ -1236,7 +1237,7 @@ vm_pageout_scan(int pass)
}
unlock_and_continue:
vm_page_lock_assert(m, MA_NOTOWNED);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (mp != NULL) {
if (queues_locked) {
vm_pagequeue_unlock(pq);
@@ -1251,7 +1252,7 @@ unlock_and_continue:
goto relock_queues;
}
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
relock_queues:
if (!queues_locked) {
vm_pagequeue_lock(pq);
@@ -1299,9 +1300,9 @@ relock_queues:
continue;
}
object = m->object;
- if (!VM_OBJECT_TRYLOCK(object) &&
+ if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
m = next;
continue;
@@ -1314,7 +1315,7 @@ relock_queues:
(m->oflags & VPO_BUSY) ||
(m->hold_count != 0)) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_page_requeue_locked(m);
m = next;
continue;
@@ -1375,7 +1376,7 @@ relock_queues:
vm_page_requeue_locked(m);
}
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
m = next;
}
vm_pagequeue_unlock(pq);
@@ -1571,9 +1572,9 @@ vm_pageout_page_stats(void)
continue;
}
object = m->object;
- if (!VM_OBJECT_TRYLOCK(object) &&
+ if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
m = next;
continue;
@@ -1586,7 +1587,7 @@ vm_pageout_page_stats(void)
(m->oflags & VPO_BUSY) ||
(m->hold_count != 0)) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_page_requeue_locked(m);
m = next;
continue;
@@ -1625,7 +1626,7 @@ vm_pageout_page_stats(void)
}
}
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
m = next;
}
vm_pagequeue_unlock(pq);
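The pageout scans above repeat one pattern: with the page lock held, the object lock may only be tried via VM_OBJECT_TRYWLOCK(); if that fails, vm_pageout_fallback_object_lock() releases the queue lock, blocks on the object's write lock, and reports whether the page survived unchanged. A hedged paraphrase of the callers' logic, with an illustrative name:

/*
 * Sketch only: on FALSE the caller still owns both the page and
 * object locks and must release them, exactly as the scan loops
 * above do before moving on to the next page.
 */
static boolean_t
example_lock_object(vm_page_t m, vm_page_t *next)
{
	vm_object_t object;

	object = m->object;
	if (VM_OBJECT_TRYWLOCK(object))
		return (TRUE);		/* fast path, no sleeping */
	/* Slow path: may sleep; the page may be requeued meanwhile. */
	return (vm_pageout_fallback_object_lock(m, next));
}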
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 6ed64ea..a991e41 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -74,6 +74,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
+#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -248,7 +249,7 @@ vm_pager_deallocate(object)
vm_object_t object;
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
(*pagertab[object->type]->pgo_dealloc) (object);
}
@@ -272,13 +273,13 @@ vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
TAILQ_FOREACH(object, pg_list, pager_object_list) {
if (object->handle == handle) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if ((object->flags & OBJ_DEAD) == 0) {
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
break;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
}
return (object);
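The lookup above takes its reference with vm_object_reference_locked() while the object lock is still held, so a concurrent termination cannot free the object between lookup and use. A hedged sketch of a consumer, with an illustrative name:

/*
 * Sketch only: the reference returned by vm_pager_object_lookup()
 * keeps the object alive after unlock and is dropped with
 * vm_object_deallocate() when the caller is done.
 */
static void
example_use_handle(struct pagerlst *list, void *handle)
{
	vm_object_t object;

	object = vm_pager_object_lookup(list, handle);
	if (object == NULL)
		return;
	/* ... use the object; it cannot vanish while referenced ... */
	vm_object_deallocate(object);
}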
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index bb7a5ec..b5d923c 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -124,7 +124,7 @@ vm_pager_get_pages(
) {
int r;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
vm_page_zero_invalid(m[reqpage], TRUE);
@@ -141,7 +141,7 @@ vm_pager_put_pages(
int *rtvals
) {
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
(*pagertab[object->type]->pgo_putpages)
(object, m, count, flags, rtvals);
}
@@ -165,7 +165,7 @@ vm_pager_has_page(
) {
boolean_t ret;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
ret = (*pagertab[object->type]->pgo_haspage)
(object, offset, before, after);
return (ret);
@@ -188,7 +188,7 @@ static __inline void
vm_pager_page_unswapped(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (pagertab[m->object->type]->pgo_pageunswapped)
(*pagertab[m->object->type]->pgo_pageunswapped)(m);
}
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index eb14411..bb071bd 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -312,7 +313,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
int i, index, n;
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
/*
@@ -485,7 +486,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
vm_reserv_t rv;
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Is a reservation fundamentally impossible?
@@ -849,7 +850,7 @@ vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
{
vm_reserv_t rv;
- VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(new_object);
rv = vm_reserv_from_page(m);
if (rv->object == old_object) {
mtx_lock(&vm_page_queue_free_mtx);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 4c678f4..5e331ee 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
+#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <machine/atomic.h>
@@ -109,9 +110,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
return (0);
while ((object = vp->v_object) != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (!(object->flags & OBJ_DEAD)) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
VOP_UNLOCK(vp, 0);
@@ -135,9 +136,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->ref_count--;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vrele(vp);
KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
@@ -154,7 +155,7 @@ vnode_destroy_vobject(struct vnode *vp)
if (obj == NULL)
return;
ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->ref_count == 0) {
/*
* vclean() may be called twice. The first time
@@ -167,13 +168,13 @@ vnode_destroy_vobject(struct vnode *vp)
if ((obj->flags & OBJ_DEAD) == 0)
vm_object_terminate(obj);
else
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
} else {
/*
* Woe to the process that tries to page now :-).
*/
vm_pager_deallocate(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
vp->v_object = NULL;
}
@@ -206,7 +207,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
retry:
while ((object = vp->v_object) != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if ((object->flags & OBJ_DEAD) == 0)
break;
vm_object_set_flag(object, OBJ_DISCONNECTWNT);
@@ -239,7 +240,7 @@ retry:
VI_UNLOCK(vp);
} else {
object->ref_count++;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
vref(vp);
return (object);
@@ -259,7 +260,7 @@ vnode_pager_dealloc(object)
if (vp == NULL)
panic("vnode_pager_dealloc: pager already dealloced");
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
vm_object_pip_wait(object, "vnpdea");
refs = object->ref_count;
@@ -278,10 +279,10 @@ vnode_pager_dealloc(object)
}
vp->v_object = NULL;
VOP_UNSET_TEXT(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
while (refs-- > 0)
vunref(vp);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
static boolean_t
@@ -299,7 +300,7 @@ vnode_pager_haspage(object, pindex, before, after)
int bsize;
int pagesperblock, blocksperpage;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
@@ -322,9 +323,9 @@ vnode_pager_haspage(object, pindex, before, after)
blocksperpage = (PAGE_SIZE / bsize);
reqblock = pindex * blocksperpage;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (err)
return TRUE;
if (bn == -1)
@@ -379,12 +380,12 @@ vnode_pager_setsize(vp, nsize)
if ((object = vp->v_object) == NULL)
return;
/* ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (nsize == object->un_pager.vnp.vnp_size) {
/*
* Hasn't changed size
*/
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
@@ -445,7 +446,7 @@ vnode_pager_setsize(vp, nsize)
}
object->un_pager.vnp.vnp_size = nsize;
object->size = nobjsize;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -568,9 +569,9 @@ vnode_pager_input_smlfs(object, m)
bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
KASSERT((m->dirty & bits) == 0,
("vnode_pager_input_smlfs: page %p is dirty", m));
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
m->valid |= bits;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
sf_buf_free(sf);
if (error) {
@@ -594,7 +595,7 @@ vnode_pager_input_old(object, m)
struct sf_buf *sf;
struct vnode *vp;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
error = 0;
/*
@@ -607,7 +608,7 @@ vnode_pager_input_old(object, m)
if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* Allocate a kernel virtual address and initialize so that
@@ -637,7 +638,7 @@ vnode_pager_input_old(object, m)
}
sf_buf_free(sf);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
if (!error)
@@ -669,11 +670,11 @@ vnode_pager_getpages(object, m, count, reqpage)
int bytes = count * PAGE_SIZE;
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: FS getpages not implemented\n"));
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
return rtval;
}
@@ -723,7 +724,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
if (error == EOPNOTSUPP) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
@@ -734,17 +735,17 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
error = vnode_pager_input_old(object, m[reqpage]);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (error);
} else if (error != 0) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
/*
@@ -754,14 +755,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
} else if ((PAGE_SIZE / bsize) > 1 &&
(vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
return vnode_pager_input_smlfs(object, m[reqpage]);
@@ -772,7 +773,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* clean up and return. Otherwise we have to re-read the
* media.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
for (i = 0; i < count; i++)
if (i != reqpage) {
@@ -780,7 +781,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return VM_PAGER_OK;
} else if (reqblock == -1) {
pmap_zero_page(m[reqpage]);
@@ -793,11 +794,11 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_OK);
}
m[reqpage]->valid = 0;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* here on direct device I/O
@@ -810,18 +811,18 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (first = 0, i = 0; i < count; i = runend) {
if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
&runpg) != 0) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
if (firstaddr == -1) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
(intmax_t)firstaddr, (uintmax_t)(foff >> 32),
@@ -833,29 +834,29 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
runend = i + 1;
first = runend;
continue;
}
runend = i + runpg;
if (runend <= reqpage) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (j = i; j < runend; j++) {
vm_page_lock(m[j]);
vm_page_free(m[j]);
vm_page_unlock(m[j]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
} else {
if (runpg < (count - first)) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = first + runpg; i < count; i++) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
count = first + runpg;
}
break;
@@ -946,7 +947,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
pbrelbo(bp);
relpbuf(bp, &vnode_pbuf_freecnt);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
vm_page_t mt;
@@ -983,7 +984,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (i != reqpage)
vm_page_readahead_finish(mt);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (error) {
printf("vnode_pager_getpages: I/O read error\n");
}
@@ -1029,11 +1030,11 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* Call device-specific putpages function
*/
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: stale FS putpages\n"));
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
@@ -1095,7 +1096,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
* We do not under any circumstances truncate the valid bits, as
* this will screw up bogus page replacement.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > poffset) {
int pgoff;
@@ -1127,7 +1128,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
}
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* pageouts are already clustered, use IO_ASYNC to force a bawrite()
@@ -1181,7 +1182,7 @@ vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
if (written == 0)
return;
obj = ma[0]->object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
if (pos < trunc_page(written)) {
rtvals[i] = VM_PAGER_OK;
@@ -1192,7 +1193,7 @@ vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
}
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
void
@@ -1202,9 +1203,9 @@ vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
struct vnode *vp;
vm_ooffset_t old_wm;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type != OBJT_VNODE) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
old_wm = object->un_pager.vnp.writemappings;
@@ -1221,7 +1222,7 @@ vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
__func__, vp, vp->v_writecount);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
void
@@ -1232,14 +1233,14 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
struct mount *mp;
vm_offset_t inc;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
/*
* First, recheck the object type to account for the race when
* the vnode is reclaimed.
*/
if (object->type != OBJT_VNODE) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
@@ -1250,13 +1251,13 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
inc = end - start;
if (object->un_pager.vnp.writemappings != inc) {
object->un_pager.vnp.writemappings -= inc;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
vp = object->handle;
vhold(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
mp = NULL;
vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
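Nearly every vnode_pager hunk above has the same shape: the object write lock is dropped around VOP_* calls, which may sleep or acquire the vnode lock, and retaken afterwards. A minimal sketch mirroring vnode_pager_getpages(), illustrative only:

/*
 * Sketch only: drop the object lock around the VOP, then retake it
 * because callers expect the object locked on return.
 */
static int
example_getpages(vm_object_t object, vm_page_t *m, int bytes, int reqpage)
{
	struct vnode *vp;
	int rtval;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vp = object->handle;
	VM_OBJECT_WUNLOCK(object);	/* the VOP may sleep on the vnode */
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	VM_OBJECT_WLOCK(object);
	return (rtval);
}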