path: root/sys/vm
author      dillon <dillon@FreeBSD.org>    2001-07-04 16:20:28 +0000
committer   dillon <dillon@FreeBSD.org>    2001-07-04 16:20:28 +0000
commit      e028603b7e3e4fb35cdf00aab533f3965f4a13cc (patch)
tree        7420cce169451a74c5b87963467a4aeff668ed12 /sys/vm
parent      0b028660051eb7abf4306d34e7fec0e7fde86a28 (diff)
download    FreeBSD-src-e028603b7e3e4fb35cdf00aab533f3965f4a13cc.zip
            FreeBSD-src-e028603b7e3e4fb35cdf00aab533f3965f4a13cc.tar.gz
With Alfred's permission, remove vm_mtx in favor of a fine-grained approach
(this commit is just the first stage). Also add various GIANT_ macros to
formalize the removal of Giant, making it easy to test in a more piecemeal
fashion. These macros will allow us to test fine-grained locks to a degree
before removing Giant, and also after, and to remove Giant in a piecemeal
fashion via sysctls on those subsystems which the authors believe can
operate without Giant.
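The GIANT_ macros themselves are not defined in this diff, which only adds uses
of them; the sketch below illustrates the idea and is not the tree's actual
definitions. GIANT_REQUIRED reduces to an assertion that the caller holds
Giant, and a sysctl-gated variant (names invented here) shows how a subsystem
could later shed Giant piecemeal once its own locks are trusted:

/*
 * Illustrative sketch only -- not the definitions from the tree.
 * GIANT_REQUIRED asserts that the caller holds Giant, so code paths
 * converted away from vm_mtx can be checked while Giant still
 * provides the actual serialization.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#define GIANT_REQUIRED          mtx_assert(&Giant, MA_OWNED)

/*
 * Hypothetical sysctl-gated helpers (names invented for illustration):
 * once a subsystem's fine-grained locks are believed sufficient, the
 * knob lets Giant be dropped for that subsystem without a recompile.
 */
static int vm_use_giant = 1;            /* e.g. exported via a sysctl */

#define VM_GIANT_LOCK()         do {                            \
        if (vm_use_giant)                                       \
                mtx_lock(&Giant);                               \
} while (0)

#define VM_GIANT_UNLOCK()       do {                            \
        if (vm_use_giant)                                       \
                mtx_unlock(&Giant);                             \
} while (0)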
Diffstat (limited to 'sys/vm')
-rw-r--r--   sys/vm/default_pager.c     1
-rw-r--r--   sys/vm/device_pager.c      1
-rw-r--r--   sys/vm/phys_pager.c        5
-rw-r--r--   sys/vm/swap_pager.c      100
-rw-r--r--   sys/vm/vm.h                4
-rw-r--r--   sys/vm/vm_fault.c         49
-rw-r--r--   sys/vm/vm_glue.c          45
-rw-r--r--   sys/vm/vm_init.c           6
-rw-r--r--   sys/vm/vm_kern.c          70
-rw-r--r--   sys/vm/vm_map.c          113
-rw-r--r--   sys/vm/vm_map.h           16
-rw-r--r--   sys/vm/vm_meter.c          7
-rw-r--r--   sys/vm/vm_mmap.c          86
-rw-r--r--   sys/vm/vm_object.c        71
-rw-r--r--   sys/vm/vm_object.h        48
-rw-r--r--   sys/vm/vm_page.c          77
-rw-r--r--   sys/vm/vm_page.h          36
-rw-r--r--   sys/vm/vm_pageout.c       46
-rw-r--r--   sys/vm/vm_pager.c         20
-rw-r--r--   sys/vm/vm_pager.h         15
-rw-r--r--   sys/vm/vm_unix.c          46
-rw-r--r--   sys/vm/vm_zone.c          23
-rw-r--r--   sys/vm/vnode_pager.c      67
23 files changed, 294 insertions, 658 deletions
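Most hunks below apply the same conversion: assertions on vm_mtx become
GIANT_REQUIRED, explicit mtx_lock/mtx_unlock(&vm_mtx) pairs disappear, and
msleep() calls that interlocked with vm_mtx become plain tsleep() calls. A
condensed sketch of the pattern, using a hypothetical helper modeled on the
phys_pager and vm_page changes (function and variable names here are invented
for illustration):

/*
 * Hypothetical routine showing the recurring conversion in this diff.
 * The old form asserted vm_mtx and used msleep() so the mutex was
 * dropped across the sleep; with vm_mtx gone, the routine asserts
 * Giant instead and a plain tsleep() suffices, since there is no
 * longer a subsystem mutex to interlock with.
 */
static int vm_example_busy;             /* hypothetical wait condition */

static void
vm_example_wait(void *chan)
{
        GIANT_REQUIRED;                 /* was: mtx_assert(&vm_mtx, MA_OWNED) */

        while (vm_example_busy) {
                /* was: msleep(chan, &vm_mtx, PVM, "vmexwt", 0); */
                tsleep(chan, PVM, "vmexwt", 0);
        }
}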
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index 0fb4896..21a3b7c 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -42,6 +42,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index b7c35af..af52cd9 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -43,6 +43,7 @@
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sx.h>
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index b3355c7..8ac32d0 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -31,6 +31,7 @@
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
@@ -62,6 +63,8 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
{
vm_object_t object;
+ GIANT_REQUIRED;
+
/*
* Offset should be page aligned.
*/
@@ -76,7 +79,7 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
while (phys_pager_alloc_lock) {
phys_pager_alloc_lock = -1;
- msleep(&phys_pager_alloc_lock, &vm_mtx, PVM, "swpalc", 0);
+ tsleep(&phys_pager_alloc_lock, PVM, "swpalc", 0);
}
phys_pager_alloc_lock = 1;
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index edbfa8e..53b78de 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -80,6 +80,7 @@
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
+#include <sys/sx.h>
#include <sys/vmmeter.h>
#ifndef MAX_PAGEOUT_CLUSTER
@@ -118,12 +119,12 @@ static int nsw_wcount_sync; /* limit write buffers / synchronous */
static int nsw_wcount_async; /* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max; /* maximum VOP I/O allowed */
-static int sw_alloc_interlock; /* swap pager allocation interlock */
struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4; /* maximum in-progress async I/O's */
+static struct sx sw_alloc_sx;
/* from vm_swap.c */
extern struct vnode *swapdev_vp;
@@ -232,8 +233,8 @@ static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
static __inline void
swp_sizecheck()
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
if (vm_swap_size < nswap_lowat) {
if (swap_pager_almost_full == 0) {
printf("swap_pager: out of swap space\n");
@@ -383,7 +384,8 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
{
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (handle) {
/*
* Reference existing named region or allocate new one. There
@@ -391,13 +393,7 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
* as called from vm_page_remove() in regards to the lookup
* of the handle.
*/
-
- while (sw_alloc_interlock) {
- sw_alloc_interlock = -1;
- msleep(&sw_alloc_interlock, &vm_mtx, PVM, "swpalc", 0);
- }
- sw_alloc_interlock = 1;
-
+ sx_xlock(&sw_alloc_sx);
object = vm_pager_object_lookup(NOBJLIST(handle), handle);
if (object != NULL) {
@@ -409,10 +405,7 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
}
-
- if (sw_alloc_interlock == -1)
- wakeup(&sw_alloc_interlock);
- sw_alloc_interlock = 0;
+ sx_xunlock(&sw_alloc_sx);
} else {
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(offset + PAGE_MASK + size));
@@ -442,12 +435,12 @@ swap_pager_dealloc(object)
{
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Remove from list right away so lookups will fail if we block for
* pageout completion.
*/
-
mtx_lock(&sw_alloc_mtx);
if (object->handle == NULL) {
TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
@@ -488,7 +481,6 @@ swap_pager_dealloc(object)
*
* This routine may not block
* This routine must be called at splvm().
- * vm_mtx should be held
*/
static __inline daddr_t
@@ -497,7 +489,8 @@ swp_pager_getswapspace(npages)
{
daddr_t blk;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
if (swap_pager_full != 2) {
printf("swap_pager_getswapspace: failed\n");
@@ -526,7 +519,6 @@ swp_pager_getswapspace(npages)
*
* This routine may not block
* This routine must be called at splvm().
- * vm_mtx should be held
*/
static __inline void
@@ -534,8 +526,8 @@ swp_pager_freeswapspace(blk, npages)
daddr_t blk;
int npages;
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
blist_free(swapblist, blk, npages);
vm_swap_size += npages;
/* per-swap area stats */
@@ -566,9 +558,8 @@ swap_pager_freespace(object, start, size)
vm_size_t size;
{
int s = splvm();
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
swp_pager_meta_free(object, start, size);
splx(s);
}
@@ -651,10 +642,9 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
vm_pindex_t i;
int s;
- s = splvm();
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+ s = splvm();
/*
* If destroysource is set, we remove the source object from the
* swap_pager internal queue now.
@@ -871,8 +861,8 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
char *data;
struct buf *nbp = NULL;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
/* XXX: KASSERT instead ? */
if (bp->bio_bcount & PAGE_MASK) {
biofinish(bp, NULL, EINVAL);
@@ -903,9 +893,7 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
* FREE PAGE(s) - destroy underlying swap that is no longer
* needed.
*/
- mtx_lock(&vm_mtx);
swp_pager_meta_free(object, start, count);
- mtx_unlock(&vm_mtx);
splx(s);
bp->bio_resid = 0;
biodone(bp);
@@ -915,8 +903,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
/*
* Execute read or write
*/
-
- mtx_lock(&vm_mtx);
while (count > 0) {
daddr_t blk;
@@ -959,9 +945,7 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
cnt.v_swappgsout += btoc(nbp->b_bcount);
nbp->b_dirtyend = nbp->b_bcount;
}
- mtx_unlock(&vm_mtx);
flushchainbuf(nbp);
- mtx_lock(&vm_mtx);
s = splvm();
nbp = NULL;
}
@@ -981,9 +965,7 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
bp->bio_resid -= PAGE_SIZE;
} else {
if (nbp == NULL) {
- mtx_unlock(&vm_mtx);
nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
- mtx_lock(&vm_mtx);
nbp->b_blkno = blk;
nbp->b_bcount = 0;
nbp->b_data = data;
@@ -1010,11 +992,9 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
cnt.v_swappgsout += btoc(nbp->b_bcount);
nbp->b_dirtyend = nbp->b_bcount;
}
- mtx_unlock(&vm_mtx);
flushchainbuf(nbp);
/* nbp = NULL; */
- } else
- mtx_unlock(&vm_mtx);
+ }
/*
* Wait for completion.
*/
@@ -1057,7 +1037,8 @@ swap_pager_getpages(object, m, count, reqpage)
vm_offset_t kva;
vm_pindex_t lastpindex;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
mreq = m[reqpage];
if (mreq->object != object) {
@@ -1185,10 +1166,8 @@ swap_pager_getpages(object, m, count, reqpage)
*
* NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
*/
- mtx_unlock(&vm_mtx);
BUF_KERNPROC(bp);
BUF_STRATEGY(bp);
- mtx_lock(&vm_mtx);
/*
* wait for the page we want to complete. PG_SWAPINPROG is always
@@ -1201,7 +1180,7 @@ swap_pager_getpages(object, m, count, reqpage)
while ((mreq->flags & PG_SWAPINPROG) != 0) {
vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
cnt.v_intrans++;
- if (msleep(mreq, &vm_mtx, PSWP, "swread", hz*20)) {
+ if (tsleep(mreq, PSWP, "swread", hz*20)) {
printf(
"swap_pager: indefinite wait buffer: device:"
" %s, blkno: %ld, size: %ld\n",
@@ -1267,7 +1246,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
int i;
int n = 0;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
if (count && m[0]->object != object) {
panic("swap_pager_getpages: object mismatch %p/%p",
object,
@@ -1432,7 +1411,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
swapdev_vp->v_numoutput++;
splx(s);
- mtx_unlock(&vm_mtx);
/*
* asynchronous
@@ -1444,7 +1422,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
bp->b_iodone = swp_pager_async_iodone;
BUF_KERNPROC(bp);
BUF_STRATEGY(bp);
- mtx_lock(&vm_mtx);
for (j = 0; j < n; ++j)
rtvals[i+j] = VM_PAGER_PEND;
@@ -1482,8 +1459,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
*/
swp_pager_async_iodone(bp);
- mtx_lock(&vm_mtx);
-
splx(s);
}
}
@@ -1533,7 +1508,8 @@ swp_pager_async_iodone(bp)
int i;
vm_object_t object = NULL;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
bp->b_flags |= B_DONE;
/*
@@ -1562,7 +1538,6 @@ swp_pager_async_iodone(bp)
/*
* remove the mapping for kernel virtual
*/
- mtx_lock(&vm_mtx);
pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
/*
@@ -1697,7 +1672,6 @@ swp_pager_async_iodone(bp)
if (object)
vm_object_pip_wakeupn(object, bp->b_npages);
- mtx_unlock(&vm_mtx);
/*
* release the physical I/O buffer
*/
@@ -1771,8 +1745,6 @@ swp_pager_hash(vm_object_t object, vm_pindex_t index)
*
* This routine must be called at splvm(), except when used to convert
* an OBJT_DEFAULT object into an OBJT_SWAP object.
- *
- * Requires vm_mtx.
*/
static void
@@ -1784,7 +1756,7 @@ swp_pager_meta_build(
struct swblock *swap;
struct swblock **pswap;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Convert default object to swap object if necessary
*/
@@ -1871,15 +1843,13 @@ retry:
* out. This routine does *NOT* operate on swap metadata associated
* with resident pages.
*
- * vm_mtx must be held
* This routine must be called at splvm()
*/
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object->type != OBJT_SWAP)
return;
@@ -1920,7 +1890,6 @@ swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
* an object.
*
* This routine must be called at splvm()
- * Requires vm_mtx.
*/
static void
@@ -1928,7 +1897,7 @@ swp_pager_meta_free_all(vm_object_t object)
{
daddr_t index = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object->type != OBJT_SWAP)
return;
@@ -1978,7 +1947,6 @@ swp_pager_meta_free_all(vm_object_t object)
* busy page.
*
* This routine must be called at splvm().
- * Requires vm_mtx.
*
* SWM_FREE remove and free swap block from metadata
* SWM_POP remove from meta data but do not free.. pop it out
@@ -1994,7 +1962,7 @@ swp_pager_meta_ctl(
struct swblock *swap;
daddr_t r1;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* The meta data only exists of the object is OBJT_SWAP
* and even then might not be allocated yet.
@@ -2082,8 +2050,6 @@ vm_pager_chain_iodone(struct buf *nbp)
* Obtain a physical buffer and chain it to its parent buffer. When
* I/O completes, the parent buffer will be B_SIGNAL'd. Errors are
* automatically propagated to the parent
- *
- * vm_mtx can't be held
*/
struct buf *
@@ -2092,8 +2058,7 @@ getchainbuf(struct bio *bp, struct vnode *vp, int flags)
struct buf *nbp;
u_int *count;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
nbp = getpbuf(NULL);
count = (u_int *)&(bp->bio_caller1);
@@ -2120,9 +2085,7 @@ getchainbuf(struct bio *bp, struct vnode *vp, int flags)
void
flushchainbuf(struct buf *nbp)
{
-
- mtx_assert(&vm_mtx, MA_NOTOWNED);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
if (nbp->b_bcount) {
nbp->b_bufsize = nbp->b_bcount;
if (nbp->b_iocmd == BIO_WRITE)
@@ -2140,8 +2103,7 @@ waitchainbuf(struct bio *bp, int limit, int done)
int s;
u_int *count;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
count = (u_int *)&(bp->bio_caller1);
s = splbio();
while (*count > limit) {
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 5915b29..38f04ac 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -95,10 +95,6 @@ typedef struct vm_map *vm_map_t;
struct vm_object;
typedef struct vm_object *vm_object_t;
-#ifdef _KERNEL
-extern struct mtx vm_mtx;
-#endif
-
#ifndef _KERNEL
/*
* This is defined in <sys/types.h> for the kernel so that non-vm kernel
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index e263280..a25b805 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -136,9 +136,7 @@ unlock_map(struct faultstate *fs)
static void
_unlock_things(struct faultstate *fs, int dealloc)
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
vm_object_pip_wakeup(fs->object);
if (fs->object != fs->first_object) {
vm_page_free(fs->first_m);
@@ -150,13 +148,8 @@ _unlock_things(struct faultstate *fs, int dealloc)
}
unlock_map(fs);
if (fs->vp != NULL) {
- struct vnode *vp;
-
- vp = fs->vp;
+ vput(fs->vp);
fs->vp = NULL;
- mtx_unlock(&vm_mtx);
- vput(vp);
- mtx_lock(&vm_mtx);
}
}
@@ -189,37 +182,20 @@ _unlock_things(struct faultstate *fs, int dealloc)
*
*
* The map in question must be referenced, and remains so.
- * Caller may hold no locks except the vm_mtx which will be
- * locked if needed.
+ * Caller may hold no locks.
*/
static int vm_fault1 __P((vm_map_t, vm_offset_t, vm_prot_t, int));
-static int vm_faults_no_vm_mtx;
-SYSCTL_INT(_vm, OID_AUTO, vm_faults_no_vm_mtx, CTLFLAG_RW,
- &vm_faults_no_vm_mtx, 0, "");
-
-static int vm_faults_no_giant;
-SYSCTL_INT(_vm, OID_AUTO, vm_faults_no_giant, CTLFLAG_RW,
- &vm_faults_no_giant, 0, "");
-
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int fault_flags)
{
- int hadvmlock, hadgiant, ret;
+ int ret;
- hadvmlock = mtx_owned(&vm_mtx);
- hadgiant = mtx_owned(&Giant);
mtx_lock(&Giant);
- if (!hadvmlock) {
- mtx_lock(&vm_mtx);
- vm_faults_no_vm_mtx++;
- if (hadgiant == 0)
- vm_faults_no_giant++;
- }
+ /* GIANT_REQUIRED */
+
ret = vm_fault1(map, vaddr, fault_type, fault_flags);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
return (ret);
}
@@ -238,7 +214,8 @@ vm_fault1(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int faultcount;
struct faultstate fs;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
cnt.v_vm_faults++;
hardfault = 0;
@@ -296,9 +273,7 @@ RetryFault:;
vm_object_reference(fs.first_object);
vm_object_pip_add(fs.first_object, 1);
- mtx_unlock(&vm_mtx);
fs.vp = vnode_pager_lock(fs.first_object);
- mtx_lock(&vm_mtx);
if ((fault_type & VM_PROT_WRITE) &&
(fs.first_object->type == OBJT_VNODE)) {
vm_freeze_copyopts(fs.first_object,
@@ -770,9 +745,7 @@ readrest:
*/
if (fs.vp != NULL) {
- mtx_unlock(&vm_mtx);
vput(fs.vp);
- mtx_lock(&vm_mtx);
fs.vp = NULL;
}
@@ -989,7 +962,8 @@ vm_fault_user_wire(map, start, end)
register pmap_t pmap;
int rv;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
pmap = vm_map_pmap(map);
/*
@@ -1164,7 +1138,6 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* number of pages in marray
*
* This routine can't block.
- * vm_mtx must be held.
*/
static int
vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
@@ -1180,7 +1153,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t rtm;
int cbehind, cahead;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
object = m->object;
pindex = m->pindex;
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index ba14789..ce610d3 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -145,6 +145,8 @@ useracc(addr, len, rw)
vm_map_t map;
vm_map_entry_t save_hint;
+ GIANT_REQUIRED;
+
KASSERT((rw & (~VM_PROT_ALL)) == 0,
("illegal ``rw'' argument to useracc (%x)\n", rw));
prot = rw;
@@ -161,7 +163,6 @@ useracc(addr, len, rw)
|| (vm_offset_t) addr + len < (vm_offset_t) addr) {
return (FALSE);
}
- mtx_lock(&vm_mtx);
map = &curproc->p_vmspace->vm_map;
vm_map_lock_read(map);
/*
@@ -173,7 +174,6 @@ useracc(addr, len, rw)
trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
map->hint = save_hint;
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
return (rv == TRUE);
}
@@ -183,12 +183,10 @@ vslock(addr, len)
caddr_t addr;
u_int len;
{
-
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
vm_map_pageable(&curproc->p_vmspace->vm_map,
trunc_page((vm_offset_t)addr),
round_page((vm_offset_t)addr + len), FALSE);
- mtx_unlock(&vm_mtx);
}
void
@@ -196,12 +194,10 @@ vsunlock(addr, len)
caddr_t addr;
u_int len;
{
-
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
vm_map_pageable(&curproc->p_vmspace->vm_map,
trunc_page((vm_offset_t)addr),
round_page((vm_offset_t)addr + len), TRUE);
- mtx_unlock(&vm_mtx);
}
/*
@@ -211,8 +207,6 @@ vsunlock(addr, len)
* machine-dependent layer to fill those in and make the new process
* ready to run. The new process is set up so that it returns directly
* to user mode to avoid stack copying and relocation problems.
- *
- * Called without vm_mtx.
*/
void
vm_fork(p1, p2, flags)
@@ -221,7 +215,8 @@ vm_fork(p1, p2, flags)
{
register struct user *up;
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
if ((flags & RFPROC) == 0) {
/*
* Divorce the memory, if it is shared, essentially
@@ -234,7 +229,6 @@ vm_fork(p1, p2, flags)
}
}
cpu_fork(p1, p2, flags);
- mtx_unlock(&vm_mtx);
return;
}
@@ -289,7 +283,6 @@ vm_fork(p1, p2, flags)
* and make the child ready to run.
*/
cpu_fork(p1, p2, flags);
- mtx_unlock(&vm_mtx);
}
/*
@@ -329,18 +322,16 @@ void
faultin(p)
struct proc *p;
{
+ GIANT_REQUIRED;
PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
-
++p->p_lock;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_lock(&vm_mtx);
pmap_swapin_proc(p);
- mtx_unlock(&vm_mtx);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -374,15 +365,13 @@ scheduler(dummy)
int ppri;
mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
+ /* GIANT_REQUIRED */
loop:
- mtx_lock(&vm_mtx);
if (vm_page_count_min()) {
VM_WAIT;
- mtx_unlock(&vm_mtx);
goto loop;
}
- mtx_unlock(&vm_mtx);
pp = NULL;
ppri = INT_MIN;
@@ -458,9 +447,6 @@ SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
* If any procs have been sleeping/stopped for at least maxslp seconds,
* they are swapped. Else, we swap the longest-sleeping or stopped process,
* if any, otherwise the longest-resident process.
- *
- * Can block
- * must be called with vm_mtx
*/
void
swapout_procs(action)
@@ -471,8 +457,8 @@ int action;
int outpri, outpri2;
int didswap = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
- mtx_unlock(&vm_mtx);
+ GIANT_REQUIRED;
+
outp = outp2 = NULL;
outpri = outpri2 = INT_MIN;
retry:
@@ -480,12 +466,10 @@ retry:
LIST_FOREACH(p, &allproc, p_list) {
struct vmspace *vm;
- mtx_lock(&vm_mtx);
PROC_LOCK(p);
if (p->p_lock != 0 ||
(p->p_flag & (P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
/*
@@ -498,7 +482,6 @@ retry:
if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
@@ -506,7 +489,6 @@ retry:
default:
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
case SSLEEP:
@@ -517,7 +499,6 @@ retry:
if (PRI_IS_REALTIME(p->p_pri.pri_class)) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
@@ -530,7 +511,6 @@ retry:
(p->p_slptime < swap_idle_threshold1)) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
@@ -544,7 +524,6 @@ retry:
(p->p_slptime < swap_idle_threshold2))) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
mtx_unlock_spin(&sched_lock);
@@ -559,7 +538,6 @@ retry:
NULL, curproc)) {
vmspace_free(vm);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
vm_map_unlock(&vm->vm_map);
@@ -574,12 +552,10 @@ retry:
swapout(p);
vmspace_free(vm);
didswap++;
- mtx_unlock(&vm_mtx);
goto retry;
}
PROC_UNLOCK(p);
vmspace_free(vm);
- mtx_unlock(&vm_mtx);
}
}
sx_sunlock(&allproc_lock);
@@ -587,7 +563,6 @@ retry:
* If we swapped something out, and another process needed memory,
* then wakeup the sched process.
*/
- mtx_lock(&vm_mtx);
if (didswap)
wakeup(&proc0);
}
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index ce8301a..cec1997 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -97,20 +97,15 @@ SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL)
* The start and end address of physical memory is passed in.
*/
-struct mtx vm_mtx;
-
/* ARGSUSED*/
static void
vm_mem_init(dummy)
void *dummy;
{
-
/*
* Initializes resident memory structures. From here on, all physical
* memory is accounted for, and we use only virtual addresses.
*/
- mtx_init(&vm_mtx, "vm", MTX_DEF);
- mtx_lock(&vm_mtx);
vm_set_page_size();
virtual_avail = vm_page_startup(avail_start, avail_end, virtual_avail);
@@ -123,5 +118,4 @@ vm_mem_init(dummy)
kmem_init(virtual_avail, virtual_end);
pmap_init(avail_start, avail_end);
vm_pager_init();
- mtx_unlock(&vm_mtx);
}
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 38f969e..96199cd 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -104,17 +104,13 @@ kmem_alloc_pageable(map, size)
{
vm_offset_t addr;
int result;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
size = round_page(size);
addr = vm_map_min(map);
result = vm_map_find(map, NULL, (vm_offset_t) 0,
&addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
if (result != KERN_SUCCESS) {
return (0);
}
@@ -135,17 +131,12 @@ kmem_alloc_nofault(map, size)
vm_offset_t addr;
int result;
- int hadvmlock;
+ GIANT_REQUIRED;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
size = round_page(size);
addr = vm_map_min(map);
result = vm_map_find(map, NULL, (vm_offset_t) 0,
&addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
if (result != KERN_SUCCESS) {
return (0);
}
@@ -164,11 +155,9 @@ kmem_alloc(map, size)
vm_offset_t addr;
vm_offset_t offset;
vm_offset_t i;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
size = round_page(size);
/*
@@ -184,8 +173,6 @@ kmem_alloc(map, size)
vm_map_lock(map);
if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (0);
}
offset = addr - VM_MIN_KERNEL_ADDRESS;
@@ -230,8 +217,6 @@ kmem_alloc(map, size)
(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (addr);
}
@@ -250,16 +235,9 @@ kmem_free(map, addr, size)
vm_offset_t addr;
vm_size_t size;
{
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
-
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
}
/*
@@ -282,11 +260,8 @@ kmem_suballoc(parent, min, max, size)
{
int ret;
vm_map_t result;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
size = round_page(size);
@@ -304,8 +279,6 @@ kmem_suballoc(parent, min, max, size)
panic("kmem_suballoc: cannot create submap");
if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
panic("kmem_suballoc: unable to change range to submap");
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (result);
}
@@ -343,12 +316,9 @@ kmem_malloc(map, size, flags)
vm_map_entry_t entry;
vm_offset_t addr;
vm_page_t m;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
-
+ GIANT_REQUIRED;
+
size = round_page(size);
addr = vm_map_min(map);
@@ -444,13 +414,9 @@ retry:
}
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (addr);
bad:
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (0);
}
@@ -469,11 +435,8 @@ kmem_alloc_wait(map, size)
vm_size_t size;
{
vm_offset_t addr;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
size = round_page(size);
@@ -488,17 +451,13 @@ kmem_alloc_wait(map, size)
/* no space now; see if we can ever get space */
if (vm_map_max(map) - vm_map_min(map) < size) {
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (0);
}
vm_map_unlock(map);
- msleep(map, &vm_mtx, PVM, "kmaw", 0);
+ tsleep(map, PVM, "kmaw", 0);
}
vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (addr);
}
@@ -514,17 +473,12 @@ kmem_free_wakeup(map, addr, size)
vm_offset_t addr;
vm_size_t size;
{
- int hadvmlock;
+ GIANT_REQUIRED;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
vm_map_lock(map);
(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
wakeup(map);
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
}
/*
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 229a822..707f5e6 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -173,7 +173,7 @@ vmspace_alloc(min, max)
{
struct vmspace *vm;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm = zalloc(vmspace_zone);
CTR1(KTR_VM, "vmspace_alloc: %p", vm);
vm_map_init(&vm->vm_map, min, max);
@@ -201,8 +201,8 @@ void
vmspace_free(vm)
struct vmspace *vm;
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
if (vm->vm_refcnt == 0)
panic("vmspace_free: attempt to free already freed vmspace");
@@ -273,7 +273,8 @@ vm_map_create(pmap, min, max)
{
vm_map_t result;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
result = zalloc(mapzone);
CTR1(KTR_VM, "vm_map_create: %p", result);
vm_map_init(result, min, max);
@@ -291,8 +292,8 @@ vm_map_init(map, min, max)
struct vm_map *map;
vm_offset_t min, max;
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
map->header.next = map->header.prev = &map->header;
map->nentries = 0;
map->size = 0;
@@ -310,8 +311,7 @@ void
vm_map_destroy(map)
struct vm_map *map;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
lockdestroy(&map->lock);
}
@@ -400,8 +400,6 @@ vm_map_entry_unlink(vm_map_t map,
* in the "entry" parameter. The boolean
* result indicates whether the address is
* actually contained in the map.
- *
- * Doesn't block.
*/
boolean_t
vm_map_lookup_entry(map, address, entry)
@@ -412,7 +410,7 @@ vm_map_lookup_entry(map, address, entry)
vm_map_entry_t cur;
vm_map_entry_t last;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Start looking either from the head of the list, or from the hint.
*/
@@ -492,7 +490,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_map_entry_t temp_entry;
vm_eflags_t protoeflags;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Check that the start and end points are not bogus.
*/
@@ -654,7 +653,7 @@ vm_map_findspace(map, start, length, addr)
vm_map_entry_t entry, next;
vm_offset_t end;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (start < map->min_offset)
start = map->min_offset;
if (start > map->max_offset)
@@ -723,7 +722,8 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_offset_t start;
int result, s = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
start = *addr;
if (map == kmem_map)
@@ -769,7 +769,8 @@ vm_map_simplify_entry(map, entry)
vm_map_entry_t next, prev;
vm_size_t prevsize, esize;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
return;
@@ -988,7 +989,8 @@ vm_map_submap(map, start, end, submap)
vm_map_entry_t entry;
int result = KERN_INVALID_ARGUMENT;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1027,7 +1029,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t current;
vm_map_entry_t entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1117,7 +1119,8 @@ vm_map_madvise(map, start, end, behav)
vm_map_entry_t current, entry;
int modify_map = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Some madvise calls directly modify the vm_map_entry, in which case
* we need to use an exclusive lock on the map and we need to perform
@@ -1271,7 +1274,8 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t entry;
vm_map_entry_t temp_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
switch (new_inheritance) {
case VM_INHERIT_NONE:
case VM_INHERIT_COPY:
@@ -1458,7 +1462,8 @@ vm_map_pageable(map, start, end, new_pageable)
vm_offset_t failed = 0;
int rv;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1689,8 +1694,8 @@ vm_map_clean(map, start, end, syncio, invalidate)
vm_object_t object;
vm_ooffset_t offset;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &entry)) {
@@ -1769,9 +1774,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
int flags;
vm_object_reference(object);
- mtx_unlock(&vm_mtx);
vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
- mtx_lock(&vm_mtx);
flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
flags |= invalidate ? OBJPC_INVAL : 0;
vm_object_page_clean(object,
@@ -1848,7 +1851,8 @@ vm_map_delete(map, start, end)
vm_map_entry_t entry;
vm_map_entry_t first_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Find the start of the region, and clip it
*/
@@ -1950,7 +1954,8 @@ vm_map_remove(map, start, end)
{
int result, s = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (map == kmem_map)
s = splvm();
@@ -1979,7 +1984,8 @@ vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t entry;
vm_map_entry_t tmp_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
return (FALSE);
}
@@ -2027,7 +2033,8 @@ vm_map_split(entry)
vm_size_t size;
vm_ooffset_t offset;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
orig_object = entry->object.vm_object;
if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
return;
@@ -2194,7 +2201,8 @@ vmspace_fork(vm1)
vm_map_entry_t new_entry;
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(old_map);
old_map->infork = 1;
@@ -2304,7 +2312,8 @@ vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
vm_size_t init_ssize;
int rv;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
return (KERN_NO_SPACE);
@@ -2368,8 +2377,6 @@ vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
* the stack. Also returns KERN_SUCCESS if addr is outside the
* stack range (this is strange, but preserves compatibility with
* the grow function in vm_machdep.c).
- *
- * Will grab vm_mtx if needed
*/
int
vm_map_growstack (struct proc *p, vm_offset_t addr)
@@ -2383,16 +2390,8 @@ vm_map_growstack (struct proc *p, vm_offset_t addr)
int grow_amount;
int rv;
int is_procstack;
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
-#define myreturn(rval) do { \
- if (!hadvmlock) \
- mtx_unlock(&vm_mtx); \
- return (rval); \
-} while (0)
+
+ GIANT_REQUIRED;
Retry:
vm_map_lock_read(map);
@@ -2400,12 +2399,12 @@ Retry:
/* If addr is already in the entry range, no need to grow.*/
if (vm_map_lookup_entry(map, addr, &prev_entry)) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
if ((stack_entry = prev_entry->next) == &map->header) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
if (prev_entry == &map->header)
end = stack_entry->start - stack_entry->avail_ssize;
@@ -2423,14 +2422,14 @@ Retry:
addr >= stack_entry->start ||
addr < stack_entry->start - stack_entry->avail_ssize) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
/* Find the minimum grow amount */
grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
if (grow_amount > stack_entry->avail_ssize) {
vm_map_unlock_read(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
/* If there is no longer enough space between the entries
@@ -2449,7 +2448,7 @@ Retry:
stack_entry->avail_ssize = stack_entry->start - end;
vm_map_unlock(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
@@ -2460,7 +2459,7 @@ Retry:
if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
vm_map_unlock_read(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
/* Round up the grow amount modulo SGROWSIZ */
@@ -2512,8 +2511,7 @@ Retry:
}
vm_map_unlock(map);
- myreturn (rv);
-#undef myreturn
+ return (rv);
}
/*
@@ -2527,7 +2525,7 @@ vmspace_exec(struct proc *p) {
struct vmspace *newvmspace;
vm_map_t map = &p->p_vmspace->vm_map;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
(caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
@@ -2555,7 +2553,7 @@ vmspace_unshare(struct proc *p) {
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (oldvmspace->vm_refcnt == 1)
return;
newvmspace = vmspace_fork(oldvmspace);
@@ -2588,9 +2586,6 @@ vmspace_unshare(struct proc *p) {
* specified, the map may be changed to perform virtual
* copying operations, although the data referenced will
* remain the same.
- *
- * Can block locking maps and while calling vm_object_shadow().
- * Will drop/reaquire the vm_mtx.
*/
int
vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
@@ -2607,7 +2602,7 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
vm_prot_t prot;
vm_prot_t fault_type = fault_typea;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
RetryLookup:;
/*
@@ -2779,8 +2774,7 @@ vm_map_lookup_done(map, entry)
/*
* Unlock the main-level map
*/
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm_map_unlock_read(map);
}
@@ -2809,7 +2803,8 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
off_t ooffset;
int cnt;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (npages)
*npages = 0;
@@ -3021,8 +3016,6 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
* Performs the copy_on_write operations necessary to allow the virtual copies
* into user space to work. This has to be called for write(2) system calls
* from other processes, file unlinking, and file size shrinkage.
- *
- * Requires that the vm_mtx is held
*/
void
vm_freeze_copyopts(object, froma, toa)
@@ -3033,7 +3026,7 @@ vm_freeze_copyopts(object, froma, toa)
vm_object_t robject;
vm_pindex_t idx;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if ((object == NULL) ||
((object->flags & OBJ_OPT) == 0))
return;
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 5ea3ccf..5442c85 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -212,7 +212,6 @@ struct vmspace {
do { \
lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
&(map)->ref_lock, curproc); \
- mtx_lock(&vm_mtx); \
(map)->timestamp++; \
} while(0)
@@ -227,11 +226,9 @@ struct vmspace {
#define vm_map_lock(map) \
do { \
vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map); \
- mtx_assert(&vm_mtx, MA_OWNED); \
- if (lockmgr(&(map)->lock, LK_EXCLUSIVE | LK_INTERLOCK, \
- &vm_mtx, curproc) != 0) \
+ if (lockmgr(&(map)->lock, LK_EXCLUSIVE, \
+ NULL, curproc) != 0) \
panic("vm_map_lock: failed to get lock"); \
- mtx_lock(&vm_mtx); \
(map)->timestamp++; \
} while(0)
@@ -244,10 +241,8 @@ struct vmspace {
#define vm_map_lock_read(map) \
do { \
vm_map_printf("locking map LK_SHARED: %p\n", map); \
- mtx_assert(&vm_mtx, MA_OWNED); \
- lockmgr(&(map)->lock, LK_SHARED | LK_INTERLOCK, \
- &vm_mtx, curproc); \
- mtx_lock(&vm_mtx); \
+ lockmgr(&(map)->lock, LK_SHARED, \
+ NULL, curproc); \
} while (0)
#define vm_map_unlock_read(map) \
@@ -261,8 +256,7 @@ _vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
int error;
vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
- error = lockmgr(&map->lock, LK_EXCLUPGRADE | LK_INTERLOCK, &vm_mtx, p);
- mtx_lock(&vm_mtx);
+ error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, p);
if (error == 0)
map->timestamp++;
return error;
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 24af2ec..3ff9e68 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -145,10 +145,9 @@ vmtotal(SYSCTL_HANDLER_ARGS)
/*
* Mark all objects as inactive.
*/
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
TAILQ_FOREACH(object, &vm_object_list, object_list)
vm_object_clear_flag(object, OBJ_ACTIVE);
- mtx_unlock(&vm_mtx);
/*
* Calculate process statistics.
*/
@@ -199,7 +198,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
* Note active objects.
*/
paging = 0;
- mtx_lock(&vm_mtx);
for (map = &p->p_vmspace->vm_map, entry = map->header.next;
entry != &map->header; entry = entry->next) {
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
@@ -208,7 +206,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
vm_object_set_flag(entry->object.vm_object, OBJ_ACTIVE);
paging |= entry->object.vm_object->paging_in_progress;
}
- mtx_unlock(&vm_mtx);
if (paging)
totalp->t_pw++;
}
@@ -216,7 +213,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
/*
* Calculate object memory usage statistics.
*/
- mtx_lock(&vm_mtx);
TAILQ_FOREACH(object, &vm_object_list, object_list) {
/*
* devices, like /dev/mem, will badly skew our totals
@@ -240,7 +236,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
}
}
totalp->t_free = cnt.v_free_count + cnt.v_cache_count;
- mtx_unlock(&vm_mtx);
return (sysctl_handle_opaque(oidp, totalp, sizeof total, req));
}
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 4b9abe3..fcc78e6 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -113,8 +113,9 @@ sbrk(p, uap)
struct proc *p;
struct sbrk_args *uap;
{
-
/* Not yet implemented */
+ /* mtx_lock(&Giant); */
+ /* mtx_unlock(&Giant); */
return (EOPNOTSUPP);
}
@@ -130,8 +131,9 @@ sstk(p, uap)
struct proc *p;
struct sstk_args *uap;
{
-
/* Not yet implemented */
+ /* mtx_lock(&Giant); */
+ /* mtx_unlock(&Giant); */
return (EOPNOTSUPP);
}
@@ -148,7 +150,7 @@ ogetpagesize(p, uap)
struct proc *p;
struct getpagesize_args *uap;
{
-
+ /* MP SAFE */
p->p_retval[0] = PAGE_SIZE;
return (0);
}
@@ -268,7 +270,7 @@ mmap(p, uap)
addr < round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ)))
addr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
- mtx_lock(&Giant);
+ mtx_lock(&Giant); /* syscall marked mp-safe but isn't */
if (flags & MAP_ANON) {
/*
* Mapping blank space is trivial.
@@ -315,8 +317,10 @@ mmap(p, uap)
/*
* Get the proper underlying object
*/
- if (VOP_GETVOBJECT(vp, &obj) != 0)
- return (EINVAL);
+ if (VOP_GETVOBJECT(vp, &obj) != 0) {
+ error = EINVAL;
+ goto done;
+ }
vp = (struct vnode*)obj->handle;
}
/*
@@ -518,6 +522,8 @@ msync(p, uap)
if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
return (EINVAL);
+ mtx_lock(&Giant);
+
map = &p->p_vmspace->vm_map;
/*
@@ -527,10 +533,6 @@ msync(p, uap)
* the range of the map entry containing addr. This can be incorrect
* if the region splits or is coalesced with a neighbor.
*/
-#ifndef BLEED
- mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
if (size == 0) {
vm_map_entry_t entry;
@@ -538,10 +540,7 @@ msync(p, uap)
rv = vm_map_lookup_entry(map, addr, &entry);
vm_map_unlock_read(map);
if (rv == FALSE) {
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (EINVAL);
}
addr = entry->start;
@@ -554,10 +553,8 @@ msync(p, uap)
rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
(flags & MS_INVALIDATE) != 0);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
+
switch (rv) {
case KERN_SUCCESS:
break;
@@ -610,20 +607,17 @@ munmap(p, uap)
if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
return (EINVAL);
#endif
+ mtx_lock(&Giant);
map = &p->p_vmspace->vm_map;
/*
* Make sure entire range is allocated.
*/
- mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE)) {
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
return (EINVAL);
}
/* returns nothing but KERN_SUCCESS anyway */
(void) vm_map_remove(map, addr, addr + size);
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
return (0);
}
@@ -674,10 +668,8 @@ mprotect(p, uap)
return(EINVAL);
mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
ret = vm_map_protect(&p->p_vmspace->vm_map, addr,
addr + size, prot, FALSE);
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
switch (ret) {
case KERN_SUCCESS:
@@ -716,16 +708,10 @@ minherit(p, uap)
if (addr + size < addr)
return(EINVAL);
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
ret = vm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
inherit);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
switch (ret) {
case KERN_SUCCESS:
@@ -779,15 +765,9 @@ madvise(p, uap)
start = trunc_page((vm_offset_t) uap->addr);
end = round_page((vm_offset_t) uap->addr + uap->len);
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
ret = vm_map_madvise(&p->p_vmspace->vm_map, start, end, uap->behav);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (ret ? EINVAL : 0);
}
@@ -833,11 +813,8 @@ mincore(p, uap)
*/
vec = uap->vec;
- map = &p->p_vmspace->vm_map;
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
+ map = &p->p_vmspace->vm_map;
pmap = vmspace_pmap(p->p_vmspace);
vm_map_lock_read(map);
@@ -917,7 +894,6 @@ RestartScan:
* the map, we release the lock.
*/
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
/*
* calculate index into user supplied byte vector
@@ -931,9 +907,7 @@ RestartScan:
while((lastvecindex + 1) < vecindex) {
error = subyte( vec + lastvecindex, 0);
if (error) {
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (EFAULT);
}
++lastvecindex;
@@ -944,9 +918,7 @@ RestartScan:
*/
error = subyte( vec + vecindex, mincoreinfo);
if (error) {
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (EFAULT);
}
@@ -954,7 +926,6 @@ RestartScan:
* If the map has changed, due to the subyte, the previous
* output may be invalid.
*/
- mtx_lock(&vm_mtx);
vm_map_lock_read(map);
if (timestamp != map->timestamp)
goto RestartScan;
@@ -969,7 +940,6 @@ RestartScan:
* the map, we release the lock.
*/
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
/*
* Zero the last entries in the byte vector.
@@ -978,9 +948,7 @@ RestartScan:
while((lastvecindex + 1) < vecindex) {
error = subyte( vec + lastvecindex, 0);
if (error) {
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (EFAULT);
}
++lastvecindex;
@@ -990,15 +958,11 @@ RestartScan:
* If the map has changed, due to the subyte, the previous
* output may be invalid.
*/
- mtx_lock(&vm_mtx);
vm_map_lock_read(map);
if (timestamp != map->timestamp)
goto RestartScan;
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (0);
}
@@ -1043,16 +1007,10 @@ mlock(p, uap)
return (error);
#endif
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr,
addr + size, FALSE);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -1067,6 +1025,8 @@ mlockall(p, uap)
struct proc *p;
struct mlockall_args *uap;
{
+ /* mtx_lock(&Giant); */
+ /* mtx_unlock(&Giant); */
return 0;
}
@@ -1081,6 +1041,8 @@ munlockall(p, uap)
struct proc *p;
struct munlockall_args *uap;
{
+ /* mtx_lock(&Giant); */
+ /* mtx_unlock(&Giant); */
return 0;
}
@@ -1117,16 +1079,10 @@ munlock(p, uap)
return (error);
#endif
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr,
addr + size, TRUE);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -1175,9 +1131,7 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
return (EINVAL);
fitit = FALSE;
mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
(void) vm_map_remove(map, *addr, *addr + size);
- mtx_unlock(&vm_mtx);
}
/*
@@ -1252,7 +1206,6 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
maxprot |= VM_PROT_EXECUTE;
#endif
- mtx_lock(&vm_mtx);
if (fitit)
*addr = pmap_addr_hint(object, *addr, size);
@@ -1279,7 +1232,6 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
if (rv != KERN_SUCCESS)
(void) vm_map_remove(map, *addr, *addr + size);
}
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
switch (rv) {
case KERN_SUCCESS:
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 9717325..96be4c0 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -147,7 +147,8 @@ _vm_object_allocate(type, size, object)
{
int incr;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
TAILQ_INIT(&object->memq);
TAILQ_INIT(&object->shadow_head);
@@ -192,8 +193,8 @@ _vm_object_allocate(type, size, object)
void
vm_object_init()
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", MTX_DEF);
vm_object_count = 0;
@@ -230,7 +231,8 @@ vm_object_allocate(type, size)
{
vm_object_t result;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
result = (vm_object_t) zalloc(obj_zone);
_vm_object_allocate(type, size, result);
@@ -247,8 +249,8 @@ void
vm_object_reference(object)
vm_object_t object;
{
+ GIANT_REQUIRED;
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
if (object == NULL)
return;
@@ -257,20 +259,14 @@ vm_object_reference(object)
object->ref_count++;
if (object->type == OBJT_VNODE) {
- mtx_unlock(VM_OBJECT_MTX(object));
- mtx_assert(&Giant, MA_OWNED);
while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
printf("vm_object_reference: delay in getting object\n");
}
- mtx_lock(VM_OBJECT_MTX(object));
}
}
/*
* handle deallocating a object of type OBJT_VNODE
- *
- * requires vm_mtx
- * may block
*/
void
vm_object_vndeallocate(object)
@@ -278,7 +274,7 @@ vm_object_vndeallocate(object)
{
struct vnode *vp = (struct vnode *) object->handle;
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@@ -297,10 +293,7 @@ vm_object_vndeallocate(object)
/*
* vrele may need a vop lock
*/
- mtx_unlock(VM_OBJECT_MTX(object));
- mtx_assert(&Giant, MA_OWNED);
vrele(vp);
- mtx_lock(VM_OBJECT_MTX(object));
}
/*
@@ -313,7 +306,6 @@ vm_object_vndeallocate(object)
* may be relinquished.
*
* No object may be locked.
- * vm_mtx must be held
*/
void
vm_object_deallocate(object)
@@ -321,7 +313,8 @@ vm_object_deallocate(object)
{
vm_object_t temp;
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
+
while (object != NULL) {
if (object->type == OBJT_VNODE) {
@@ -355,9 +348,6 @@ vm_object_deallocate(object)
("vm_object_deallocate: ref_count: %d, shadow_count: %d",
object->ref_count,
object->shadow_count));
-#ifdef objlocks
- mtx_lock(VM_OBJECT_MTX(robject));
-#endif
if ((robject->handle == NULL) &&
(robject->type == OBJT_DEFAULT ||
robject->type == OBJT_SWAP)) {
@@ -368,32 +358,16 @@ vm_object_deallocate(object)
robject->paging_in_progress ||
object->paging_in_progress
) {
-#ifdef objlocks
- mtx_unlock(VM_OBJECT_MTX(object));
-#endif
vm_object_pip_sleep(robject, "objde1");
-#ifdef objlocks
- mtx_unlock(VM_OBJECT_MTX(robject));
- mtx_lock(VM_OBJECT_MTX(object));
-#endif
vm_object_pip_sleep(object, "objde2");
-#ifdef objlocks
- mtx_lock(VM_OBJECT_MTX(robject));
-#endif
}
if (robject->ref_count == 1) {
robject->ref_count--;
-#ifdef objlocks
- mtx_unlock(VM_OBJECT_MTX(object));
-#endif
object = robject;
goto doterm;
}
-#ifdef objlocks
- mtx_unlock(VM_OBJECT_MTX(object));
-#endif
object = robject;
vm_object_collapse(object);
continue;
@@ -435,8 +409,8 @@ vm_object_terminate(object)
vm_page_t p;
int s;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Make sure no one uses us.
*/
@@ -468,9 +442,7 @@ vm_object_terminate(object)
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
vp = (struct vnode *) object->handle;
- mtx_unlock(VM_OBJECT_MTX(object));
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
- mtx_lock(VM_OBJECT_MTX(object));
}
KASSERT(object->ref_count == 0,
@@ -555,7 +527,8 @@ vm_object_page_clean(object, start, end, flags)
vm_page_t ma[vm_pageout_page_count];
int curgeneration;
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
+
if (object->type != OBJT_VNODE ||
(object->flags & OBJ_MIGHTBEDIRTY) == 0)
return;
@@ -763,7 +736,8 @@ vm_object_pmap_copy_1(object, start, end)
vm_pindex_t idx;
vm_page_t p;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
return;
@@ -791,7 +765,7 @@ vm_object_pmap_remove(object, start, end)
{
vm_page_t p;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object == NULL)
return;
TAILQ_FOREACH(p, &object->memq, listq) {
@@ -834,7 +808,7 @@ vm_object_madvise(object, pindex, count, advise)
vm_object_t tobject;
vm_page_t m;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object == NULL)
return;
@@ -948,7 +922,7 @@ vm_object_shadow(object, offset, length)
vm_object_t source;
vm_object_t result;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
source = *object;
/*
@@ -1015,7 +989,7 @@ vm_object_backing_scan(vm_object_t object, int op)
vm_pindex_t backing_offset_index;
s = splvm();
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
backing_object = object->backing_object;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1229,8 +1203,7 @@ void
vm_object_collapse(object)
vm_object_t object;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
while (TRUE) {
vm_object_t backing_object;
@@ -1443,7 +1416,7 @@ vm_object_page_remove(object, start, end, clean_only)
unsigned int size;
int all;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object == NULL ||
object->resident_page_count == 0)
@@ -1561,7 +1534,7 @@ vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
{
vm_pindex_t next_pindex;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (prev_object == NULL) {
return (TRUE);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 2b29baf..c9c0920 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -169,49 +169,44 @@ extern vm_object_t kmem_object;
#ifdef _KERNEL
-/*
- * For now a global vm lock.
- */
-#define VM_OBJECT_MTX(object) (&vm_mtx)
-
static __inline void
vm_object_set_flag(vm_object_t object, u_short bits)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->flags |= bits;
+ GIANT_REQUIRED;
+ atomic_set_short(&object->flags, bits);
+ /* object->flags |= bits; */
}
static __inline void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->flags &= ~bits;
+ GIANT_REQUIRED;
+ atomic_clear_short(&object->flags, bits);
+ /* object->flags &= ~bits; */
}
static __inline void
vm_object_pip_add(vm_object_t object, short i)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->paging_in_progress += i;
+ GIANT_REQUIRED;
+ atomic_add_short(&object->paging_in_progress, i);
+ /* object->paging_in_progress += i; */
}
static __inline void
vm_object_pip_subtract(vm_object_t object, short i)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->paging_in_progress -= i;
+ GIANT_REQUIRED;
+ atomic_subtract_short(&object->paging_in_progress, i);
+ /* object->paging_in_progress -= i; */
}
static __inline void
vm_object_pip_wakeup(vm_object_t object)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->paging_in_progress--;
+ GIANT_REQUIRED;
+ atomic_subtract_short(&object->paging_in_progress, 1);
+ /* object->paging_in_progress--; */
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
@@ -221,10 +216,9 @@ vm_object_pip_wakeup(vm_object_t object)
static __inline void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
if (i)
- object->paging_in_progress -= i;
+ atomic_subtract_short(&object->paging_in_progress, i);
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
@@ -234,13 +228,12 @@ vm_object_pip_wakeupn(vm_object_t object, short i)
static __inline void
vm_object_pip_sleep(vm_object_t object, char *waitid)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
if (object->paging_in_progress) {
int s = splvm();
if (object->paging_in_progress) {
vm_object_set_flag(object, OBJ_PIPWNT);
- msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
+ tsleep(object, PVM, waitid, 0);
}
splx(s);
}
@@ -249,8 +242,7 @@ vm_object_pip_sleep(vm_object_t object, char *waitid)
static __inline void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
while (object->paging_in_progress)
vm_object_pip_sleep(object, waitid);
}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index dc391cb..e5ef9f8 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -148,7 +148,6 @@ vm_set_page_size()
*
* Add a new page to the freelist for use by the system.
* Must be called at splhigh().
- * Must be called with the vm_mtx held.
*/
vm_page_t
vm_add_new_page(pa)
@@ -156,7 +155,8 @@ vm_add_new_page(pa)
{
vm_page_t m;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
++cnt.v_page_count;
++cnt.v_free_count;
m = PHYS_TO_VM_PAGE(pa);
@@ -363,7 +363,8 @@ vm_page_insert(m, object, pindex)
{
register struct vm_page **bucket;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (m->object != NULL)
panic("vm_page_insert: already inserted");
@@ -423,7 +424,8 @@ vm_page_remove(m)
{
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (m->object == NULL)
return;
@@ -487,8 +489,6 @@ vm_page_remove(m)
* an interrupt makes a change, but the generation algorithm will not
* operate properly in an SMP environment where both cpu's are able to run
* kernel code simultaneously.
- * NOTE: under the giant vm lock we should be ok, there should be
- * no reason to check vm_page_bucket_generation
*
* The object must be locked. No side effects.
* This routine may not block.
@@ -604,7 +604,7 @@ vm_page_unqueue(m)
int queue = m->queue;
struct vpgqueues *pq;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (queue != PQ_NONE) {
m->queue = PQ_NONE;
pq = &vm_page_queues[queue];
@@ -645,7 +645,7 @@ _vm_page_list_find(basequeue, index)
vm_page_t m = NULL;
struct vpgqueues *pq;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
pq = &vm_page_queues[basequeue];
/*
@@ -683,7 +683,7 @@ vm_page_select_cache(object, pindex)
{
vm_page_t m;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
while (TRUE) {
m = vm_page_list_find(
PQ_CACHE,
@@ -735,7 +735,6 @@ vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zer
* VM_ALLOC_INTERRUPT interrupt time request
* VM_ALLOC_ZERO zero page
*
- * vm_mtx must be locked.
* This routine may not block.
*
* Additional special handling is required when called from an
@@ -752,7 +751,8 @@ vm_page_alloc(object, pindex, page_req)
register vm_page_t m = NULL;
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
KASSERT(!vm_page_lookup(object, pindex),
("vm_page_alloc: page already allocated"));
@@ -885,13 +885,13 @@ vm_wait()
s = splvm();
if (curproc == pageproc) {
vm_pageout_pages_needed = 1;
- msleep(&vm_pageout_pages_needed, &vm_mtx, PSWP, "VMWait", 0);
+ tsleep(&vm_pageout_pages_needed, PSWP, "VMWait", 0);
} else {
if (!vm_pages_needed) {
vm_pages_needed = 1;
wakeup(&vm_pages_needed);
}
- msleep(&cnt.v_free_count, &vm_mtx, PVM, "vmwait", 0);
+ tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
}
splx(s);
}
@@ -938,8 +938,9 @@ vm_page_activate(m)
{
int s;
+ GIANT_REQUIRED;
s = splvm();
- mtx_assert(&vm_mtx, MA_OWNED);
+
if (m->queue != PQ_ACTIVE) {
if ((m->queue - m->pc) == PQ_CACHE)
cnt.v_reactivated++;
@@ -1012,9 +1013,8 @@ vm_page_free_toq(vm_page_t m)
struct vpgqueues *pq;
vm_object_t object = m->object;
+ GIANT_REQUIRED;
s = splvm();
-
- mtx_assert(&vm_mtx, MA_OWNED);
cnt.v_tfree++;
if (m->busy || ((m->queue - m->pc) == PQ_FREE) ||
@@ -1252,7 +1252,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
{
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Ignore if already inactive.
*/
@@ -1290,8 +1290,8 @@ vm_page_deactivate(vm_page_t m)
int
vm_page_try_to_cache(vm_page_t m)
{
+ GIANT_REQUIRED;
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->flags & (PG_BUSY|PG_UNMANAGED))) {
return(0);
@@ -1339,7 +1339,7 @@ vm_page_cache(m)
{
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
printf("vm_page_cache: attempting to cache busy page\n");
return;
@@ -1397,7 +1397,7 @@ vm_page_dontneed(m)
int dnw;
int head;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
dnw = ++dnweight;
/*
@@ -1438,7 +1438,6 @@ vm_page_dontneed(m)
* to be in the object. If the page doesn't exist, allocate it.
*
* This routine may block.
- * Requires vm_mtx.
*/
vm_page_t
vm_page_grab(object, pindex, allocflags)
@@ -1449,7 +1448,7 @@ vm_page_grab(object, pindex, allocflags)
vm_page_t m;
int s, generation;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
retrylookup:
if ((m = vm_page_lookup(object, pindex)) != NULL) {
if (m->busy || (m->flags & PG_BUSY)) {
@@ -1459,7 +1458,7 @@ retrylookup:
while ((object->generation == generation) &&
(m->busy || (m->flags & PG_BUSY))) {
vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
- msleep(m, &vm_mtx, PVM, "pgrbwt", 0);
+ tsleep(m, PVM, "pgrbwt", 0);
if ((allocflags & VM_ALLOC_RETRY) == 0) {
splx(s);
return NULL;
@@ -1522,8 +1521,6 @@ vm_page_bits(int base, int size)
* This routine may not block.
*
* (base + size) must be less then or equal to PAGE_SIZE.
- *
- * vm_mtx needs to be held
*/
void
vm_page_set_validclean(m, base, size)
@@ -1535,7 +1532,7 @@ vm_page_set_validclean(m, base, size)
int frag;
int endoff;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (size == 0) /* handle degenerate case */
return;
@@ -1609,8 +1606,7 @@ vm_page_clear_dirty(m, base, size)
int base;
int size;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
m->dirty &= ~vm_page_bits(base, size);
}
@@ -1630,7 +1626,7 @@ vm_page_set_invalid(m, base, size)
{
int bits;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
bits = vm_page_bits(base, size);
m->valid &= ~bits;
m->dirty &= ~bits;
@@ -1918,16 +1914,10 @@ contigmalloc(size, type, flags, low, high, alignment, boundary)
unsigned long boundary;
{
void * ret;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
ret = contigmalloc1(size, type, flags, low, high, alignment, boundary,
kernel_map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
-
return (ret);
}
@@ -1938,14 +1928,8 @@ contigfree(addr, size, type)
unsigned long size;
struct malloc_type *type;
{
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
kmem_free(kernel_map, (vm_offset_t)addr, size);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
}
vm_offset_t
@@ -1956,15 +1940,10 @@ vm_page_alloc_contig(size, low, high, alignment)
vm_offset_t alignment;
{
vm_offset_t ret;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
ret = ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
alignment, 0ul, kernel_map));
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (ret);
}
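
contigmalloc(), contigfree() and vm_page_alloc_contig() previously used the "hadvmlock" idiom: check whether the caller already owned vm_mtx, take the lock only if not, and drop it only if this frame took it. The new code deletes that dance and simply requires the caller to hold Giant. Below is a small pthread-based sketch of the difference, under the assumption that lock ownership is tracked in a thread-local flag (the kernel tracks it in the mutex itself, via mtx_owned()).

/* cond_lock.c -- sketch of the "hadvmlock" conditional-locking idiom removed above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static _Thread_local bool vm_lock_mine;   /* does this thread hold vm_lock? */

/* Old style: take the lock only if the caller did not already hold it. */
static void old_style_contigfree(void) {
    bool hadlock = vm_lock_mine;
    if (!hadlock) {
        pthread_mutex_lock(&vm_lock);
        vm_lock_mine = true;
    }
    puts("freeing contiguous memory under the VM lock");
    if (!hadlock) {
        vm_lock_mine = false;
        pthread_mutex_unlock(&vm_lock);
    }
}

/* New style: simply require that the caller already holds the lock. */
static void new_style_contigfree(void) {
    if (!vm_lock_mine) {                  /* GIANT_REQUIRED in the kernel */
        fprintf(stderr, "caller must hold the lock\n");
        return;
    }
    puts("freeing contiguous memory, lock ownership asserted");
}

int main(void) {
    old_style_contigfree();               /* works locked or unlocked */
    pthread_mutex_lock(&vm_lock);
    vm_lock_mine = true;
    new_style_contigfree();               /* caller supplies the lock */
    vm_lock_mine = false;
    pthread_mutex_unlock(&vm_lock);
    return 0;
}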
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 6bc7266..1050e8e 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -305,28 +305,23 @@ extern long first_page; /* first physical page number */
(&vm_page_array[atop(pa) - first_page ])
/*
- * For now, a global vm lock
- */
-#define VM_PAGE_MTX(m) (&vm_mtx)
-
-/*
* Functions implemented as macros
*/
static __inline void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
- m->flags |= bits;
+ GIANT_REQUIRED;
+ atomic_set_short(&(m)->flags, bits);
+ /* m->flags |= bits; */
}
static __inline void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
- m->flags &= ~bits;
+ GIANT_REQUIRED;
+ atomic_clear_short(&(m)->flags, bits);
+ /* m->flags &= ~bits; */
}
#if 0
@@ -386,17 +381,15 @@ vm_page_wakeup(vm_page_t m)
static __inline void
vm_page_io_start(vm_page_t m)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
- m->busy++;
+ GIANT_REQUIRED;
+ atomic_add_char(&(m)->busy, 1);
}
static __inline void
vm_page_io_finish(vm_page_t m)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
- m->busy--;
+ GIANT_REQUIRED;
+ atomic_subtract_char(&(m)->busy, 1);
if (m->busy == 0)
vm_page_flash(m);
}
@@ -463,16 +456,14 @@ void vm_page_free_toq(vm_page_t m);
static __inline void
vm_page_hold(vm_page_t mem)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
+ GIANT_REQUIRED;
mem->hold_count++;
}
static __inline void
vm_page_unhold(vm_page_t mem)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
+ GIANT_REQUIRED;
--mem->hold_count;
KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
}
@@ -578,6 +569,7 @@ vm_page_free_zero(m)
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
+ GIANT_REQUIRED;
if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
int s = splvm();
if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
@@ -585,7 +577,7 @@ vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
* Page is busy. Wait and retry.
*/
vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
- msleep(m, VM_PAGE_MTX(m), PVM, msg, 0);
+ tsleep(m, PVM, msg, 0);
}
splx(s);
return(TRUE);
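
In vm_page.h the flag and busy-count inlines now pair the Giant assertion with atomic read-modify-write operations (atomic_set_short(), atomic_add_char(), and so on) instead of plain |= and ++. The sketch below models the same set/clear/increment pattern with C11 <stdatomic.h>; the struct page layout, field widths and PG_* values are illustrative, and the kernel's atomic_*_short/char primitives have their own argument types.

/* page_flags.c -- sketch of atomic flag set/clear like the vm_page.h inlines above. */
#include <stdatomic.h>
#include <stdio.h>

#define PG_BUSY   0x0001
#define PG_WANTED 0x0002

struct page {
    _Atomic unsigned short flags;
    _Atomic unsigned char busy;          /* outstanding I/O count */
};

static void page_flag_set(struct page *m, unsigned short bits) {
    atomic_fetch_or(&m->flags, bits);                     /* m->flags |= bits, atomically */
}

static void page_flag_clear(struct page *m, unsigned short bits) {
    atomic_fetch_and(&m->flags, (unsigned short)~bits);   /* m->flags &= ~bits, atomically */
}

static void page_io_start(struct page *m)  { atomic_fetch_add(&m->busy, 1); }
static void page_io_finish(struct page *m) { atomic_fetch_sub(&m->busy, 1); }

int main(void) {
    struct page m = { .flags = 0, .busy = 0 };
    page_flag_set(&m, PG_BUSY | PG_WANTED);
    page_io_start(&m);
    page_io_finish(&m);
    page_flag_clear(&m, PG_BUSY);
    printf("flags=0x%04x busy=%u\n", (unsigned)m.flags, (unsigned)m.busy);
    return 0;
}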
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index f4c5670..444bfdc 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -226,7 +226,8 @@ vm_pageout_clean(m)
int ib, is, page_base;
vm_pindex_t pindex = m->pindex;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
object = m->object;
/*
@@ -366,7 +367,7 @@ vm_pageout_flush(mc, count, flags)
int numpagedout = 0;
int i;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Initiate I/O. Bump the vm_page_t->busy counter and
* mark the pages read-only.
@@ -449,8 +450,6 @@ vm_pageout_flush(mc, count, flags)
* backing_objects.
*
* The object and map must be locked.
- *
- * Requires the vm_mtx
*/
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
@@ -464,7 +463,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
int remove_mode;
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
return;
@@ -553,7 +552,7 @@ vm_pageout_map_deactivate_pages(map, desired)
vm_map_entry_t tmpe;
vm_object_t obj, bigobj;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
return;
}
@@ -619,7 +618,7 @@ vm_pageout_page_free(vm_page_t m) {
vm_object_t object = m->object;
int type = object->type;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (type == OBJT_SWAP || type == OBJT_DEFAULT)
vm_object_reference(object);
vm_page_busy(m);
@@ -649,8 +648,7 @@ vm_pageout_scan(int pass)
int maxlaunder;
int s;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Do whatever cleanup that the pmap code can.
*/
@@ -878,17 +876,14 @@ rescan0:
vp = object->handle;
mp = NULL;
- mtx_unlock(&vm_mtx);
if (vp->v_type == VREG)
vn_start_write(vp, &mp, V_NOWAIT);
if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
continue;
}
- mtx_lock(&vm_mtx);
/*
* The page might have been moved to another
@@ -902,10 +897,8 @@ rescan0:
object->handle != vp) {
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
- mtx_unlock(&vm_mtx);
vput(vp);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
continue;
}
@@ -916,10 +909,8 @@ rescan0:
* statistics are more correct if we don't.
*/
if (m->busy || (m->flags & PG_BUSY)) {
- mtx_unlock(&vm_mtx);
vput(vp);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
continue;
}
@@ -933,10 +924,8 @@ rescan0:
splx(s);
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
- mtx_unlock(&vm_mtx);
vput(vp);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
continue;
}
}
@@ -967,10 +956,8 @@ rescan0:
TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
splx(s);
if (vp) {
- mtx_unlock(&vm_mtx);
vput(vp);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
}
}
}
@@ -1154,11 +1141,9 @@ rescan0:
#if 0
if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
- mtx_unlock(&vm_mtx);
bigproc = NULL;
bigsize = 0;
sx_slock(&allproc_lock);
- mtx_lock(&vm_mtx);
LIST_FOREACH(p, &allproc, p_list) {
/*
* If this process is already locked, skip it.
@@ -1350,7 +1335,6 @@ vm_pageout()
int pass;
mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
/*
* Initialize some paging parameters.
@@ -1410,7 +1394,6 @@ vm_pageout()
*/
if (vm_pageout_stats_free_max == 0)
vm_pageout_stats_free_max = 5;
- mtx_unlock(&vm_mtx);
PROC_LOCK(curproc);
curproc->p_flag |= P_BUFEXHAUST;
@@ -1420,7 +1403,6 @@ vm_pageout()
/*
* The pageout daemon is never done, so loop forever.
*/
- mtx_lock(&vm_mtx);
while (TRUE) {
int error;
int s = splvm();
@@ -1444,7 +1426,7 @@ vm_pageout()
*/
++pass;
if (pass > 1)
- msleep(&vm_pages_needed, &vm_mtx, PVM,
+ tsleep(&vm_pages_needed, PVM,
"psleep", hz/2);
} else {
/*
@@ -1455,8 +1437,8 @@ vm_pageout()
pass = 1;
else
pass = 0;
- error = msleep(&vm_pages_needed, &vm_mtx,
- PVM, "psleep", vm_pageout_stats_interval * hz);
+ error = tsleep(&vm_pages_needed, PVM,
+ "psleep", vm_pageout_stats_interval * hz);
if (error && !vm_pages_needed) {
splx(s);
pass = 0;
@@ -1501,14 +1483,11 @@ vm_daemon()
mtx_lock(&Giant);
while (TRUE) {
- mtx_lock(&vm_mtx);
- msleep(&vm_daemon_needed, &vm_mtx, PPAUSE, "psleep", 0);
+ tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
if (vm_pageout_req_swapout) {
swapout_procs(vm_pageout_req_swapout);
- mtx_assert(&vm_mtx, MA_OWNED);
vm_pageout_req_swapout = 0;
}
- mtx_unlock(&vm_mtx);
/*
* scan the processes for exceeding their rlimits or if
* process is swapped out -- deactivate pages
@@ -1525,7 +1504,6 @@ vm_daemon()
if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
continue;
}
- mtx_lock(&vm_mtx);
/*
* if the process is in a non-running type state,
* don't touch it.
@@ -1533,7 +1511,6 @@ vm_daemon()
mtx_lock_spin(&sched_lock);
if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
mtx_unlock_spin(&sched_lock);
- mtx_unlock(&vm_mtx);
continue;
}
/*
@@ -1557,7 +1534,6 @@ vm_daemon()
vm_pageout_map_deactivate_pages(
&p->p_vmspace->vm_map, limit);
}
- mtx_unlock(&vm_mtx);
}
sx_sunlock(&allproc_lock);
}
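
With vm_mtx gone, the pageout and vm daemons above no longer drop and retake a VM mutex around vput()/vn_finished_write(), and their waits become plain tsleep() calls with an optional timeout. The snippet below is a rough userland analog of that "sleep until work arrives or a stats interval expires" loop using a condition variable; it only models the control flow, not tsleep()'s actual semantics with respect to Giant.

/* pageout_wait.c -- sketch of a sleep-until-work-or-timeout loop like vm_pageout(). */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pages_needed = PTHREAD_COND_INITIALIZER;
static int work_pending;

/* Wait up to 'seconds' for someone to signal that pages are needed.
 * Returns 0 if woken by a signal, ETIMEDOUT if the timer expired. */
static int wait_for_work(int seconds) {
    struct timespec ts;
    int error = 0;

    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += seconds;

    pthread_mutex_lock(&lock);
    while (!work_pending && error == 0)
        error = pthread_cond_timedwait(&pages_needed, &lock, &ts);
    work_pending = 0;
    pthread_mutex_unlock(&lock);
    return error;
}

int main(void) {
    /* With nobody posting work this times out, like the stats-interval sleep. */
    int error = wait_for_work(1);
    printf("wait returned %d (%s)\n", error,
        error ? "timed out: run the stats scan" : "woken: run the page scan");
    return 0;
}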
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index e53a14c..07d0655 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -245,18 +245,14 @@ vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
{
vm_object_t ret;
struct pagerops *ops;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
ops = pagertab[type];
if (ops)
ret = (*ops->pgo_alloc) (handle, size, prot, off);
else
ret = NULL;
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (ret);
}
@@ -264,8 +260,7 @@ void
vm_pager_deallocate(object)
vm_object_t object;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
(*pagertab[object->type]->pgo_dealloc) (object);
}
@@ -385,8 +380,6 @@ initpbuf(struct buf *bp)
*
* NOTE: pfreecnt can be NULL, but this 'feature' will be removed
* relatively soon when the rest of the subsystems get smart about it. XXX
- *
- * vm_mtx can be held or unheld
*/
struct buf *
getpbuf(pfreecnt)
@@ -394,12 +387,9 @@ getpbuf(pfreecnt)
{
int s;
struct buf *bp;
- int hadvmlock;
s = splvm();
- hadvmlock = mtx_owned(&vm_mtx);
- if (hadvmlock)
- mtx_unlock(&vm_mtx);
+ GIANT_REQUIRED;
mtx_lock(&pbuf_mtx);
for (;;) {
@@ -424,8 +414,6 @@ getpbuf(pfreecnt)
splx(s);
initpbuf(bp);
- if (hadvmlock)
- mtx_lock(&vm_mtx);
return bp;
}
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index b4511ca..427d103 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -124,12 +124,12 @@ vm_pager_get_pages(
) {
int r;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
vm_page_zero_invalid(m[reqpage], TRUE);
}
- mtx_assert(&vm_mtx, MA_OWNED);
return(r);
}
@@ -141,11 +141,9 @@ vm_pager_put_pages(
int flags,
int *rtvals
) {
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
(*pagertab[object->type]->pgo_putpages)
(object, m, count, flags, rtvals);
- mtx_assert(&vm_mtx, MA_OWNED);
}
/*
@@ -168,10 +166,9 @@ vm_pager_has_page(
) {
boolean_t ret;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
ret = (*pagertab[object->type]->pgo_haspage)
(object, offset, before, after);
- mtx_assert(&vm_mtx, MA_OWNED);
return (ret);
}
@@ -186,11 +183,9 @@ vm_pager_has_page(
static __inline void
vm_pager_page_unswapped(vm_page_t m)
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (pagertab[m->object->type]->pgo_pageunswapped)
(*pagertab[m->object->type]->pgo_pageunswapped)(m);
- mtx_assert(&vm_mtx, MA_OWNED);
}
#endif
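
The vm_pager.h wrappers dispatch through pagertab[], an array of pager operation vectors indexed by object type, and now only assert Giant around the indirect call. A compact sketch of that operations-table pattern follows; the two toy pagers and their functions are made up for illustration.

/* pagerops.c -- sketch of the pagertab-style operations-table dispatch in vm_pager.h. */
#include <stdio.h>

struct pager_ops {
    int  (*pgo_getpages)(int npages);
    void (*pgo_dealloc)(void);
};

static int  vnode_getpages(int n) { printf("vnode pager: read %d pages\n", n); return 0; }
static void vnode_dealloc(void)   { printf("vnode pager: dealloc\n"); }
static int  swap_getpages(int n)  { printf("swap pager: read %d pages\n", n); return 0; }
static void swap_dealloc(void)    { printf("swap pager: dealloc\n"); }

enum { OBJT_SWAP, OBJT_VNODE, OBJT_NTYPES };

static const struct pager_ops swap_pager_ops  = { swap_getpages,  swap_dealloc };
static const struct pager_ops vnode_pager_ops = { vnode_getpages, vnode_dealloc };

/* Indexed by object type, like pagertab[]. */
static const struct pager_ops *pagertab[OBJT_NTYPES] = {
    [OBJT_SWAP]  = &swap_pager_ops,
    [OBJT_VNODE] = &vnode_pager_ops,
};

static int pager_get_pages(int type, int npages) {
    /* GIANT_REQUIRED in the kernel; the wrapper just forwards to the table. */
    return pagertab[type]->pgo_getpages(npages);
}

int main(void) {
    pager_get_pages(OBJT_VNODE, 4);
    pager_get_pages(OBJT_SWAP, 1);
    return 0;
}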
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index 0dfb83e..dcd02e8 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -74,6 +74,9 @@ obreak(p, uap)
register struct vmspace *vm = p->p_vmspace;
vm_offset_t new, old, base;
int rv;
+ int error = 0;
+
+ mtx_lock(&Giant); /* syscall marked mp-safe but isn't */
base = round_page((vm_offset_t) vm->vm_daddr);
new = round_page((vm_offset_t)uap->nsize);
@@ -84,52 +87,46 @@ obreak(p, uap)
* reduce their usage, even if they remain over the limit.
*/
if (new > old &&
- (new - base) > (unsigned) p->p_rlimit[RLIMIT_DATA].rlim_cur)
- return ENOMEM;
- if (new >= VM_MAXUSER_ADDRESS)
- return (ENOMEM);
+ (new - base) > (unsigned) p->p_rlimit[RLIMIT_DATA].rlim_cur) {
+ error = ENOMEM;
+ goto done;
+ }
+ if (new >= VM_MAXUSER_ADDRESS) {
+ error = ENOMEM;
+ goto done;
+ }
} else if (new < base) {
/*
* This is simply an invalid value. If someone wants to
* do fancy address space manipulations, mmap and munmap
* can do most of what the user would want.
*/
- return EINVAL;
+ error = EINVAL;
+ goto done;
}
if (new > old) {
vm_size_t diff;
diff = new - old;
-#ifndef BLEED
- mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
rv = vm_map_find(&vm->vm_map, NULL, 0, &old, diff, FALSE,
VM_PROT_ALL, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
- mtx_unlock(&vm_mtx);
- return (ENOMEM);
+ error = ENOMEM;
+ goto done;
}
vm->vm_dsize += btoc(diff);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
- mtx_unlock(&Giant);
-#endif
} else if (new < old) {
- mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
rv = vm_map_remove(&vm->vm_map, new, old);
if (rv != KERN_SUCCESS) {
- mtx_unlock(&vm_mtx);
- mtx_unlock(&Giant);
- return (ENOMEM);
+ error = ENOMEM;
+ goto done;
}
vm->vm_dsize -= btoc(old - new);
- mtx_unlock(&vm_mtx);
- mtx_unlock(&Giant);
}
- return (0);
+done:
+ mtx_unlock(&Giant);
+ return (error);
}
#ifndef _SYS_SYSPROTO_H_
@@ -144,6 +141,7 @@ ovadvise(p, uap)
struct proc *p;
struct ovadvise_args *uap;
{
-
+ /* START_GIANT_OPTIONAL */
+ /* END_GIANT_OPTIONAL */
return (EINVAL);
}
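
obreak() now takes Giant unconditionally at entry, converts every early return into "error = ...; goto done;", and releases Giant exactly once at the single exit point. Here is a tiny userland sketch of that lock-at-entry / single-exit shape, with a hypothetical resize_segment() standing in for the data-segment resize.

/* single_exit.c -- sketch of the lock-at-entry, goto-done error pattern used in obreak(). */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;
static size_t cur_size, max_size = 1 << 20;

/* Grow or shrink a hypothetical data segment; always unlock through 'done'. */
static int resize_segment(size_t new_size) {
    int error = 0;

    pthread_mutex_lock(&giant);
    if (new_size > max_size) {
        error = ENOMEM;
        goto done;
    }
    cur_size = new_size;
done:
    pthread_mutex_unlock(&giant);
    return error;
}

int main(void) {
    printf("resize to 4096: %d\n", resize_segment(4096));
    printf("resize too big: %d\n", resize_segment((size_t)1 << 30));
    return 0;
}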
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 30fadbe..5c6431b 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -19,6 +19,7 @@
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
@@ -119,6 +120,8 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
{
int totsize, oldzflags;
+ GIANT_REQUIRED;
+
oldzflags = z->zflags;
if ((z->zflags & ZONE_BOOT) == 0) {
z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
@@ -137,8 +140,6 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
* in pages as needed.
*/
if (z->zflags & ZONE_INTERRUPT) {
- int hadvmlock;
-
totsize = round_page(z->zsize * nentries);
atomic_add_int(&zone_kmem_kvaspace, totsize);
z->zkva = kmem_alloc_pageable(kernel_map, totsize);
@@ -146,17 +147,12 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
return 0;
z->zpagemax = totsize / PAGE_SIZE;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
if (obj == NULL) {
z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
} else {
z->zobj = obj;
_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
}
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
z->zallocflag = VM_ALLOC_INTERRUPT;
z->zmax += nentries;
} else {
@@ -364,12 +360,8 @@ void *
zalloc(vm_zone_t z)
{
void *item;
- int hadvmlock;
KASSERT(z != NULL, ("invalid zone"));
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
mtx_lock(&z->zmtx);
if (z->zfreecnt <= z->zfreemin) {
@@ -390,8 +382,6 @@ zalloc(vm_zone_t z)
out:
mtx_unlock(&z->zmtx);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return item;
}
@@ -401,13 +391,8 @@ out:
void
zfree(vm_zone_t z, void *item)
{
- int hadvmlock;
-
KASSERT(z != NULL, ("invalid zone"));
KASSERT(item != NULL, ("invalid item"));
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
mtx_lock(&z->zmtx);
((void **) item)[0] = z->zitems;
@@ -419,8 +404,6 @@ zfree(vm_zone_t z, void *item)
z->zitems = item;
z->zfreecnt++;
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
mtx_unlock(&z->zmtx);
}
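
After the change, zalloc() and zfree() rely solely on the per-zone mutex z->zmtx; the surrounding vm_mtx acquire/release is gone. The sketch below reproduces the mutex-protected free-list push/pop those routines perform, with the first word of each free item used as the next pointer, as in the real allocator; the zone layout is otherwise simplified.

/* zone_freelist.c -- sketch of the per-zone mutex plus free-list push/pop in zalloc()/zfree(). */
#include <pthread.h>
#include <stdio.h>

struct zone {
    pthread_mutex_t zmtx;
    void *zitems;            /* singly linked free list; first word links to next */
    int   zfreecnt;
};

static void *zalloc(struct zone *z) {
    void *item;

    pthread_mutex_lock(&z->zmtx);
    item = z->zitems;
    if (item != NULL) {
        z->zitems = ((void **)item)[0];   /* pop: next pointer lives in the item */
        z->zfreecnt--;
    }
    pthread_mutex_unlock(&z->zmtx);
    return item;
}

static void zfree(struct zone *z, void *item) {
    pthread_mutex_lock(&z->zmtx);
    ((void **)item)[0] = z->zitems;       /* push onto the free list */
    z->zitems = item;
    z->zfreecnt++;
    pthread_mutex_unlock(&z->zmtx);
}

int main(void) {
    struct zone z = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
    void *slab_a[8], *slab_b[8];          /* pretend these are zone items */

    zfree(&z, slab_a);
    zfree(&z, slab_b);
    printf("allocated %p and %p\n", zalloc(&z), zalloc(&z));
    return 0;
}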
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 1bcdf9f..bc78e5a 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -110,7 +110,8 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_object_t object;
struct vnode *vp;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Pageout to vnode, no can do yet.
*/
@@ -123,13 +124,11 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
* Prevent race condition when allocating the object. This
* can happen with NFS vnodes since the nfsnode isn't locked.
*/
- mtx_unlock(&vm_mtx);
while (vp->v_flag & VOLOCK) {
vp->v_flag |= VOWANT;
tsleep(vp, PVM, "vnpobj", 0);
}
vp->v_flag |= VOLOCK;
- mtx_lock(&vm_mtx);
/*
* If the object is being terminated, wait for it to
@@ -137,7 +136,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
while (((object = vp->v_object) != NULL) &&
(object->flags & OBJ_DEAD)) {
- msleep(object, &vm_mtx, PVM, "vadead", 0);
+ tsleep(object, PVM, "vadead", 0);
}
if (vp->v_usecount == 0)
@@ -160,13 +159,11 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vp->v_usecount++;
}
- mtx_unlock(&vm_mtx);
vp->v_flag &= ~VOLOCK;
if (vp->v_flag & VOWANT) {
vp->v_flag &= ~VOWANT;
wakeup(vp);
}
- mtx_lock(&vm_mtx);
return (object);
}
@@ -176,7 +173,7 @@ vnode_pager_dealloc(object)
{
register struct vnode *vp = object->handle;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
if (vp == NULL)
panic("vnode_pager_dealloc: pager already dealloced");
@@ -203,7 +200,7 @@ vnode_pager_haspage(object, pindex, before, after)
int bsize;
int pagesperblock, blocksperpage;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
/*
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
@@ -228,10 +225,8 @@ vnode_pager_haspage(object, pindex, before, after)
blocksperpage = (PAGE_SIZE / bsize);
reqblock = pindex * blocksperpage;
}
- mtx_unlock(&vm_mtx);
err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
after, before);
- mtx_lock(&vm_mtx);
if (err)
return TRUE;
if ( bn == -1)
@@ -279,6 +274,8 @@ vnode_pager_setsize(vp, nsize)
vm_pindex_t nobjsize;
vm_object_t object = vp->v_object;
+ GIANT_REQUIRED;
+
if (object == NULL)
return;
@@ -294,11 +291,6 @@ vnode_pager_setsize(vp, nsize)
* File has shrunk. Toss any cached pages beyond the new EOF.
*/
if (nsize < object->un_pager.vnp.vnp_size) {
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
if (nobjsize < object->size) {
vm_object_page_remove(object, nobjsize, object->size,
@@ -339,8 +331,6 @@ vnode_pager_setsize(vp, nsize)
m->dirty = VM_PAGE_BITS_ALL;
}
}
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
}
object->un_pager.vnp.vnp_size = nsize;
object->size = nobjsize;
@@ -364,7 +354,7 @@ vnode_pager_addr(vp, address, run)
daddr_t vblock;
int voffset;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
if ((int) address < 0)
return -1;
@@ -374,11 +364,9 @@ vnode_pager_addr(vp, address, run)
bsize = vp->v_mount->mnt_stat.f_iosize;
vblock = address / bsize;
voffset = address % bsize;
- mtx_unlock(&vm_mtx);
err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);
- mtx_lock(&vm_mtx);
if (err || (block == -1))
rtaddress = -1;
else {
@@ -421,17 +409,16 @@ vnode_pager_input_smlfs(object, m)
vm_offset_t bsize;
int error = 0;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
vp = object->handle;
if (vp->v_mount == NULL)
return VM_PAGER_BAD;
bsize = vp->v_mount->mnt_stat.f_iosize;
- mtx_unlock(&vm_mtx);
VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);
- mtx_lock(&vm_mtx);
kva = vm_pager_map_page(m);
for (i = 0; i < PAGE_SIZE / bsize; i++) {
@@ -442,7 +429,6 @@ vnode_pager_input_smlfs(object, m)
fileaddr = vnode_pager_addr(vp,
IDX_TO_OFF(m->pindex) + i * bsize, (int *)0);
if (fileaddr != -1) {
- mtx_unlock(&vm_mtx);
bp = getpbuf(&vnode_pbuf_freecnt);
/* build a minimal buffer header */
@@ -478,7 +464,6 @@ vnode_pager_input_smlfs(object, m)
* free the buffer header back to the swap buffer pool
*/
relpbuf(bp, &vnode_pbuf_freecnt);
- mtx_lock(&vm_mtx);
if (error)
break;
@@ -514,7 +499,7 @@ vnode_pager_input_old(object, m)
vm_offset_t kva;
struct vnode *vp;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
error = 0;
/*
@@ -534,7 +519,6 @@ vnode_pager_input_old(object, m)
kva = vm_pager_map_page(m);
vp = object->handle;
- mtx_unlock(&vm_mtx);
aiov.iov_base = (caddr_t) kva;
aiov.iov_len = size;
auio.uio_iov = &aiov;
@@ -554,7 +538,6 @@ vnode_pager_input_old(object, m)
else if (count != PAGE_SIZE)
bzero((caddr_t) kva + count, PAGE_SIZE - count);
}
- mtx_lock(&vm_mtx);
vm_pager_unmap_page(kva);
}
pmap_clear_modify(m);
@@ -588,7 +571,7 @@ vnode_pager_getpages(object, m, count, reqpage)
struct vnode *vp;
int bytes = count * PAGE_SIZE;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
vp = object->handle;
rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
KASSERT(rtval != EOPNOTSUPP,
@@ -620,7 +603,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
int count;
int error = 0;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
object = vp->v_object;
count = bytecount / PAGE_SIZE;
@@ -640,9 +623,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
/*
* if we can't bmap, use old VOP code
*/
- mtx_unlock(&vm_mtx);
if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
- mtx_lock(&vm_mtx);
for (i = 0; i < count; i++) {
if (i != reqpage) {
vm_page_free(m[i]);
@@ -659,7 +640,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
} else if ((PAGE_SIZE / bsize) > 1 &&
(vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
- mtx_lock(&vm_mtx);
for (i = 0; i < count; i++) {
if (i != reqpage) {
vm_page_free(m[i]);
@@ -669,7 +649,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
cnt.v_vnodepgsin++;
return vnode_pager_input_smlfs(object, m[reqpage]);
}
- mtx_lock(&vm_mtx);
/*
* If we have a completely valid page available to us, we can
@@ -770,7 +749,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* and map the pages to be read into the kva
*/
pmap_qenter(kva, m, count);
- mtx_unlock(&vm_mtx);
/* build a minimal buffer header */
bp->b_iocmd = BIO_READ;
@@ -808,7 +786,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (size != count * PAGE_SIZE)
bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
}
- mtx_lock(&vm_mtx);
pmap_qremove(kva, count);
/*
@@ -899,7 +876,7 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
struct mount *mp;
int bytes = count * PAGE_SIZE;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Force synchronous operation if we are extremely low on memory
* to prevent a low-memory deadlock. VOP operations often need to
@@ -920,17 +897,13 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
*/
vp = object->handle;
- mtx_unlock(&vm_mtx);
if (vp->v_type != VREG)
mp = NULL;
(void)vn_start_write(vp, &mp, V_WAIT);
- mtx_lock(&vm_mtx);
rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: stale FS putpages\n"));
- mtx_unlock(&vm_mtx);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
}
@@ -962,7 +935,7 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
int error;
int ioflags;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
object = vp->v_object;
count = bytecount / PAGE_SIZE;
@@ -992,7 +965,6 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
}
}
}
- mtx_unlock(&vm_mtx);
/*
* pageouts are already clustered, use IO_ASYNC t o force a bawrite()
@@ -1013,7 +985,6 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
auio.uio_resid = maxsize;
auio.uio_procp = (struct proc *) 0;
error = VOP_WRITE(vp, &auio, ioflags, curproc->p_ucred);
- mtx_lock(&vm_mtx);
cnt.v_vnodeout++;
cnt.v_vnodepgsout += ncount;
@@ -1036,18 +1007,15 @@ vnode_pager_lock(object)
{
struct proc *p = curproc; /* XXX */
- mtx_assert(&vm_mtx, MA_NOTOWNED);
- mtx_assert(&Giant, MA_OWNED);
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
for (; object != NULL; object = object->backing_object) {
if (object->type != OBJT_VNODE)
continue;
if (object->flags & OBJ_DEAD) {
- mtx_unlock(&vm_mtx);
return NULL;
}
- mtx_unlock(&vm_mtx);
/* XXX; If object->handle can change, we need to cache it. */
while (vget(object->handle,
LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p)) {
@@ -1057,6 +1025,5 @@ vnode_pager_lock(object)
}
return object->handle;
}
- mtx_unlock(&vm_mtx);
return NULL;
}
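
vnode_pager_alloc() serializes object creation with the hand-rolled VOLOCK/VOWANT flag lock on the vnode, now sleeping with a plain tsleep() since there is no VM mutex to hand to msleep(). The fragment below is a userland analog of that flag-lock protocol, with a pthread mutex standing in for Giant and a condition variable standing in for tsleep()/wakeup(); struct vnode_sketch is invented for the example.

/* flaglock.c -- sketch of the VOLOCK/VOWANT flag-lock protocol in vnode_pager_alloc(). */
#include <pthread.h>
#include <stdio.h>

#define VOLOCK 0x1               /* object allocation in progress */
#define VOWANT 0x2               /* someone is waiting for VOLOCK */

struct vnode_sketch {
    pthread_mutex_t mtx;         /* stands in for Giant around the flag word */
    pthread_cond_t  cv;          /* stands in for tsleep()/wakeup() on the vnode */
    unsigned flags;
};

static void vnode_object_lock(struct vnode_sketch *vp) {
    pthread_mutex_lock(&vp->mtx);
    while (vp->flags & VOLOCK) {
        vp->flags |= VOWANT;
        pthread_cond_wait(&vp->cv, &vp->mtx);   /* kernel: tsleep(vp, ...) */
    }
    vp->flags |= VOLOCK;
    pthread_mutex_unlock(&vp->mtx);
}

static void vnode_object_unlock(struct vnode_sketch *vp) {
    pthread_mutex_lock(&vp->mtx);
    vp->flags &= ~VOLOCK;
    if (vp->flags & VOWANT) {
        vp->flags &= ~VOWANT;
        pthread_cond_broadcast(&vp->cv);        /* kernel: wakeup(vp) */
    }
    pthread_mutex_unlock(&vp->mtx);
}

int main(void) {
    struct vnode_sketch vp = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };
    vnode_object_lock(&vp);
    puts("object allocation serialized");
    vnode_object_unlock(&vp);
    return 0;
}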