summary refs log tree commit diff stats
path: root/sys/vm
diff options
context:
space:
mode:
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_fault.c    4
-rw-r--r--  sys/vm/vm_glue.c    48
-rw-r--r--  sys/vm/vm_map.h      8
-rw-r--r--  sys/vm/vm_meter.c   10
-rw-r--r--  sys/vm/vm_object.c   4
-rw-r--r--  sys/vm/vm_pageout.c 20
-rw-r--r--  sys/vm/vm_zone.c    34
7 files changed, 64 insertions, 64 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index ee30759..4641537 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -854,7 +854,7 @@ readrest:
vm_page_activate(fs.m);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (curproc && (curproc->p_sflag & PS_INMEM) && curproc->p_stats) {
if (hardfault) {
curproc->p_stats->p_ru.ru_majflt++;
@@ -862,7 +862,7 @@ readrest:
curproc->p_stats->p_ru.ru_minflt++;
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* Unlock everything, and return
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index b76c855..0f80f57 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -313,18 +313,18 @@ faultin(p)
{
mtx_assert(&p->p_mtx, MA_OWNED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
++p->p_lock;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
mtx_assert(&Giant, MA_OWNED);
pmap_swapin_proc(p);
PROC_LOCK(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SRUN) {
setrunqueue(p);
}
@@ -334,7 +334,7 @@ faultin(p)
/* undo the effect of setting SLOCK above */
--p->p_lock;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -366,7 +366,7 @@ loop:
ppri = INT_MIN;
ALLPROC_LOCK(AP_SHARED);
LIST_FOREACH(p, &allproc, p_list) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SRUN &&
(p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {
@@ -385,7 +385,7 @@ loop:
ppri = pri;
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
ALLPROC_LOCK(AP_RELEASE);
@@ -396,9 +396,9 @@ loop:
tsleep(&proc0, PVM, "sched", 0);
goto loop;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_SWAPINREQ;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* We would like to bring someone in. (only if there is space).
@@ -406,9 +406,9 @@ loop:
PROC_LOCK(p);
faultin(p);
PROC_UNLOCK(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_swtime = 0;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto loop;
}
@@ -461,15 +461,15 @@ retry:
}
vm = p->p_vmspace;
PROC_UNLOCK(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
switch (p->p_stat) {
default:
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
case SSLEEP:
@@ -478,7 +478,7 @@ retry:
* do not swapout a realtime process
*/
if (RTP_PRIO_IS_REALTIME(p->p_rtprio.type)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
@@ -489,7 +489,7 @@ retry:
*/
if (((p->p_priority & 0x7f) < PSOCK) ||
(p->p_slptime < swap_idle_threshold1)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
@@ -501,10 +501,10 @@ retry:
if (((action & VM_SWAP_NORMAL) == 0) &&
(((action & VM_SWAP_IDLE) == 0) ||
(p->p_slptime < swap_idle_threshold2))) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
++vm->vm_refcnt;
/*
@@ -522,17 +522,17 @@ retry:
* If the process has been asleep for awhile and had
* most of its pages taken away already, swap it out.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((action & VM_SWAP_NORMAL) ||
((action & VM_SWAP_IDLE) &&
(p->p_slptime > swap_idle_threshold2))) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
swapout(p);
vmspace_free(vm);
didswap++;
goto retry;
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
ALLPROC_LOCK(AP_RELEASE);
@@ -559,19 +559,19 @@ swapout(p)
p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
(void) splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_INMEM;
p->p_sflag |= PS_SWAPPING;
if (p->p_stat == SRUN)
remrunqueue(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
(void) spl0();
pmap_swapout_proc(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_SWAPPING;
p->p_swtime = 0;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index ef48af2..291826b 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -291,15 +291,15 @@ _vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
#define vm_map_set_recursive(map) \
do { \
- mtx_enter((map)->lock.lk_interlock, MTX_DEF); \
+ mtx_lock((map)->lock.lk_interlock); \
(map)->lock.lk_flags |= LK_CANRECURSE; \
- mtx_exit((map)->lock.lk_interlock, MTX_DEF); \
+ mtx_unlock((map)->lock.lk_interlock); \
} while(0)
#define vm_map_clear_recursive(map) \
do { \
- mtx_enter((map)->lock.lk_interlock, MTX_DEF); \
+ mtx_lock((map)->lock.lk_interlock); \
(map)->lock.lk_flags &= ~LK_CANRECURSE; \
- mtx_exit((map)->lock.lk_interlock, MTX_DEF); \
+ mtx_unlock((map)->lock.lk_interlock); \
} while(0)
/*
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 3a31ad4..0a05cb9 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -153,10 +153,10 @@ vmtotal(SYSCTL_HANDLER_ARGS)
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_flag & P_SYSTEM)
continue;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
switch (p->p_stat) {
case 0:
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
case SMTX:
@@ -170,7 +170,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
} else if (p->p_slptime < maxslp)
totalp->t_sw++;
if (p->p_slptime >= maxslp) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
break;
@@ -186,12 +186,12 @@ vmtotal(SYSCTL_HANDLER_ARGS)
else
totalp->t_sw++;
if (p->p_stat == SIDL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
break;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* Note active objects.
*/
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1e16917..39191b1 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -458,9 +458,9 @@ vm_object_terminate(object)
/*
* Remove the object from the global object list.
*/
- mtx_enter(&vm_object_list_mtx, MTX_DEF);
+ mtx_lock(&vm_object_list_mtx);
TAILQ_REMOVE(&vm_object_list, object, object_list);
- mtx_exit(&vm_object_list_mtx, MTX_DEF);
+ mtx_unlock(&vm_object_list_mtx);
wakeup(object);
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 4046e0e..568f42b 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1140,12 +1140,12 @@ rescan0:
* if the process is in a non-running type state,
* don't touch it.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* get the process size
*/
@@ -1162,11 +1162,11 @@ rescan0:
ALLPROC_LOCK(AP_RELEASE);
if (bigproc != NULL) {
killproc(bigproc, "out of swap space");
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
bigproc->p_estcpu = 0;
bigproc->p_nice = PRIO_MIN;
resetpriority(bigproc);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
wakeup(&cnt.v_free_count);
}
}
@@ -1305,7 +1305,7 @@ vm_pageout()
{
int pass;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* Initialize some paging parameters.
@@ -1449,7 +1449,7 @@ vm_daemon()
{
struct proc *p;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
while (TRUE) {
tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
@@ -1477,9 +1477,9 @@ vm_daemon()
* if the process is in a non-running type state,
* don't touch it.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
/*
@@ -1496,7 +1496,7 @@ vm_daemon()
*/
if ((p->p_sflag & PS_INMEM) == 0)
limit = 0; /* XXX */
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
size = vmspace_resident_count(p->p_vmspace);
if (limit >= 0 && size >= limit) {
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 390c5f2..f2d9622 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -173,9 +173,9 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
/* our zone is good and ready, add it to the list */
if ((z->zflags & ZONE_BOOT) == 0) {
mtx_init(&(z)->zmtx, "zone", MTX_DEF);
- mtx_enter(&zone_mtx, MTX_DEF);
+ mtx_lock(&zone_mtx);
SLIST_INSERT_HEAD(&zlist, z, zent);
- mtx_exit(&zone_mtx, MTX_DEF);
+ mtx_unlock(&zone_mtx);
}
return 1;
@@ -245,9 +245,9 @@ zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
z->zmax = nitems;
z->ztotal = nitems;
- mtx_enter(&zone_mtx, MTX_DEF);
+ mtx_lock(&zone_mtx);
SLIST_INSERT_HEAD(&zlist, z, zent);
- mtx_exit(&zone_mtx, MTX_DEF);
+ mtx_unlock(&zone_mtx);
}
/*
@@ -300,15 +300,15 @@ _zget(vm_zone_t z)
* map.
*/
if (lockstatus(&kernel_map->lock, NULL)) {
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
if (item != NULL)
atomic_add_int(&zone_kmem_pages, z->zalloc);
} else {
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
item = (void *) kmem_alloc(kernel_map, nbytes);
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
if (item != NULL)
atomic_add_int(&zone_kern_pages, z->zalloc);
}
@@ -363,11 +363,11 @@ zalloc(vm_zone_t z)
void *item;
KASSERT(z != NULL, ("invalid zone"));
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
if (z->zfreecnt <= z->zfreemin) {
item = _zget(z);
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
return item;
}
@@ -382,7 +382,7 @@ zalloc(vm_zone_t z)
z->zfreecnt--;
z->znalloc++;
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
return item;
}
@@ -394,7 +394,7 @@ zfree(vm_zone_t z, void *item)
{
KASSERT(z != NULL, ("invalid zone"));
KASSERT(item != NULL, ("invalid item"));
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
@@ -405,7 +405,7 @@ zfree(vm_zone_t z, void *item)
z->zitems = item;
z->zfreecnt++;
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
}
/*
@@ -418,22 +418,22 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
char tmpbuf[128];
vm_zone_t z;
- mtx_enter(&zone_mtx, MTX_DEF);
+ mtx_lock(&zone_mtx);
len = snprintf(tmpbuf, sizeof(tmpbuf),
"\nITEM SIZE LIMIT USED FREE REQUESTS\n\n");
error = SYSCTL_OUT(req, tmpbuf, SLIST_EMPTY(&zlist) ? len-1 : len);
SLIST_FOREACH(z, &zlist, zent) {
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
len = snprintf(tmpbuf, sizeof(tmpbuf),
"%-14.14s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
z->zname, z->zsize, z->zmax, (z->ztotal - z->zfreecnt),
z->zfreecnt, z->znalloc);
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
if (SLIST_NEXT(z, zent) == NULL)
tmpbuf[len - 1] = 0;
error = SYSCTL_OUT(req, tmpbuf, len);
}
- mtx_exit(&zone_mtx, MTX_DEF);
+ mtx_unlock(&zone_mtx);
return (error);
}
OpenPOWER on IntegriCloud