path: root/sys/kern
author     bmilekic <bmilekic@FreeBSD.org>    2001-02-09 06:11:45 +0000
committer  bmilekic <bmilekic@FreeBSD.org>    2001-02-09 06:11:45 +0000
commit     f364d4ac3621ae2689a3cc1b82c73eb491475a24
tree       84444d0341ce519800ed7913d826f5f38c622d6d /sys/kern
parent     363bdddf694863339f6629340cfb324771b8ffe7
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:

    mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
    mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)

Similarly, for releasing a lock, we now have:

    mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.

We change the caller interface for the two different types of locks because
the semantics are entirely different for each case, and this makes it
explicitly clear and, at the same time, it rids us of the extra `type'
argument. The enter->lock and exit->unlock change has been made with the
idea that we're "locking data" and not "entering locked code" in mind.

Further, remove all additional "flags" previously passed to the lock
acquire/release routines, with the exception of two: MTX_QUIET and
MTX_NOSWITCH. The functionality of these flags is preserved and they can be
passed to the lock/unlock routines by calling the corresponding wrappers:

    mtx_{lock, unlock}_flags(lock, flag(s)) and
    mtx_{lock, unlock}_spin_flags(lock, flag(s))

for MTX_DEF and MTX_SPIN locks, respectively.

Re-inline some lock acq/rel code; in the sleep lock case, we only inline
the _obtain_lock()s in order to ensure that the inlined code fits into a
cache line. In the spin lock case, we inline recursion and actually only
perform a function call if we need to spin. This change has been made with
the idea that we generally tend to avoid spin locks and that also the spin
locks that we do have and are heavily used (i.e. sched_lock) do recurse,
and therefore, in an effort to reduce function call overhead for some
architectures (such as alpha), we inline recursion for this case. (A rough
sketch of this split follows the diffstat below.)

Create a new malloc type for the witness code and retire from using the
M_DEVBUF type. The new type is called M_WITNESS and is only declared if
WITNESS is enabled.

Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN and
MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently need those.

Finally, caught up to the interface changes in all sys code.

Contributors: jake, jhb, jasone (in no particular order)
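To illustrate the caller-visible change, a minimal hypothetical usage sketch
in C follows. The lock names (foo_mtx, foo_spin_mtx) and the function are
invented for illustration only; the mtx_init()/mtx_lock()/mtx_unlock() calls,
their spin variants, the *_flags() wrappers, and the MTX_* flags are the ones
introduced or kept by this commit.

    #include <sys/param.h>
    #include <sys/mutex.h>

    static struct mtx foo_mtx;          /* hypothetical MTX_DEF (sleep) lock */
    static struct mtx foo_spin_mtx;     /* hypothetical MTX_SPIN lock */

    static void
    foo_example(void)
    {

            mtx_init(&foo_mtx, "foo", MTX_DEF);
            mtx_init(&foo_spin_mtx, "foo spin", MTX_SPIN);

            /* Was: mtx_enter(&foo_mtx, MTX_DEF); ... mtx_exit(&foo_mtx, MTX_DEF); */
            mtx_lock(&foo_mtx);
            /* ... access data protected by foo_mtx ... */
            mtx_unlock(&foo_mtx);

            /* Was: mtx_enter(&foo_spin_mtx, MTX_SPIN); ... mtx_exit(&foo_spin_mtx, MTX_SPIN); */
            mtx_lock_spin(&foo_spin_mtx);
            /* ... */
            mtx_unlock_spin(&foo_spin_mtx);

            /*
             * The two surviving flags go through the *_flags() wrappers,
             * as cv_wait() does in the diff below.
             * Was: mtx_exit(&foo_mtx, MTX_DEF | MTX_NOSWITCH);
             */
            mtx_lock(&foo_mtx);
            mtx_unlock_flags(&foo_mtx, MTX_NOSWITCH);
    }

In the diff below, every mtx_enter(lock, MTX_DEF)/mtx_exit(lock, MTX_DEF)
and mtx_enter(lock, MTX_SPIN)/mtx_exit(lock, MTX_SPIN) pair is rewritten in
exactly this way.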
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/imgact_elf.c        |   4
-rw-r--r--  sys/kern/init_main.c         |  12
-rw-r--r--  sys/kern/kern_acct.c         |   4
-rw-r--r--  sys/kern/kern_clock.c        |  28
-rw-r--r--  sys/kern/kern_condvar.c      |  60
-rw-r--r--  sys/kern/kern_exit.c         |  12
-rw-r--r--  sys/kern/kern_fork.c         |  16
-rw-r--r--  sys/kern/kern_idle.c         |   4
-rw-r--r--  sys/kern/kern_intr.c         |  12
-rw-r--r--  sys/kern/kern_kthread.c      |   4
-rw-r--r--  sys/kern/kern_lock.c         |  24
-rw-r--r--  sys/kern/kern_malloc.c       |  24
-rw-r--r--  sys/kern/kern_mutex.c        | 948
-rw-r--r--  sys/kern/kern_proc.c         |   4
-rw-r--r--  sys/kern/kern_prot.c         |  14
-rw-r--r--  sys/kern/kern_resource.c     |  40
-rw-r--r--  sys/kern/kern_shutdown.c     |   6
-rw-r--r--  sys/kern/kern_sig.c          |  48
-rw-r--r--  sys/kern/kern_subr.c         |   4
-rw-r--r--  sys/kern/kern_synch.c        |  84
-rw-r--r--  sys/kern/kern_timeout.c      |  38
-rw-r--r--  sys/kern/subr_eventhandler.c |  12
-rw-r--r--  sys/kern/subr_prof.c         |   6
-rw-r--r--  sys/kern/subr_rman.c         |  42
-rw-r--r--  sys/kern/subr_smp.c          |  20
-rw-r--r--  sys/kern/subr_trap.c         | 108
-rw-r--r--  sys/kern/subr_turnstile.c    | 948
-rw-r--r--  sys/kern/subr_witness.c      | 948
-rw-r--r--  sys/kern/sys_generic.c       |  12
-rw-r--r--  sys/kern/sys_process.c       |  24
-rw-r--r--  sys/kern/tty.c               |   4
-rw-r--r--  sys/kern/uipc_mbuf.c         |  30
-rw-r--r--  sys/kern/uipc_syscalls.c     |  12
-rw-r--r--  sys/kern/vfs_aio.c           |   2
-rw-r--r--  sys/kern/vfs_bio.c           |   2
-rw-r--r--  sys/kern/vfs_conf.c          |   4
-rw-r--r--  sys/kern/vfs_default.c       |   4
-rw-r--r--  sys/kern/vfs_export.c        | 174
-rw-r--r--  sys/kern/vfs_extattr.c       |  46
-rw-r--r--  sys/kern/vfs_mount.c         |   4
-rw-r--r--  sys/kern/vfs_subr.c          | 174
-rw-r--r--  sys/kern/vfs_syscalls.c      |  46
-rw-r--r--  sys/kern/vfs_vnops.c         |  12
43 files changed, 1796 insertions, 2228 deletions
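The "Re-inline some lock acq/rel code" paragraph of the commit message
describes a fast-path/slow-path split whose actual inlines live in
<sys/mutex.h> and the machine-dependent mutex.h headers, which fall outside
this sys/kern diffstat. The following is only a rough, hypothetical sketch of
that split: the *_sketch names are invented here, while _obtain_lock(),
CURTHD, and the _mtx_lock_sleep()/_mtx_lock_spin() helpers are the ones
visible in the kern_mutex.c hunks below; interrupt save/disable for the spin
case is omitted for brevity.

    /* Sleep (MTX_DEF) acquire: inline only the atomic acquire attempt. */
    #define mtx_lock_sketch(m) do {                                         \
            if (!_obtain_lock((m), CURTHD))                                 \
                    /* Contention and recursion are handled in C code. */   \
                    _mtx_lock_sleep((m), 0, __FILE__, __LINE__);            \
    } while (0)

    /* Spin (MTX_SPIN) acquire: recursion is inlined; call out only to spin. */
    #define mtx_lock_spin_sketch(m, saveintr) do {                          \
            if (!_obtain_lock((m), CURTHD)) {                               \
                    if ((m)->mtx_lock == (uintptr_t)CURTHD)                 \
                            (m)->mtx_recurse++;     /* recurse inline */    \
                    else                                                    \
                            _mtx_lock_spin((m), 0, (saveintr),              \
                                __FILE__, __LINE__);                        \
            }                                                               \
    } while (0)

This keeps the common MTX_DEF case within a cache line and, for heavily
recursed spin locks such as sched_lock, avoids a function call unless the
lock is actually contested.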
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 96db3b3..b5fc907 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -485,9 +485,9 @@ exec_elf_imgact(struct image_params *imgp)
* a context switch. Better safe than sorry; I really don't want
* the file to change while it's being loaded.
*/
- mtx_enter(&imgp->vp->v_interlock, MTX_DEF);
+ mtx_lock(&imgp->vp->v_interlock);
imgp->vp->v_flag |= VTEXT;
- mtx_exit(&imgp->vp->v_interlock, MTX_DEF);
+ mtx_unlock(&imgp->vp->v_interlock);
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index ffa711f..0214ed1 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -455,7 +455,7 @@ start_init(void *dummy)
char *ucp, **uap, *arg0, *arg1;
struct proc *p;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
p = curproc;
@@ -555,7 +555,7 @@ start_init(void *dummy)
* to user mode as init!
*/
if ((error = execve(p, &args)) == 0) {
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return;
}
if (error != ENOENT)
@@ -584,9 +584,9 @@ create_init(const void *udata __unused)
PROC_LOCK(initproc);
initproc->p_flag |= P_SYSTEM;
PROC_UNLOCK(initproc);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
initproc->p_sflag |= PS_INMEM;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
cpu_set_fork_handler(initproc, start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)
@@ -598,9 +598,9 @@ static void
kick_init(const void *udata __unused)
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
initproc->p_stat = SRUN;
setrunqueue(initproc);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
diff --git a/sys/kern/kern_acct.c b/sys/kern/kern_acct.c
index 9220e57..cfa8cbe 100644
--- a/sys/kern/kern_acct.c
+++ b/sys/kern/kern_acct.c
@@ -194,9 +194,9 @@ acct_process(p)
bcopy(p->p_comm, acct.ac_comm, sizeof acct.ac_comm);
/* (2) The amount of user and system time that was used */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
calcru(p, &ut, &st, NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
acct.ac_utime = encode_comp_t(ut.tv_sec, ut.tv_usec);
acct.ac_stime = encode_comp_t(st.tv_sec, st.tv_usec);
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 6d45911..0743c6c 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -170,17 +170,17 @@ hardclock(frame)
if (CLKF_USERMODE(frame) &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag |= PS_ALRMPEND;
aston();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag |= PS_PROFPEND;
aston();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
@@ -200,13 +200,13 @@ hardclock(frame)
* Process callouts at a very low cpu priority, so we don't keep the
* relatively high clock interrupt priority any longer than necessary.
*/
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
ticks++;
if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
need_softclock = 1;
} else if (softticks + 1 == ticks)
++softticks;
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
/*
* sched_swi acquires sched_lock, so we don't want to call it with
@@ -292,7 +292,7 @@ startprofclock(p)
* it should be protected later on by a time_lock, which would
* cover psdiv, etc. as well.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_PROFIL) == 0) {
p->p_sflag |= PS_PROFIL;
if (++profprocs == 1 && stathz != 0) {
@@ -302,7 +302,7 @@ startprofclock(p)
splx(s);
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -314,7 +314,7 @@ stopprofclock(p)
{
int s;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_PROFIL) {
p->p_sflag &= ~PS_PROFIL;
if (--profprocs == 0 && stathz != 0) {
@@ -324,7 +324,7 @@ stopprofclock(p)
splx(s);
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -347,7 +347,7 @@ statclock(frame)
struct rusage *ru;
struct vmspace *vm;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (CLKF_USERMODE(frame)) {
/*
@@ -362,7 +362,7 @@ statclock(frame)
forward_statclock(pscnt);
#endif
if (--pscnt > 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
/*
@@ -392,7 +392,7 @@ statclock(frame)
forward_statclock(pscnt);
#endif
if (--pscnt > 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
/*
@@ -435,7 +435,7 @@ statclock(frame)
ru->ru_maxrss = rss;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 05b8dc7..d8b97bf 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -138,9 +138,9 @@ cv_switch_catch(struct proc *p)
* stopped, p->p_wchan will be 0 upon return from CURSIG.
*/
p->p_sflag |= PS_SINTR;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
sig = CURSIG(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (sig != 0) {
if (p->p_wchan != NULL)
cv_waitq_remove(p);
@@ -199,7 +199,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
@@ -207,25 +207,25 @@ cv_wait(struct cv *cvp, struct mtx *mp)
* procs or panic below, in case this is the idle process and
* already asleep.
*/
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
CV_WAIT_VALIDATE(cvp, mp);
DROP_GIANT_NOSWITCH();
- mtx_exit(mp, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mp, MTX_NOSWITCH);
cv_waitq_add(cvp, p);
cv_switch(p);
curpriority = p->p_usrpri;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 0, 0);
#endif
PICKUP_GIANT();
- mtx_enter(mp, MTX_DEF);
+ mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
}
@@ -253,7 +253,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
@@ -261,19 +261,19 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
* procs or panic below, in case this is the idle process and
* already asleep.
*/
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
CV_WAIT_VALIDATE(cvp, mp);
DROP_GIANT_NOSWITCH();
- mtx_exit(mp, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mp, MTX_NOSWITCH);
cv_waitq_add(cvp, p);
sig = cv_switch_catch(p);
curpriority = p->p_usrpri;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
/* proc_lock(p); */
@@ -291,7 +291,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 0, 0);
#endif
- mtx_enter(mp, MTX_DEF);
+ mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
return (rval);
@@ -319,7 +319,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
@@ -327,13 +327,13 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
* procs or panic below, in case this is the idle process and
* already asleep.
*/
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
CV_WAIT_VALIDATE(cvp, mp);
DROP_GIANT_NOSWITCH();
- mtx_exit(mp, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mp, MTX_NOSWITCH);
cv_waitq_add(cvp, p);
callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
@@ -346,13 +346,13 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
} else
callout_stop(&p->p_slpcallout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 0, 0);
#endif
PICKUP_GIANT();
- mtx_enter(mp, MTX_DEF);
+ mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
return (rval);
@@ -382,7 +382,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
@@ -390,13 +390,13 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
* procs or panic below, in case this is the idle process and
* already asleep.
*/
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
CV_WAIT_VALIDATE(cvp, mp);
DROP_GIANT_NOSWITCH();
- mtx_exit(mp, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mp, MTX_NOSWITCH);
cv_waitq_add(cvp, p);
callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
@@ -409,7 +409,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
} else
callout_stop(&p->p_slpcallout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
/* proc_lock(p); */
@@ -427,7 +427,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 0, 0);
#endif
- mtx_enter(mp, MTX_DEF);
+ mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
return (rval);
@@ -480,12 +480,12 @@ cv_signal(struct cv *cvp)
{
KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__));
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
CV_SIGNAL_VALIDATE(cvp);
cv_wakeup(cvp);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -497,11 +497,11 @@ cv_broadcast(struct cv *cvp)
{
KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__));
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
CV_SIGNAL_VALIDATE(cvp);
while (!TAILQ_EMPTY(&cvp->cv_waitq))
cv_wakeup(cvp);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -513,13 +513,13 @@ cv_waitq_remove(struct proc *p)
{
struct cv *cvp;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((cvp = p->p_wchan) != NULL && p->p_sflag & PS_CVWAITQ) {
TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq);
p->p_sflag &= ~PS_CVWAITQ;
p->p_wchan = NULL;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -534,7 +534,7 @@ cv_timedwait_end(void *arg)
p = arg;
CTR3(KTR_PROC, "cv_timedwait_end: proc %p (pid %d, %s)", p, p->p_pid,
p->p_comm);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan != NULL) {
if (p->p_stat == SSLEEP)
setrunnable(p);
@@ -542,5 +542,5 @@ cv_timedwait_end(void *arg)
cv_waitq_remove(p);
p->p_sflag |= PS_TIMEOUT;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 5e803e1..71e6288 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -314,9 +314,9 @@ exit1(p, rv)
*/
p->p_xstat = rv;
*p->p_ru = p->p_stats->p_ru;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
ruadd(p->p_ru, &p->p_stats->p_cru);
/*
@@ -457,9 +457,9 @@ loop:
}
nfound++;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SZOMB) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
PROCTREE_LOCK(PT_RELEASE);
@@ -579,7 +579,7 @@ loop:
}
if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
(p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_flag |= P_WAITED;
PROC_UNLOCK(p);
PROCTREE_LOCK(PT_RELEASE);
@@ -598,7 +598,7 @@ loop:
error = 0;
return (error);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
}
PROCTREE_LOCK(PT_RELEASE);
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 2e2318f..8a9e835 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -380,11 +380,11 @@ again:
* The p_stats and p_sigacts substructs are set in vm_fork.
*/
p2->p_flag = 0;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p2->p_sflag = PS_INMEM;
if (p1->p_sflag & PS_PROFIL)
startprofclock(p2);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
M_SUBPROC, M_WAITOK);
bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
@@ -554,10 +554,10 @@ again:
p2->p_acflag = AFORK;
if ((flags & RFSTOPPED) == 0) {
splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p2->p_stat = SRUN;
setrunqueue(p2);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
spl0();
}
@@ -649,7 +649,7 @@ fork_exit(callout, arg, frame)
{
struct proc *p;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* XXX: We really shouldn't have to do this.
*/
@@ -674,7 +674,7 @@ fork_exit(callout, arg, frame)
*/
p = CURPROC;
if (p->p_flag & P_KTHREAD) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
p->p_comm, p->p_pid);
kthread_exit(0);
@@ -698,11 +698,11 @@ fork_return(p, frame)
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsysret(p->p_tracep, SYS_fork, 0, 0);
}
#endif
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
}
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index a24893d..fdf3566 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -105,8 +105,8 @@ idle_proc(void *dummy)
#endif
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index e4411c9..e7915c4 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -168,7 +168,7 @@ sched_swi(struct intrhand *ih, int flag)
ih->ih_need = 1;
if (!(flag & SWI_DELAY)) {
it->it_need = 1;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SWAIT) { /* not on run queue */
CTR1(KTR_INTR, "sched_swi: setrunqueue %d", p->p_pid);
/* membar_lock(); */
@@ -180,7 +180,7 @@ sched_swi(struct intrhand *ih, int flag)
CTR3(KTR_INTR, "sched_swi %d: it_need %d, state %d",
p->p_pid, it->it_need, p->p_stat );
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
need_resched();
}
}
@@ -223,10 +223,10 @@ sithd_loop(void *dummy)
ih->ih_flags);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ih->ih_handler(ih->ih_argument);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
}
@@ -236,14 +236,14 @@ sithd_loop(void *dummy)
* set again, so we have to check it again.
*/
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!it->it_need) {
p->p_stat = SWAIT; /* we're idle */
CTR1(KTR_INTR, "sithd_loop pid %d: done", p->p_pid);
mi_switch();
CTR1(KTR_INTR, "sithd_loop pid %d: resumed", p->p_pid);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index f9ca35f..b322bc1 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -103,13 +103,13 @@ kthread_create(void (*func)(void *), void *arg,
cpu_set_fork_handler(p2, func, arg);
/* Delay putting it on the run queue until now. */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p2->p_sflag |= PS_INMEM;
if (!(flags & RFSTOPPED)) {
p2->p_stat = SRUN;
setrunqueue(p2);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index afd59f2..d5987f5 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -144,11 +144,11 @@ apause(struct lock *lkp, int flags)
return 0;
#ifdef SMP
for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
if ((lkp->lk_flags & flags) == 0)
break;
- mtx_enter(lkp->lk_interlock, MTX_DEF);
+ mtx_lock(lkp->lk_interlock);
if ((lkp->lk_flags & flags) == 0)
return 0;
}
@@ -236,9 +236,9 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
else
pid = p->p_pid;
- mtx_enter(lkp->lk_interlock, MTX_DEF);
+ mtx_lock(lkp->lk_interlock);
if (flags & LK_INTERLOCK)
- mtx_exit(interlkp, MTX_DEF);
+ mtx_unlock(interlkp);
extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
@@ -451,7 +451,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
break;
default:
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
panic("lockmgr: unknown locktype request %d",
flags & LK_TYPE_MASK);
/* NOTREACHED */
@@ -462,7 +462,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
lkp->lk_flags &= ~LK_WAITDRAIN;
wakeup((void *)&lkp->lk_flags);
}
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
return (error);
}
@@ -506,12 +506,12 @@ lockinit(lkp, prio, wmesg, timo, flags)
"timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
if (lock_mtx_array != NULL) {
- mtx_enter(&lock_mtx, MTX_DEF);
+ mtx_lock(&lock_mtx);
lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
lock_mtx_selector++;
if (lock_mtx_selector == lock_nmtx)
lock_mtx_selector = 0;
- mtx_exit(&lock_mtx, MTX_DEF);
+ mtx_unlock(&lock_mtx);
} else {
/*
* Giving lockmgr locks that are initialized during boot a
@@ -561,7 +561,7 @@ lockstatus(lkp, p)
{
int lock_type = 0;
- mtx_enter(lkp->lk_interlock, MTX_DEF);
+ mtx_lock(lkp->lk_interlock);
if (lkp->lk_exclusivecount != 0) {
if (p == NULL || lkp->lk_lockholder == p->p_pid)
lock_type = LK_EXCLUSIVE;
@@ -569,7 +569,7 @@ lockstatus(lkp, p)
lock_type = LK_EXCLOTHER;
} else if (lkp->lk_sharecount != 0)
lock_type = LK_SHARED;
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
return (lock_type);
}
@@ -582,9 +582,9 @@ lockcount(lkp)
{
int count;
- mtx_enter(lkp->lk_interlock, MTX_DEF);
+ mtx_lock(lkp->lk_interlock);
count = lkp->lk_exclusivecount + lkp->lk_sharecount;
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
return (count);
}
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 089d867..a6447a5 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -154,7 +154,7 @@ malloc(size, type, flags)
indx = BUCKETINDX(size);
kbp = &bucket[indx];
s = splmem();
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
while (ksp->ks_memuse >= ksp->ks_limit) {
if (flags & M_ASLEEP) {
if (ksp->ks_limblocks < 65535)
@@ -163,7 +163,7 @@ malloc(size, type, flags)
}
if (flags & M_NOWAIT) {
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
return ((void *) NULL);
}
if (ksp->ks_limblocks < 65535)
@@ -183,7 +183,7 @@ malloc(size, type, flags)
allocsize = 1 << indx;
npg = btoc(allocsize);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
if (va == NULL) {
@@ -194,7 +194,7 @@ malloc(size, type, flags)
* Enter malloc_mtx after the error check to avoid having to
* immediately exit it again if there is an error.
*/
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
kbp->kb_total += kbp->kb_elmpercl;
kup = btokup(va);
@@ -278,7 +278,7 @@ out:
if (ksp->ks_memuse > ksp->ks_maxused)
ksp->ks_maxused = ksp->ks_memuse;
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
/* XXX: Do idle pre-zeroing. */
if (va != NULL && (flags & M_ZERO))
bzero(va, size);
@@ -314,7 +314,7 @@ free(addr, type)
size = 1 << kup->ku_indx;
kbp = &bucket[kup->ku_indx];
s = splmem();
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
#ifdef INVARIANTS
/*
* Check for returns of data that do not point to the
@@ -329,9 +329,9 @@ free(addr, type)
(void *)addr, size, type->ks_shortdesc, alloc);
#endif /* INVARIANTS */
if (size > MAXALLOCSAVE) {
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
size = kup->ku_pagecnt << PAGE_SHIFT;
ksp->ks_memuse -= size;
@@ -343,7 +343,7 @@ free(addr, type)
ksp->ks_inuse--;
kbp->kb_total -= 1;
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
return;
}
freep = (struct freelist *)addr;
@@ -410,7 +410,7 @@ free(addr, type)
}
#endif
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
}
/*
@@ -540,7 +540,7 @@ malloc_uninit(data)
#ifdef INVARIANTS
s = splmem();
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
for (indx = 0; indx < MINBUCKET + 16; indx++) {
kbp = bucket + indx;
freep = (struct freelist*)kbp->kb_next;
@@ -551,7 +551,7 @@ malloc_uninit(data)
}
}
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
if (type->ks_memuse != 0)
printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index c13dd1d..64d3972 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -31,6 +31,11 @@
*/
/*
+ * Machine independent bits of mutex implementation and implementation of
+ * `witness' structure & related debugging routines.
+ */
+
+/*
* Main Entry: witness
* Pronunciation: 'wit-n&s
* Function: noun
@@ -53,12 +58,6 @@
#include "opt_ddb.h"
#include "opt_witness.h"
-/*
- * Cause non-inlined mtx_*() to be compiled.
- * Must be defined early because other system headers may include mutex.h.
- */
-#define _KERN_MUTEX_C_
-
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
@@ -82,9 +81,8 @@
#include <sys/mutex.h>
/*
- * Machine independent bits of the mutex implementation
+ * The WITNESS-enabled mutex debug structure.
*/
-
#ifdef WITNESS
struct mtx_debug {
struct witness *mtxd_witness;
@@ -100,138 +98,54 @@ struct mtx_debug {
#endif /* WITNESS */
/*
- * Assembly macros
- *------------------------------------------------------------------------------
- */
-
-#define _V(x) __STRING(x)
-
-/*
- * Default, unoptimized mutex micro-operations
+ * Internal utility macros.
*/
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#ifndef _obtain_lock
-/* Actually obtain mtx_lock */
-#define _obtain_lock(mp, tid) \
- atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
-#endif
-
-#ifndef _release_lock
-/* Actually release mtx_lock */
-#define _release_lock(mp, tid) \
- atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _release_lock_quick
-/* Actually release mtx_lock quickly assuming that we own it */
-#define _release_lock_quick(mp) \
- atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _getlock_sleep
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _getlock_spin_block
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_intr = save_intr(); \
- disable_intr(); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \
- else \
- (mp)->mtx_saveintr = _mtx_intr; \
-} while (0)
-#endif
+#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-#ifndef _getlock_norecurse
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-#endif
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-#ifndef _exitlock_norecurse
/*
- * Release a sleep lock assuming we haven't recursed on it, recursion is handled
- * in the hard function.
+ * Early WITNESS-enabled declarations.
*/
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-#endif
+#ifdef WITNESS
-#ifndef _exitlock
/*
- * Release a sleep lock when its likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) { \
- if ((mp)->mtx_lock & MTX_RECURSED) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_ptr(&(mp)->mtx_lock, \
- MTX_RECURSED); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _exitlock_spin
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp) do { \
- if (!mtx_recursed((mp))) { \
- int _mtx_intr = (mp)->mtx_saveintr; \
- \
- _release_lock_quick(mp); \
- restore_intr(_mtx_intr); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-#endif
+ * Internal WITNESS routines which must be prototyped early.
+ *
+ * XXX: When/if witness code is cleaned up, it would be wise to place all
+ * witness prototyping early in this file.
+ */
+static void witness_init(struct mtx *, int flag);
+static void witness_destroy(struct mtx *);
+static void witness_display(void(*)(const char *fmt, ...));
-#ifdef WITNESS
-static void witness_init(struct mtx *, int flag);
-static void witness_destroy(struct mtx *);
-static void witness_display(void(*)(const char *fmt, ...));
+MALLOC_DEFINE(M_WITNESS, "witness", "witness mtx_debug structure");
/* All mutexes in system (used for debug/panic) */
static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0 };
+
/*
- * Set to 0 once mutexes have been fully initialized so that witness code can be
- * safely executed.
+ * This global is set to 0 once it becomes safe to use the witness code.
*/
static int witness_cold = 1;
+
#else /* WITNESS */
-/*
- * flag++ is slezoid way of shutting up unused parameter warning
- * in mtx_init()
+/* XXX XXX XXX
+ * flag++ is a sleazoid way of shutting up the warning
*/
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_try_enter(m, t, f, l)
#endif /* WITNESS */
-/* All mutexes in system (used for debug/panic) */
+/*
+ * All mutex locks in system are kept on the all_mtx list.
+ */
static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
{ NULL, NULL }, &all_mtx, &all_mtx,
@@ -242,19 +156,18 @@ static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
#endif
};
+/*
+ * Global variables for book keeping.
+ */
static int mtx_cur_cnt;
static int mtx_max_cnt;
+/*
+ * Prototypes for non-exported routines.
+ *
+ * NOTE: Prototypes for witness routines are placed at the bottom of the file.
+ */
static void propagate_priority(struct proc *);
-static void mtx_enter_hard(struct mtx *, int type, int saveintr);
-static void mtx_exit_hard(struct mtx *, int type);
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
static void
propagate_priority(struct proc *p)
@@ -277,6 +190,7 @@ propagate_priority(struct proc *p)
MPASS(m->mtx_lock == MTX_CONTESTED);
return;
}
+
MPASS(p->p_magic == P_MAGIC);
KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
if (p->p_priority <= pri)
@@ -314,7 +228,7 @@ propagate_priority(struct proc *p)
* quit.
*/
if (p->p_stat == SRUN) {
- printf("XXX: moving process %d(%s) to a new run queue\n",
+ printf("XXX: moving proc %d(%s) to a new run queue\n",
p->p_pid, p->p_comm);
MPASS(p->p_blocked == NULL);
remrunqueue(p);
@@ -338,6 +252,7 @@ propagate_priority(struct proc *p)
printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
p->p_comm, m->mtx_description);
+
/*
* Check if the proc needs to be moved up on
* the blocked chain
@@ -346,10 +261,11 @@ propagate_priority(struct proc *p)
printf("XXX: process at head of run queue\n");
continue;
}
+
p1 = TAILQ_PREV(p, rq, p_procq);
if (p1->p_priority <= pri) {
printf(
- "XXX: previous process %d(%s) has higher priority\n",
+ "XXX: previous process %d(%s) has higher priority\n",
p->p_pid, p->p_comm);
continue;
}
@@ -367,6 +283,7 @@ propagate_priority(struct proc *p)
if (p1->p_priority > pri)
break;
}
+
MPASS(p1 != NULL);
TAILQ_INSERT_BEFORE(p1, p, p_procq);
CTR4(KTR_LOCK,
@@ -376,421 +293,332 @@ propagate_priority(struct proc *p)
}
/*
- * Get lock 'm', the macro handles the easy (and most common cases) and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out.
+ * The important part of mtx_trylock{,_flags}()
+ * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
+ * if we're called, it's because we know we don't already own this lock.
*/
-void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
+int
+_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *mpp = mtxp;
+ int rval;
- /* bits only valid on mtx_exit() */
- MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type, file, line);
+ KASSERT(CURPROC != NULL, ("curproc is NULL in _mtx_trylock"));
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == (uintptr_t)CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
-done:
- WITNESS_ENTER(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
-
-}
+ /*
+ * _mtx_trylock does not accept MTX_NOSWITCH option.
+ */
+ MPASS((opts & MTX_NOSWITCH) == 0);
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
+ rval = _obtain_lock(m, CURTHD);
- rval = _obtain_lock(mpp, CURTHD);
#ifdef WITNESS
- if (rval && mpp->mtx_witness != NULL) {
- MPASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
+ if (rval && m->mtx_witness != NULL) {
+ /*
+ * We do not handle recursion in _mtx_trylock; see the
+ * note at the top of the routine.
+ */
+ MPASS(!mtx_recursed(m));
+ witness_try_enter(m, (opts | m->mtx_flags), file, line);
}
#endif /* WITNESS */
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, rval, file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "TRY_ENTER %s [%p] result=%d at %s:%d",
+ m->mtx_description, m, rval, file, line);
return rval;
}
/*
- * Release lock m.
+ * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ *
+ * We call this if the lock is either contested (i.e. we need to go to
+ * sleep waiting for it), or if we need to recurse on it.
*/
void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
+_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *const mpp = mtxp;
-
- MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line);
- WITNESS_EXIT(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- int mtx_intr = mpp->mtx_saveintr;
-
- MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse,
- file, line);
- _release_lock_quick(mpp);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- restore_intr(mtx_intr);
- }
- } else {
- if (((type & MTX_TOPHALF) == 0) &&
- (type & MTX_FIRST)) {
- ASS_IDIS;
- ASS_SIEN(mpp);
- }
- _exitlock_spin(mpp);
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
+ struct proc *p = CURPROC;
+
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recurse", m);
+ return;
}
-}
-void
-mtx_enter_hard(struct mtx *m, int type, int saveintr)
-{
- struct proc *p = CURPROC;
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK, "mtx_lock: %p contested (lock=%p) [%p]", m,
+ (void *)m->mtx_lock, (void *)RETIP(m));
+
+ /*
+ * Save our priority. Even though p_nativepri is protected by
+ * sched_lock, we don't obtain it here as it can be expensive.
+ * Since this is the only place p_nativepri is set, and since two
+ * CPUs will not be executing the same process concurrently, we know
+ * that no other CPU is going to be messing with this. Also,
+ * p_nativepri is only read when we are blocked on a mutex, so that
+ * can't be happening right now either.
+ */
+ p->p_nativepri = p->p_priority;
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
+ while (!_obtain_lock(m, p)) {
+ uintptr_t v;
+ struct proc *p1;
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
- m->mtx_recurse++;
- atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p recurse", m);
- return;
+ mtx_lock_spin(&sched_lock);
+ /*
+ * Check if the lock has been released while spinning for
+ * the sched_lock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
}
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: %p contested (lock=%p) [%p]",
- m, (void *)m->mtx_lock, (void *)RETIP(m));
/*
- * Save our priority. Even though p_nativepri is protected
- * by sched_lock, we don't obtain it here as it can be
- * expensive. Since this is the only place p_nativepri is
- * set, and since two CPUs will not be executing the same
- * process concurrently, we know that no other CPU is going
- * to be messing with this. Also, p_nativepri is only read
- * when we are blocked on a mutex, so that can't be happening
- * right now either.
+ * The mutex was marked contested on release. This means that
+ * there are processes blocked on it.
*/
- p->p_nativepri = p->p_priority;
- while (!_obtain_lock(m, p)) {
- uintptr_t v;
- struct proc *p1;
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL,
+ ("contested mutex has no contesters"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+
+ if (p1->p_priority < p->p_priority)
+ SET_PRIO(p, p1->p_priority);
+ mtx_unlock_spin(&sched_lock);
+ return;
+ }
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit the
- * mutex was either release or the
- * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
- (void *)(v | MTX_CONTESTED))) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
+ /*
+ * If the mutex isn't already contested and a failure occurs
+ * setting the contested bit, the mutex was either released
+ * or the state of the MTX_RECURSED bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
+ }
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
+ /*
+ * We definitely must sleep for this lock.
+ */
+ mtx_assert(m, MA_NOTOWNED);
#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ /*
+ * If we're borrowing an interrupted thread's VM context, we
+ * must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "mtx_lock: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
+ }
#endif
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
+ /*
+ * Put us on the list of threads blocked on this mutex.
+ */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
+ }
- p->p_blocked = m; /* Who we're blocked on */
- p->p_mtxname = m->mtx_description;
- p->p_stat = SMTX;
+ /*
+ * Save who we're blocked on.
+ */
+ p->p_blocked = m;
+ p->p_mtxname = m->mtx_description;
+ p->p_stat = SMTX;
#if 0
- propagate_priority(p);
+ propagate_priority(p);
#endif
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p blocked on [%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p free from blocked on [%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (uintptr_t)p) {
- m->mtx_recurse++;
- return;
- }
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (_obtain_lock(m, p))
- break;
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
+ m->mtx_description);
+
+ mi_switch();
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
+ p, m, m->mtx_description);
+
+ mtx_unlock_spin(&sched_lock);
+ }
+
+ return;
+}
+
+/*
+ * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ *
+ * This is only called if we need to actually spin for the lock. Recursion
+ * is handled inline.
+ */
+void
+_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+ int line)
+{
+ int i = 0;
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "mtx_lock_spin: %p spinning", m);
+
+ for (;;) {
+ if (_obtain_lock(m, CURPROC))
+ break;
+
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY(1);
#ifdef DDB
- else if (!db_active)
+ else if (!db_active)
#else
- else
+ else
#endif
- panic(
- "spin lock %s held by %p for > 5 seconds",
- m->mtx_description,
- (void *)m->mtx_lock);
- }
+ panic("spin lock %s held by %p for > 5 seconds",
+ m->mtx_description, (void *)m->mtx_lock);
}
-
-#ifdef MUTEX_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveintr = 0xbeefface;
- else
-#endif
- m->mtx_saveintr = saveintr;
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spin done", m);
- return;
- }
}
+
+ m->mtx_saveintr = mtx_intr;
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+
+ return;
}
+/*
+ * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ *
+ * We are only called here if the lock is recursed or contested (i.e. we
+ * need to wake up a blocked thread).
+ */
void
-mtx_exit_hard(struct mtx *m, int type)
+_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
struct proc *p, *p1;
struct mtx *m1;
int pri;
p = CURPROC;
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (mtx_recursed(m)) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p contested", m);
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- _release_lock_quick(m);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p not held", m);
- } else
- atomic_store_rel_ptr(&m->mtx_lock,
- (void *)MTX_CONTESTED);
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p contested setrunqueue %p", m, p1);
- p1->p_blocked = NULL;
- p1->p_mtxname = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+ MPASS4(mtx_owned(m), "mtx_owned(mpp)", file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", m->mtx_description,
+ m, m->mtx_recurse, file, line);
+
+ if (mtx_recursed(m)) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
+ return;
+ }
+
+ mtx_lock_spin(&sched_lock);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
+
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
+ } else
+ atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
+
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
+ m, p1);
+
+ p1->p_blocked = NULL;
+ p1->p_mtxname = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+
+ if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: 0x%x interruped 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
-#endif
- setrunqueue(p);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p switching out lock=%p",
- m, (void *)m->mtx_lock);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p resuming lock=%p",
- m, (void *)m->mtx_lock);
}
- mtx_exit(&sched_lock, MTX_SPIN);
- break;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveintr != 0xbeefface);
- restore_intr(m->mtx_saveintr);
- }
- break;
- case MTX_SPIN | MTX_TOPHALF:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- break;
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+#endif
+ setrunqueue(p);
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: %p switching out lock=%p", m,
+ (void *)m->mtx_lock);
+
+ mi_switch();
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
+ m, (void *)m->mtx_lock);
}
+
+ mtx_unlock_spin(&sched_lock);
+
+ return;
}
+/*
+ * All the unlocking of MTX_SPIN locks is done inline.
+ * See the _rel_spin_lock() macro for the details.
+ */
+
+/*
+ * The INVARIANTS-enabled mtx_assert()
+ */
#ifdef INVARIANTS
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
@@ -822,6 +650,9 @@ _mtx_assert(struct mtx *m, int what, const char *file, int line)
}
#endif
+/*
+ * The MUTEX_DEBUG-enabled mtx_validate()
+ */
#define MV_DESTROY 0 /* validate before destory */
#define MV_INIT 1 /* validate before init */
@@ -843,7 +674,7 @@ mtx_validate(struct mtx *m, int when)
if (m == &all_mtx || cold)
return 0;
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
/*
* XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
* we can re-enable the kernacc() checks.
@@ -887,50 +718,63 @@ mtx_validate(struct mtx *m, int when)
retval = 1;
}
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
return (retval);
}
#endif
+/*
+ * Mutex initialization routine; initialize lock `m' with the options
+ * contained in `opts' and the description `description,' and place it on
+ * the "all_mtx" queue.
+ */
void
-mtx_init(struct mtx *m, const char *t, int flag)
+mtx_init(struct mtx *m, const char *description, int opts)
{
- if ((flag & MTX_QUIET) == 0)
- CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description);
+
#ifdef MUTEX_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ /* Diagnostic and error correction */
+ if (mtx_validate(m, MV_INIT))
return;
#endif
bzero((void *)m, sizeof *m);
TAILQ_INIT(&m->mtx_blocked);
+
#ifdef WITNESS
if (!witness_cold) {
- /* XXX - should not use DEVBUF */
m->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(m->mtx_debug != NULL);
}
#endif
- m->mtx_description = t;
- m->mtx_flags = flag;
+ m->mtx_description = description;
+ m->mtx_flags = opts;
m->mtx_lock = MTX_UNOWNED;
+
/* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next = &all_mtx;
m->mtx_prev = all_mtx.mtx_prev;
m->mtx_prev->mtx_next = m;
all_mtx.mtx_prev = m;
if (++mtx_cur_cnt > mtx_max_cnt)
mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
+
#ifdef WITNESS
if (!witness_cold)
- witness_init(m, flag);
+ witness_init(m, opts);
#endif
}
+/*
+ * Remove lock `m' from all_mtx queue.
+ */
void
mtx_destroy(struct mtx *m)
{
@@ -939,7 +783,9 @@ mtx_destroy(struct mtx *m)
KASSERT(!witness_cold, ("%s: Cannot destroy while still cold\n",
__FUNCTION__));
#endif
+
CTR2(KTR_LOCK, "mtx_destroy %p (%s)", m, m->mtx_description);
+
#ifdef MUTEX_DEBUG
if (m->mtx_next == NULL)
panic("mtx_destroy: %p (%s) already destroyed",
@@ -950,7 +796,9 @@ mtx_destroy(struct mtx *m)
} else {
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
}
- mtx_validate(m, MV_DESTROY); /* diagnostic */
+
+ /* diagnostic */
+ mtx_validate(m, MV_DESTROY);
#endif
#ifdef WITNESS
@@ -959,25 +807,27 @@ mtx_destroy(struct mtx *m)
#endif /* WITNESS */
/* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next->mtx_prev = m->mtx_prev;
m->mtx_prev->mtx_next = m->mtx_next;
+
#ifdef MUTEX_DEBUG
m->mtx_next = m->mtx_prev = NULL;
#endif
+
#ifdef WITNESS
- free(m->mtx_debug, M_DEVBUF);
+ free(m->mtx_debug, M_WITNESS);
m->mtx_debug = NULL;
#endif
+
mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
}
+
/*
- * The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the WITNESS kernel option being specified.
+ * The WITNESS-enabled diagnostic code.
*/
-
#ifdef WITNESS
static void
witness_fixup(void *dummy __unused)
@@ -988,26 +838,26 @@ witness_fixup(void *dummy __unused)
* We have to release Giant before initializing its witness
* structure so that WITNESS doesn't get confused.
*/
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&all_mtx, MTX_DEF);
+
+ mtx_lock(&all_mtx);
/* Iterate through all mutexes and finish up mutex initialization. */
for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- /* XXX - should not use DEVBUF */
mp->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(mp->mtx_debug != NULL);
witness_init(mp, mp->mtx_flags);
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
/* Mark the witness code as being ready for use. */
atomic_store_rel_int(&witness_cold, 0);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL)
@@ -1061,6 +911,9 @@ TUNABLE_INT_DECL("debug.witness_skipspin", 0, witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
"");
+/*
+ * Witness-enabled globals
+ */
static struct mtx w_mtx;
static struct witness *w_free;
static struct witness *w_all;
@@ -1069,20 +922,22 @@ static int witness_dead; /* fatal error, probably no memory */
static struct witness w_data[WITNESS_COUNT];
-static struct witness *enroll __P((const char *description, int flag));
-static int itismychild __P((struct witness *parent, struct witness *child));
-static void removechild __P((struct witness *parent, struct witness *child));
-static int isitmychild __P((struct witness *parent, struct witness *child));
-static int isitmydescendant __P((struct witness *parent, struct witness *child));
-static int dup_ok __P((struct witness *));
-static int blessed __P((struct witness *, struct witness *));
-static void witness_displaydescendants
- __P((void(*)(const char *fmt, ...), struct witness *));
-static void witness_leveldescendents __P((struct witness *parent, int level));
-static void witness_levelall __P((void));
-static struct witness * witness_get __P((void));
-static void witness_free __P((struct witness *m));
-
+/*
+ * Internal witness routine prototypes
+ */
+static struct witness *enroll(const char *description, int flag);
+static int itismychild(struct witness *parent, struct witness *child);
+static void removechild(struct witness *parent, struct witness *child);
+static int isitmychild(struct witness *parent, struct witness *child);
+static int isitmydescendant(struct witness *parent, struct witness *child);
+static int dup_ok(struct witness *);
+static int blessed(struct witness *, struct witness *);
+static void
+ witness_displaydescendants(void(*)(const char *fmt, ...), struct witness *);
+static void witness_leveldescendents(struct witness *parent, int level);
+static void witness_levelall(void);
+static struct witness * witness_get(void);
+static void witness_free(struct witness *m);
static char *ignore_list[] = {
"witness lock",
@@ -1129,7 +984,8 @@ static char *sleep_list[] = {
*/
static struct witness_blessed blessed_list[] = {
};
-static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed);
+static int blessed_count =
+ sizeof(blessed_list) / sizeof(struct witness_blessed);
static void
witness_init(struct mtx *m, int flag)
@@ -1211,17 +1067,17 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
" %s:%d already holding %s:%x",
m->mtx_description, w->w_level, file, line,
spin_order_list[ffs(i)-1], i);
}
PCPU_SET(witness_spin_check, i | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1245,7 +1101,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
if (!mtx_legal2block())
- panic("blockable mtx_enter() of %s when not legal @ %s:%d",
+ panic("blockable mtx_lock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
/*
* Is this the first mutex acquired
@@ -1267,16 +1123,16 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
}
MPASS(!mtx_owned(&w_mtx));
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
/*
* If we have a known higher number just say ok
*/
if (witness_watch > 1 && w->w_level > w1->w_level) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
if (isitmydescendant(m1->mtx_witness, w)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
@@ -1284,7 +1140,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (blessed(w, w1))
goto out;
if (m1 == &Giant) {
@@ -1313,7 +1169,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
m1 = LIST_FIRST(&p->p_heldmtx);
if (!itismychild(m1->mtx_witness, w))
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
out:
#ifdef DDB
@@ -1356,10 +1212,10 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_description, file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1407,10 +1263,10 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) & ~w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return;
}
if ((m->mtx_flags & MTX_SPIN) != 0)
@@ -1426,7 +1282,7 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
}
if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold)
- panic("switchable mtx_exit() of %s when not legal @ %s:%d",
+ panic("switchable mtx_unlock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
LIST_REMOVE(m, mtx_held);
m->mtx_held.le_prev = NULL;
@@ -1497,10 +1353,10 @@ enroll(const char *description, int flag)
}
if ((flag & MTX_SPIN) && witness_skipspin)
return (NULL);
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
for (w = w_all; w; w = w->w_next) {
if (strcmp(description, w->w_description) == 0) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return (w);
}
}
@@ -1509,7 +1365,7 @@ enroll(const char *description, int flag)
w->w_next = w_all;
w_all = w;
w->w_description = description;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (flag & MTX_SPIN) {
w->w_spin = 1;
@@ -1731,7 +1587,7 @@ witness_get()
if ((w = w_free) == NULL) {
witness_dead = 1;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
printf("witness exhausted\n");
return (NULL);
}
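
As a point of reference, here is a minimal sketch of the calling convention the witness hunks above end up with: spin locks that need no flags use mtx_lock_spin()/mtx_unlock_spin(), and call sites that still pass MTX_QUIET go through the *_spin_flags() wrappers. The lock, counter, and function names below are hypothetical and not part of this change; the lock is assumed to have been mtx_init()ed with MTX_SPIN.

    #include <sys/param.h>
    #include <sys/mutex.h>

    static struct mtx example_mtx;	/* assumed: mtx_init(&example_mtx, "example", MTX_SPIN) */
    static int example_count;

    static void
    example_bump(void)
    {
            /* MTX_QUIET suppresses the KTR_LOCK trace for this acquire/release pair. */
            mtx_lock_spin_flags(&example_mtx, MTX_QUIET);
            example_count++;
            mtx_unlock_spin_flags(&example_mtx, MTX_QUIET);
    }
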
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index f5b35c1..d1ef108 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -403,7 +403,7 @@ fill_kinfo_proc(p, kp)
kp->ki_sigignore = p->p_procsig->ps_sigignore;
kp->ki_sigcatch = p->p_procsig->ps_sigcatch;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat != SIDL && p->p_stat != SZOMB && p->p_vmspace != NULL) {
struct vmspace *vm = p->p_vmspace;
@@ -449,7 +449,7 @@ fill_kinfo_proc(p, kp)
kp->ki_rqindex = p->p_rqindex;
kp->ki_oncpu = p->p_oncpu;
kp->ki_lastcpu = p->p_lastcpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
sp = NULL;
if (p->p_pgrp) {
kp->ki_pgid = p->p_pgrp->pg_id;
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index 7238499..0c5b589 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -1155,9 +1155,9 @@ crhold(cr)
struct ucred *cr;
{
- mtx_enter(&cr->cr_mtx, MTX_DEF);
+ mtx_lock(&cr->cr_mtx);
cr->cr_ref++;
- mtx_exit(&(cr)->cr_mtx, MTX_DEF);
+ mtx_unlock(&(cr)->cr_mtx);
}
@@ -1170,7 +1170,7 @@ crfree(cr)
struct ucred *cr;
{
- mtx_enter(&cr->cr_mtx, MTX_DEF);
+ mtx_lock(&cr->cr_mtx);
if (--cr->cr_ref == 0) {
mtx_destroy(&cr->cr_mtx);
/*
@@ -1182,7 +1182,7 @@ crfree(cr)
uifree(cr->cr_uidinfo);
FREE((caddr_t)cr, M_CRED);
} else {
- mtx_exit(&cr->cr_mtx, MTX_DEF);
+ mtx_unlock(&cr->cr_mtx);
}
}
@@ -1195,12 +1195,12 @@ crcopy(cr)
{
struct ucred *newcr;
- mtx_enter(&cr->cr_mtx, MTX_DEF);
+ mtx_lock(&cr->cr_mtx);
if (cr->cr_ref == 1) {
- mtx_exit(&cr->cr_mtx, MTX_DEF);
+ mtx_unlock(&cr->cr_mtx);
return (cr);
}
- mtx_exit(&cr->cr_mtx, MTX_DEF);
+ mtx_unlock(&cr->cr_mtx);
newcr = crdup(cr);
crfree(cr);
return (newcr);
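
The ucred changes above are a plain reference count guarded by a sleep mutex. A small self-contained sketch of the same idiom under the new names follows; struct ref_obj, obj_hold(), obj_rele(), and obj_destroy() are hypothetical, and the embedded lock is assumed to have been initialized with MTX_DEF.

    struct ref_obj {
            struct mtx      ro_mtx;         /* assumed MTX_DEF-initialized */
            int             ro_ref;
    };

    static void     obj_destroy(struct ref_obj *);  /* hypothetical destructor */

    static void
    obj_hold(struct ref_obj *ro)
    {
            mtx_lock(&ro->ro_mtx);
            ro->ro_ref++;
            mtx_unlock(&ro->ro_mtx);
    }

    static void
    obj_rele(struct ref_obj *ro)
    {
            mtx_lock(&ro->ro_mtx);
            if (--ro->ro_ref == 0) {
                    /* Last reference: the lock dies with the object, as in crfree() above. */
                    mtx_destroy(&ro->ro_mtx);
                    obj_destroy(ro);
                    return;
            }
            mtx_unlock(&ro->ro_mtx);
    }
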
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index e29c273..8af2be5 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -610,9 +610,9 @@ getrusage(p, uap)
case RUSAGE_SELF:
rup = &p->p_stats->p_ru;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
break;
case RUSAGE_CHILDREN:
@@ -724,12 +724,12 @@ uifind(uid)
{
struct uidinfo *uip;
- mtx_enter(&uihashtbl_mtx, MTX_DEF);
+ mtx_lock(&uihashtbl_mtx);
uip = uilookup(uid);
if (uip == NULL)
uip = uicreate(uid);
uihold(uip);
- mtx_exit(&uihashtbl_mtx, MTX_DEF);
+ mtx_unlock(&uihashtbl_mtx);
return (uip);
}
@@ -741,9 +741,9 @@ uihold(uip)
struct uidinfo *uip;
{
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_lock(&uip->ui_mtx);
uip->ui_ref++;
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
}
/*-
@@ -767,18 +767,18 @@ uifree(uip)
{
/* Prepare for optimal case. */
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_lock(&uip->ui_mtx);
if (--uip->ui_ref != 0) {
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return;
}
/* Prepare for suboptimal case. */
uip->ui_ref++;
- mtx_exit(&uip->ui_mtx, MTX_DEF);
- mtx_enter(&uihashtbl_mtx, MTX_DEF);
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
+ mtx_lock(&uihashtbl_mtx);
+ mtx_lock(&uip->ui_mtx);
/*
* We must subtract one from the count again because we backed out
@@ -788,7 +788,7 @@ uifree(uip)
*/
if (--uip->ui_ref == 0) {
LIST_REMOVE(uip, ui_hash);
- mtx_exit(&uihashtbl_mtx, MTX_DEF);
+ mtx_unlock(&uihashtbl_mtx);
if (uip->ui_sbsize != 0)
/* XXX no %qd in kernel. Truncate. */
printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
@@ -801,8 +801,8 @@ uifree(uip)
return;
}
- mtx_exit(&uihashtbl_mtx, MTX_DEF);
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uihashtbl_mtx);
+ mtx_unlock(&uip->ui_mtx);
}
/*
@@ -816,16 +816,16 @@ chgproccnt(uip, diff, max)
int max;
{
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_lock(&uip->ui_mtx);
/* don't allow them to exceed max, but allow subtraction */
if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return (0);
}
uip->ui_proccnt += diff;
if (uip->ui_proccnt < 0)
printf("negative proccnt for uid = %d\n", uip->ui_uid);
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return (1);
}
@@ -843,12 +843,12 @@ chgsbsize(uip, hiwat, to, max)
int s;
s = splnet();
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_lock(&uip->ui_mtx);
new = uip->ui_sbsize + to - *hiwat;
/* don't allow them to exceed max, but allow subtraction */
if (to > *hiwat && new > max) {
splx(s);
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return (0);
}
uip->ui_sbsize = new;
@@ -856,6 +856,6 @@ chgsbsize(uip, hiwat, to, max)
if (uip->ui_sbsize < 0)
printf("negative sbsize for uid = %d\n", uip->ui_uid);
splx(s);
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return (1);
}
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 1b7426f..b25fa4d 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -256,10 +256,10 @@ boot(int howto)
if (curproc != NULL) {
DROP_GIANT_NOSWITCH();
for (subiter = 0; subiter < 50 * iter; subiter++) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
setrunqueue(curproc);
mi_switch(); /* Allow interrupt threads to run */
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
DELAY(1000);
}
PICKUP_GIANT();
@@ -540,7 +540,7 @@ panic(const char *fmt, ...)
#ifdef SMP
/* Only 1 CPU can panic at a time */
- mtx_enter(&panic_mtx, MTX_DEF);
+ mtx_lock(&panic_mtx);
#endif
bootopt = RB_AUTOBOOT | RB_DUMP;
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index a72de0e..deee375 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -186,9 +186,9 @@ CURSIG(struct proc *p)
SIGSETNAND(tmpset, p->p_sigmask);
if (SIGISEMPTY(tmpset) && (p->p_flag & P_TRACED) == 0)
return (0);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
r = issignal(p);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (r);
}
@@ -1087,11 +1087,11 @@ psignal(p, sig)
action = SIG_DFL;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
(p->p_flag & P_TRACED) == 0)
p->p_nice = NZERO;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (prop & SA_CONT)
SIG_STOPSIGMASK(p->p_siglist);
@@ -1116,9 +1116,9 @@ psignal(p, sig)
* Defer further processing for signals which are held,
* except that stopped processes must be continued by SIGCONT.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (action == SIG_HOLD && (!(prop & SA_CONT) || p->p_stat != SSTOP)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
return;
}
@@ -1132,7 +1132,7 @@ psignal(p, sig)
* trap() or syscall().
*/
if ((p->p_sflag & PS_SINTR) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
/*
@@ -1142,7 +1142,7 @@ psignal(p, sig)
*/
if (p->p_flag & P_TRACED)
goto run;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* If SIGCONT is default (or ignored) and process is
* asleep, we are finished; the process should not
@@ -1182,7 +1182,7 @@ psignal(p, sig)
/* NOTREACHED */
case SSTOP:
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* If traced process is already stopped,
* then no further action is necessary.
@@ -1211,11 +1211,11 @@ psignal(p, sig)
SIGDELSET(p->p_siglist, sig);
if (action == SIG_CATCH)
goto runfast;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == NULL)
goto run;
p->p_stat = SSLEEP;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
@@ -1234,14 +1234,14 @@ psignal(p, sig)
* runnable and can look at the signal. But don't make
* the process runnable, leave it stopped.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan && p->p_sflag & PS_SINTR) {
if (p->p_sflag & PS_CVWAITQ)
cv_waitq_remove(p);
else
unsleep(p);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
default:
@@ -1251,17 +1251,17 @@ psignal(p, sig)
* It will either never be noticed, or noticed very soon.
*/
if (p == curproc) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
signotify(p);
}
#ifdef SMP
else if (p->p_stat == SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
forward_signal(p);
}
#endif
else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
/*NOTREACHED*/
@@ -1270,14 +1270,14 @@ runfast:
/*
* Raise priority to at least PUSER.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_priority > PUSER)
p->p_priority = PUSER;
run:
/* If we jump here, sched_lock has to be owned. */
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
setrunnable(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
out:
/* If we jump here, sched_lock should not be owned. */
mtx_assert(&sched_lock, MA_NOTOWNED);
@@ -1336,10 +1336,10 @@ issignal(p)
do {
stop(p);
PROCTREE_LOCK(PT_RELEASE);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
PROCTREE_LOCK(PT_SHARED);
} while (!trace_req(p)
@@ -1413,10 +1413,10 @@ issignal(p)
if ((p->p_pptr->p_procsig->ps_flag & PS_NOCLDSTOP) == 0)
psignal(p->p_pptr, SIGCHLD);
PROCTREE_LOCK(PT_RELEASE);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
break;
} else if (prop & SA_IGNORE) {
@@ -1464,11 +1464,11 @@ stop(p)
{
PROCTREE_ASSERT(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_stat = SSTOP;
p->p_flag &= ~P_WAITED;
wakeup((caddr_t)p->p_pptr);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
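
The psignal() paths above use mtx_assert() at the run:/out: labels to document who owns sched_lock. Stripped of the signal logic, the assertion pattern is simply the following (the body comment is a placeholder):

    mtx_lock_spin(&sched_lock);
    mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
    /* ... p_stat and run-queue manipulation goes here ... */
    mtx_unlock_spin(&sched_lock);
    mtx_assert(&sched_lock, MA_NOTOWNED);
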
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index c3d7849..ef4121b 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -377,13 +377,13 @@ uio_yield()
p = curproc;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
p->p_priority = p->p_usrpri;
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
splx(s);
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 2518a28..8f54602 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -295,7 +295,7 @@ schedcpu(arg)
if (p->p_stat == SWAIT)
continue;
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_swtime++;
if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
p->p_slptime++;
@@ -305,7 +305,7 @@ schedcpu(arg)
* stop recalculating its priority until it wakes up.
*/
if (p->p_slptime > 1) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
@@ -343,7 +343,7 @@ schedcpu(arg)
} else
p->p_priority = p->p_usrpri;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
ALLPROC_LOCK(AP_RELEASE);
@@ -427,7 +427,7 @@ msleep(ident, mtx, priority, wmesg, timo)
ktrcsw(p->p_tracep, 1, 0);
#endif
WITNESS_SLEEP(0, mtx);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
s = splhigh();
if (cold || panicstr) {
/*
@@ -437,8 +437,8 @@ msleep(ident, mtx, priority, wmesg, timo)
* in case this is the idle process and already asleep.
*/
if (mtx != NULL && priority & PDROP)
- mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_flags(mtx, MTX_NOSWITCH);
+ mtx_unlock_spin(&sched_lock);
splx(s);
return (0);
}
@@ -448,7 +448,7 @@ msleep(ident, mtx, priority, wmesg, timo)
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(mtx, mtx);
- mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mtx, MTX_NOSWITCH);
if (priority & PDROP)
mtx = NULL;
}
@@ -485,15 +485,15 @@ msleep(ident, mtx, priority, wmesg, timo)
"msleep caught: proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
p->p_sflag |= PS_SINTR;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if ((sig = CURSIG(p))) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan)
unsleep(p);
p->p_stat = SRUN;
goto resume;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == NULL) {
catch = 0;
goto resume;
@@ -518,12 +518,12 @@ resume:
ktrcsw(p->p_tracep, 0, 0);
#endif
rval = EWOULDBLOCK;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
} else if (timo)
callout_stop(&p->p_slpcallout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
@@ -543,7 +543,7 @@ out:
#endif
PICKUP_GIANT();
if (mtx != NULL) {
- mtx_enter(mtx, MTX_DEF);
+ mtx_lock(mtx);
WITNESS_RESTORE(mtx, mtx);
}
return (rval);
@@ -579,7 +579,7 @@ asleep(void *ident, int priority, const char *wmesg, int timo)
*/
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan != NULL)
unsleep(p);
@@ -593,7 +593,7 @@ asleep(void *ident, int priority, const char *wmesg, int timo)
TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
return(0);
@@ -620,12 +620,12 @@ mawait(struct mtx *mtx, int priority, int timo)
WITNESS_SAVE_DECL(mtx);
WITNESS_SLEEP(0, mtx);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(mtx, mtx);
- mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mtx, MTX_NOSWITCH);
if (priority & PDROP)
mtx = NULL;
}
@@ -657,15 +657,15 @@ mawait(struct mtx *mtx, int priority, int timo)
if (catch) {
p->p_sflag |= PS_SINTR;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if ((sig = CURSIG(p))) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan)
unsleep(p);
p->p_stat = SRUN;
goto resume;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == NULL) {
catch = 0;
goto resume;
@@ -687,12 +687,12 @@ resume:
ktrcsw(p->p_tracep, 0, 0);
#endif
rval = EWOULDBLOCK;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
} else if (timo)
callout_stop(&p->p_slpcallout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
@@ -720,7 +720,7 @@ resume:
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -735,7 +735,7 @@ resume:
out:
PICKUP_GIANT();
if (mtx != NULL) {
- mtx_enter(mtx, MTX_DEF);
+ mtx_lock(mtx);
WITNESS_RESTORE(mtx, mtx);
}
return (rval);
@@ -761,7 +761,7 @@ endtsleep(arg)
"endtsleep: proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan) {
if (p->p_stat == SSLEEP)
setrunnable(p);
@@ -769,7 +769,7 @@ endtsleep(arg)
unsleep(p);
p->p_sflag |= PS_TIMEOUT;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -783,12 +783,12 @@ unsleep(p)
int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan) {
TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_slpq);
p->p_wchan = NULL;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -804,7 +804,7 @@ wakeup(ident)
int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
restart:
TAILQ_FOREACH(p, qp, p_slpq) {
@@ -832,7 +832,7 @@ restart:
}
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -850,7 +850,7 @@ wakeup_one(ident)
int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
TAILQ_FOREACH(p, qp, p_slpq) {
@@ -878,7 +878,7 @@ wakeup_one(ident)
}
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -947,13 +947,13 @@ mi_switch()
p->p_runtime > p->p_limit->p_cpulimit) {
rlim = &p->p_rlimit[RLIMIT_CPU];
if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
killproc(p, "exceeded maximum CPU limit");
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
psignal(p, SIGXCPU);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (rlim->rlim_cur < rlim->rlim_max) {
/* XXX: we should make a private copy */
rlim->rlim_cur += 5;
@@ -990,7 +990,7 @@ setrunnable(p)
register int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
switch (p->p_stat) {
case 0:
case SRUN:
@@ -1022,7 +1022,7 @@ setrunnable(p)
}
else
maybe_resched(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -1036,7 +1036,7 @@ resetpriority(p)
{
register unsigned int newpriority;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
NICE_WEIGHT * (p->p_nice - PRIO_MIN);
@@ -1044,7 +1044,7 @@ resetpriority(p)
p->p_usrpri = newpriority;
}
maybe_resched(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/* ARGSUSED */
@@ -1100,13 +1100,13 @@ yield(struct proc *p, struct yield_args *uap)
p->p_retval[0] = 0;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
p->p_priority = MAXPRI;
setrunqueue(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
splx(s);
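
The msleep() and mawait() hunks above drop the caller's interlock with mtx_unlock_flags(mtx, MTX_NOSWITCH) and take it back with the plain wrapper on the way out. A condensed sketch of that hand-off, with the error handling and spl juggling omitted and a MTX_DEF interlock assumed:

    mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
    mtx_unlock_flags(mtx, MTX_NOSWITCH);    /* release the interlock without switching */
    /* ... sleep on the wait channel ... */
    mtx_lock(mtx);                          /* reacquire before returning to the caller */
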
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 5576e08..6543b41 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -91,7 +91,7 @@ softclock(void *dummy)
steps = 0;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
while (softticks != ticks) {
softticks++;
/*
@@ -108,10 +108,10 @@ softclock(void *dummy)
if (steps >= MAX_SOFTCLOCK_STEPS) {
nextsoftcheck = c;
/* Give interrupts a chance. */
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
c = nextsoftcheck;
steps = 0;
}
@@ -134,22 +134,22 @@ softclock(void *dummy)
c->c_flags =
(c->c_flags & ~CALLOUT_PENDING);
}
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
if (!(c_flags & CALLOUT_MPSAFE))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
splx(s);
c_func(c_arg);
s = splhigh();
if (!(c_flags & CALLOUT_MPSAFE))
- mtx_exit(&Giant, MTX_DEF);
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_unlock(&Giant);
+ mtx_lock_spin(&callout_lock);
steps = 0;
c = nextsoftcheck;
}
}
}
nextsoftcheck = NULL;
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
}
@@ -180,7 +180,7 @@ timeout(ftn, arg, to_ticks)
struct callout_handle handle;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
/* Fill in the next free callout structure. */
new = SLIST_FIRST(&callfree);
@@ -192,7 +192,7 @@ timeout(ftn, arg, to_ticks)
callout_reset(new, to_ticks, ftn, arg);
handle.callout = new;
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
return (handle);
}
@@ -214,10 +214,10 @@ untimeout(ftn, arg, handle)
return;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
callout_stop(handle.callout);
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
}
@@ -251,7 +251,7 @@ callout_reset(c, to_ticks, ftn, arg)
int s;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
if (c->c_flags & CALLOUT_PENDING)
callout_stop(c);
@@ -269,7 +269,7 @@ callout_reset(c, to_ticks, ftn, arg)
c->c_time = ticks + to_ticks;
TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
c, c_links.tqe);
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
}
@@ -280,13 +280,13 @@ callout_stop(c)
int s;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
/*
* Don't attempt to delete a callout that's not on the queue.
*/
if (!(c->c_flags & CALLOUT_PENDING)) {
c->c_flags &= ~CALLOUT_ACTIVE;
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
return;
}
@@ -301,7 +301,7 @@ callout_stop(c)
if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
}
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
}
@@ -366,7 +366,7 @@ adjust_timeout_calltodo(time_change)
/* don't collide with softclock() */
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
for (p = calltodo.c_next; p != NULL; p = p->c_next) {
p->c_time -= delta_ticks;
@@ -377,7 +377,7 @@ adjust_timeout_calltodo(time_change)
/* take back the ticks the timer didn't use (p->c_time <= 0) */
delta_ticks = -p->c_time;
}
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
return;
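
softclock() above never calls a handler while holding the callout spin lock, and wraps non-MPSAFE handlers in Giant. The hand-off, with the spl manipulation stripped out, reduces to:

    mtx_unlock_spin(&callout_lock);
    if (!(c_flags & CALLOUT_MPSAFE))
            mtx_lock(&Giant);
    c_func(c_arg);
    if (!(c_flags & CALLOUT_MPSAFE))
            mtx_unlock(&Giant);
    mtx_lock_spin(&callout_lock);
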
diff --git a/sys/kern/subr_eventhandler.c b/sys/kern/subr_eventhandler.c
index 12f5e3d..5c524d8 100644
--- a/sys/kern/subr_eventhandler.c
+++ b/sys/kern/subr_eventhandler.c
@@ -73,7 +73,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
KASSERT(eventhandler_lists_initted, ("eventhandler registered too early"));
/* lock the eventhandler lists */
- mtx_enter(&eventhandler_mutex, MTX_DEF);
+ mtx_lock(&eventhandler_mutex);
/* Do we need to find/create the (slow) list? */
if (list == NULL) {
@@ -84,7 +84,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
if (list == NULL) {
if ((list = malloc(sizeof(struct eventhandler_list) + strlen(name) + 1,
M_EVENTHANDLER, M_NOWAIT)) == NULL) {
- mtx_exit(&eventhandler_mutex, MTX_DEF);
+ mtx_unlock(&eventhandler_mutex);
return(NULL);
}
list->el_flags = 0;
@@ -102,7 +102,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
/* allocate an entry for this handler, populate it */
if ((eg = malloc(sizeof(struct eventhandler_entry_generic),
M_EVENTHANDLER, M_NOWAIT)) == NULL) {
- mtx_exit(&eventhandler_mutex, MTX_DEF);
+ mtx_unlock(&eventhandler_mutex);
return(NULL);
}
eg->func = func;
@@ -122,7 +122,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
if (ep == NULL)
TAILQ_INSERT_TAIL(&list->el_entries, &eg->ee, ee_link);
lockmgr(&list->el_lock, LK_RELEASE, NULL, CURPROC);
- mtx_exit(&eventhandler_mutex, MTX_DEF);
+ mtx_unlock(&eventhandler_mutex);
return(&eg->ee);
}
@@ -154,14 +154,14 @@ eventhandler_find_list(char *name)
struct eventhandler_list *list;
/* scan looking for the requested list */
- mtx_enter(&eventhandler_mutex, MTX_DEF);
+ mtx_lock(&eventhandler_mutex);
for (list = TAILQ_FIRST(&eventhandler_lists);
list != NULL;
list = TAILQ_NEXT(list, el_link)) {
if (!strcmp(name, list->el_name))
break;
}
- mtx_exit(&eventhandler_mutex, MTX_DEF);
+ mtx_unlock(&eventhandler_mutex);
return(list);
}
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 7225c54..7fadeed 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -444,12 +444,12 @@ addupc_task(p, pc, ticks)
u_short v;
/* Testing PS_PROFIL may be unnecessary, but is certainly safe. */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_PROFIL) == 0 || ticks == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
prof = &p->p_stats->p_prof;
if (pc < prof->pr_off ||
diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c
index f94bbeb..9b898c4 100644
--- a/sys/kern/subr_rman.c
+++ b/sys/kern/subr_rman.c
@@ -104,9 +104,9 @@ rman_init(struct rman *rm)
return ENOMEM;
mtx_init(rm->rm_mtx, "rman", MTX_DEF);
- mtx_enter(&rman_mtx, MTX_DEF);
+ mtx_lock(&rman_mtx);
TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
- mtx_exit(&rman_mtx, MTX_DEF);
+ mtx_unlock(&rman_mtx);
return 0;
}
@@ -129,7 +129,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end)
r->r_dev = 0;
r->r_rm = rm;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
for (s = TAILQ_FIRST(&rm->rm_list);
s && s->r_end < r->r_start;
s = TAILQ_NEXT(s, r_link))
@@ -141,7 +141,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end)
TAILQ_INSERT_BEFORE(s, r, r_link);
}
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return 0;
}
@@ -150,10 +150,10 @@ rman_fini(struct rman *rm)
{
struct resource *r;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
TAILQ_FOREACH(r, &rm->rm_list, r_link) {
if (r->r_flags & RF_ALLOCATED) {
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return EBUSY;
}
}
@@ -167,10 +167,10 @@ rman_fini(struct rman *rm)
TAILQ_REMOVE(&rm->rm_list, r, r_link);
free(r, M_RMAN);
}
- mtx_exit(rm->rm_mtx, MTX_DEF);
- mtx_enter(&rman_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
+ mtx_lock(&rman_mtx);
TAILQ_REMOVE(&rman_head, rm, rm_link);
- mtx_exit(&rman_mtx, MTX_DEF);
+ mtx_unlock(&rman_mtx);
mtx_destroy(rm->rm_mtx);
free(rm->rm_mtx, M_RMAN);
@@ -193,7 +193,7 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
want_activate = (flags & RF_ACTIVE);
flags &= ~RF_ACTIVE;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
for (r = TAILQ_FIRST(&rm->rm_list);
r && r->r_end < start;
@@ -370,7 +370,7 @@ out:
}
}
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return (rv);
}
@@ -417,9 +417,9 @@ rman_activate_resource(struct resource *r)
struct rman *rm;
rm = r->r_rm;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
rv = int_rman_activate_resource(rm, r, &whohas);
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return rv;
}
@@ -432,7 +432,7 @@ rman_await_resource(struct resource *r, int pri, int timo)
rm = r->r_rm;
for (;;) {
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
rv = int_rman_activate_resource(rm, r, &whohas);
if (rv != EBUSY)
return (rv); /* returns with mutex held */
@@ -441,19 +441,19 @@ rman_await_resource(struct resource *r, int pri, int timo)
panic("rman_await_resource");
/*
* splhigh hopefully will prevent a race between
- * mtx_exit and tsleep where a process
+ * mtx_unlock and tsleep where a process
* could conceivably get in and release the resource
* before we have a chance to sleep on it.
*/
s = splhigh();
whohas->r_flags |= RF_WANTED;
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
if (rv) {
splx(s);
return rv;
}
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
splx(s);
}
}
@@ -478,9 +478,9 @@ rman_deactivate_resource(struct resource *r)
struct rman *rm;
rm = r->r_rm;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
int_rman_deactivate_resource(r);
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return 0;
}
@@ -576,9 +576,9 @@ rman_release_resource(struct resource *r)
int rv;
struct rman *rm = r->r_rm;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
rv = int_rman_release_resource(rm, r);
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return (rv);
}
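
rman_init() and rman_fini() above cover the whole life cycle of a sleep mutex under the new interface. A minimal standalone sketch (the lock and the list it protects are hypothetical):

    struct mtx list_mtx;

    mtx_init(&list_mtx, "example list", MTX_DEF);

    mtx_lock(&list_mtx);
    /* ... insert into or walk the protected list ... */
    mtx_unlock(&list_mtx);

    mtx_destroy(&list_mtx);         /* must not be recursed or contested at this point */
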
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 9d53cd7..2802750 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index b4373b3..533d791 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -174,11 +174,11 @@ userret(p, frame, oticks)
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_priority = p->p_usrpri;
if (resched_wanted()) {
/*
@@ -193,30 +193,30 @@ userret(p, frame, oticks)
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
/*
* Charge system time if profiling.
*/
if (p->p_sflag & PS_PROFIL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/* XXX - do we need Giant? */
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, frame->tf_eip,
(u_int)(p->p_sticks - oticks) * psratio);
}
curpriority = p->p_priority;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -282,9 +282,9 @@ restart:
((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
/* user trap */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
switch (type) {
@@ -312,9 +312,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0)
goto user;
break;
@@ -339,9 +339,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = trap_pfault(&frame, TRUE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
if (i == -2) {
/*
@@ -371,13 +371,13 @@ restart:
#ifndef TIMER_FREQ
# define TIMER_FREQ 1193182
#endif
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* machine/parity/power fail/"kitchen sink" faults */
@@ -421,9 +421,9 @@ restart:
ucode = FPE_FPU_NP_TRAP;
break;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = (*pmath_emulate)(&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0) {
if (!(frame.tf_eflags & PSL_T))
goto out;
@@ -452,9 +452,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(void) trap_pfault(&frame, FALSE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
case T_DNA:
@@ -477,9 +477,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i != 0)
/*
* returns to original process
@@ -510,9 +510,9 @@ restart:
*/
if (frame.tf_eip == (int)cpu_switch_load_gs) {
PCPU_GET(curpcb)->pcb_gs = 0;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGBUS);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
@@ -621,13 +621,13 @@ restart:
#ifdef DEV_ISA
case T_NMI:
#ifdef POWERFAIL_NMI
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* XXX Giant */
@@ -651,13 +651,13 @@ restart:
#endif /* DEV_ISA */
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
trap_fatal(&frame, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* Translate fault for emulators (e.g. Linux) */
if (*p->p_sysent->sv_transtrap)
i = (*p->p_sysent->sv_transtrap)(i, type);
@@ -673,12 +673,12 @@ restart:
uprintf("\n");
}
#endif
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
user:
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
out:
return;
}
@@ -1103,15 +1103,15 @@ syscall2(frame)
#ifdef DIAGNOSTIC
if (ISPL(frame.tf_cs) != SEL_UPL) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
panic("syscall");
/* NOT REACHED */
}
#endif
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
params = (caddr_t)frame.tf_esp + sizeof(int);
@@ -1121,9 +1121,9 @@ syscall2(frame)
/*
* The prep code is not MP aware.
*/
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
} else {
/*
* Need to check if this is a 32 bit or 64 bit syscall.
@@ -1160,7 +1160,7 @@ syscall2(frame)
*/
if (params && (i = narg * sizeof(int)) &&
(error = copyin(params, (caddr_t)args, (u_int)i))) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL))
ktrsyscall(p->p_tracep, code, narg, args);
@@ -1174,13 +1174,13 @@ syscall2(frame)
* we are ktracing
*/
if ((callp->sy_narg & SYF_MPSAFE) == 0) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsyscall(p->p_tracep, code, narg, args);
}
#endif
@@ -1230,7 +1230,7 @@ bad:
*/
if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
frame.tf_eflags &= ~PSL_T;
trapsignal(p, SIGTRAP, 0);
}
@@ -1243,7 +1243,7 @@ bad:
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
}
#endif
@@ -1259,7 +1259,7 @@ bad:
* Release Giant if we had to get it
*/
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#ifdef WITNESS
if (witness_list(p)) {
@@ -1278,38 +1278,38 @@ ast(frame)
struct proc *p = CURPROC;
u_quad_t sticks;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
astoff();
atomic_add_int(&cnt.v_soft, 1);
if (p->p_sflag & PS_OWEUPC) {
p->p_sflag &= ~PS_OWEUPC;
- mtx_exit(&sched_lock, MTX_SPIN);
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
}
if (p->p_sflag & PS_ALRMPEND) {
p->p_sflag &= ~PS_ALRMPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGVTALRM);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (p->p_sflag & PS_PROFPEND) {
p->p_sflag &= ~PS_PROFPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGPROF);
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
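
The trap and syscall paths above repeat one idiom worth isolating: take Giant only if it is not already held, and drop it on the way out only if it is. In isolation:

    if (!mtx_owned(&Giant))
            mtx_lock(&Giant);
    /* ... work that still needs Giant ... */
    if (mtx_owned(&Giant))
            mtx_unlock(&Giant);
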
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index c13dd1d..64d3972 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -31,6 +31,11 @@
*/
/*
+ * Machine independent bits of mutex implementation and implementation of
+ * `witness' structure & related debugging routines.
+ */
+
+/*
* Main Entry: witness
* Pronunciation: 'wit-n&s
* Function: noun
@@ -53,12 +58,6 @@
#include "opt_ddb.h"
#include "opt_witness.h"
-/*
- * Cause non-inlined mtx_*() to be compiled.
- * Must be defined early because other system headers may include mutex.h.
- */
-#define _KERN_MUTEX_C_
-
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
@@ -82,9 +81,8 @@
#include <sys/mutex.h>
/*
- * Machine independent bits of the mutex implementation
+ * The WITNESS-enabled mutex debug structure.
*/
-
#ifdef WITNESS
struct mtx_debug {
struct witness *mtxd_witness;
@@ -100,138 +98,54 @@ struct mtx_debug {
#endif /* WITNESS */
/*
- * Assembly macros
- *------------------------------------------------------------------------------
- */
-
-#define _V(x) __STRING(x)
-
-/*
- * Default, unoptimized mutex micro-operations
+ * Internal utility macros.
*/
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#ifndef _obtain_lock
-/* Actually obtain mtx_lock */
-#define _obtain_lock(mp, tid) \
- atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
-#endif
-
-#ifndef _release_lock
-/* Actually release mtx_lock */
-#define _release_lock(mp, tid) \
- atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _release_lock_quick
-/* Actually release mtx_lock quickly assuming that we own it */
-#define _release_lock_quick(mp) \
- atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _getlock_sleep
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _getlock_spin_block
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_intr = save_intr(); \
- disable_intr(); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \
- else \
- (mp)->mtx_saveintr = _mtx_intr; \
-} while (0)
-#endif
+#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-#ifndef _getlock_norecurse
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-#endif
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-#ifndef _exitlock_norecurse
/*
- * Release a sleep lock assuming we haven't recursed on it, recursion is handled
- * in the hard function.
+ * Early WITNESS-enabled declarations.
*/
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-#endif
+#ifdef WITNESS
-#ifndef _exitlock
/*
- * Release a sleep lock when its likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) { \
- if ((mp)->mtx_lock & MTX_RECURSED) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_ptr(&(mp)->mtx_lock, \
- MTX_RECURSED); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _exitlock_spin
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp) do { \
- if (!mtx_recursed((mp))) { \
- int _mtx_intr = (mp)->mtx_saveintr; \
- \
- _release_lock_quick(mp); \
- restore_intr(_mtx_intr); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-#endif
+ * Internal WITNESS routines which must be prototyped early.
+ *
+ * XXX: When/if witness code is cleaned up, it would be wise to place all
+ * witness prototyping early in this file.
+ */
+static void witness_init(struct mtx *, int flag);
+static void witness_destroy(struct mtx *);
+static void witness_display(void(*)(const char *fmt, ...));
-#ifdef WITNESS
-static void witness_init(struct mtx *, int flag);
-static void witness_destroy(struct mtx *);
-static void witness_display(void(*)(const char *fmt, ...));
+MALLOC_DEFINE(M_WITNESS, "witness", "witness mtx_debug structure");
/* All mutexes in system (used for debug/panic) */
static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0 };
+
/*
- * Set to 0 once mutexes have been fully initialized so that witness code can be
- * safely executed.
+ * This global is set to 0 once it becomes safe to use the witness code.
*/
static int witness_cold = 1;
+
#else /* WITNESS */
-/*
- * flag++ is slezoid way of shutting up unused parameter warning
- * in mtx_init()
+/* XXX XXX XXX
+ * flag++ is a sleazoid way of shutting up the unused parameter warning
*/
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_try_enter(m, t, f, l)
#endif /* WITNESS */
-/* All mutexes in system (used for debug/panic) */
+/*
+ * All mutex locks in system are kept on the all_mtx list.
+ */
static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
{ NULL, NULL }, &all_mtx, &all_mtx,
@@ -242,19 +156,18 @@ static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
#endif
};
+/*
+ * Global variables for book keeping.
+ */
static int mtx_cur_cnt;
static int mtx_max_cnt;
+/*
+ * Prototypes for non-exported routines.
+ *
+ * NOTE: Prototypes for witness routines are placed at the bottom of the file.
+ */
static void propagate_priority(struct proc *);
-static void mtx_enter_hard(struct mtx *, int type, int saveintr);
-static void mtx_exit_hard(struct mtx *, int type);
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
static void
propagate_priority(struct proc *p)
@@ -277,6 +190,7 @@ propagate_priority(struct proc *p)
MPASS(m->mtx_lock == MTX_CONTESTED);
return;
}
+
MPASS(p->p_magic == P_MAGIC);
KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
if (p->p_priority <= pri)
@@ -314,7 +228,7 @@ propagate_priority(struct proc *p)
* quit.
*/
if (p->p_stat == SRUN) {
- printf("XXX: moving process %d(%s) to a new run queue\n",
+ printf("XXX: moving proc %d(%s) to a new run queue\n",
p->p_pid, p->p_comm);
MPASS(p->p_blocked == NULL);
remrunqueue(p);
@@ -338,6 +252,7 @@ propagate_priority(struct proc *p)
printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
p->p_comm, m->mtx_description);
+
/*
* Check if the proc needs to be moved up on
* the blocked chain
@@ -346,10 +261,11 @@ propagate_priority(struct proc *p)
printf("XXX: process at head of run queue\n");
continue;
}
+
p1 = TAILQ_PREV(p, rq, p_procq);
if (p1->p_priority <= pri) {
printf(
- "XXX: previous process %d(%s) has higher priority\n",
+ "XXX: previous process %d(%s) has higher priority\n",
p->p_pid, p->p_comm);
continue;
}
@@ -367,6 +283,7 @@ propagate_priority(struct proc *p)
if (p1->p_priority > pri)
break;
}
+
MPASS(p1 != NULL);
TAILQ_INSERT_BEFORE(p1, p, p_procq);
CTR4(KTR_LOCK,
@@ -376,421 +293,332 @@ propagate_priority(struct proc *p)
}
/*
- * Get lock 'm', the macro handles the easy (and most common cases) and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out.
+ * The important part of mtx_trylock{,_flags}()
+ * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
+ * if we're called, it's because we know we don't already own this lock.
*/
-void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
+int
+_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *mpp = mtxp;
+ int rval;
- /* bits only valid on mtx_exit() */
- MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type, file, line);
+ KASSERT(CURPROC != NULL, ("curproc is NULL in _mtx_trylock"));
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == (uintptr_t)CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
-done:
- WITNESS_ENTER(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
-
-}
+ /*
+ * _mtx_trylock does not accept the MTX_NOSWITCH option.
+ */
+ MPASS((opts & MTX_NOSWITCH) == 0);
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
+ rval = _obtain_lock(m, CURTHD);
- rval = _obtain_lock(mpp, CURTHD);
#ifdef WITNESS
- if (rval && mpp->mtx_witness != NULL) {
- MPASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
+ if (rval && m->mtx_witness != NULL) {
+ /*
+ * We do not handle recursion in _mtx_trylock; see the
+ * note at the top of the routine.
+ */
+ MPASS(!mtx_recursed(m));
+ witness_try_enter(m, (opts | m->mtx_flags), file, line);
}
#endif /* WITNESS */
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, rval, file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "TRY_ENTER %s [%p] result=%d at %s:%d",
+ m->mtx_description, m, rval, file, line);
return rval;
}
/*
- * Release lock m.
+ * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ *
+ * We call this if the lock is either contested (i.e. we need to go to
+ * sleep waiting for it), or if we need to recurse on it.
*/
void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
+_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *const mpp = mtxp;
-
- MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line);
- WITNESS_EXIT(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- int mtx_intr = mpp->mtx_saveintr;
-
- MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse,
- file, line);
- _release_lock_quick(mpp);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- restore_intr(mtx_intr);
- }
- } else {
- if (((type & MTX_TOPHALF) == 0) &&
- (type & MTX_FIRST)) {
- ASS_IDIS;
- ASS_SIEN(mpp);
- }
- _exitlock_spin(mpp);
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
+ struct proc *p = CURPROC;
+
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recurse", m);
+ return;
}
-}
-void
-mtx_enter_hard(struct mtx *m, int type, int saveintr)
-{
- struct proc *p = CURPROC;
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK, "mtx_lock: %p contested (lock=%p) [%p]", m,
+ (void *)m->mtx_lock, (void *)RETIP(m));
+
+ /*
+ * Save our priority. Even though p_nativepri is protected by
+ * sched_lock, we don't obtain it here as it can be expensive.
+ * Since this is the only place p_nativepri is set, and since two
+ * CPUs will not be executing the same process concurrently, we know
+ * that no other CPU is going to be messing with this. Also,
+ * p_nativepri is only read when we are blocked on a mutex, so that
+ * can't be happening right now either.
+ */
+ p->p_nativepri = p->p_priority;
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
+ while (!_obtain_lock(m, p)) {
+ uintptr_t v;
+ struct proc *p1;
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
- m->mtx_recurse++;
- atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p recurse", m);
- return;
+ mtx_lock_spin(&sched_lock);
+ /*
+ * Check if the lock has been released while spinning for
+ * the sched_lock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
}
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: %p contested (lock=%p) [%p]",
- m, (void *)m->mtx_lock, (void *)RETIP(m));
/*
- * Save our priority. Even though p_nativepri is protected
- * by sched_lock, we don't obtain it here as it can be
- * expensive. Since this is the only place p_nativepri is
- * set, and since two CPUs will not be executing the same
- * process concurrently, we know that no other CPU is going
- * to be messing with this. Also, p_nativepri is only read
- * when we are blocked on a mutex, so that can't be happening
- * right now either.
+ * The mutex was marked contested on release. This means that
+ * there are processes blocked on it.
*/
- p->p_nativepri = p->p_priority;
- while (!_obtain_lock(m, p)) {
- uintptr_t v;
- struct proc *p1;
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL,
+ ("contested mutex has no contesters"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+
+ if (p1->p_priority < p->p_priority)
+ SET_PRIO(p, p1->p_priority);
+ mtx_unlock_spin(&sched_lock);
+ return;
+ }
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit the
- * mutex was either release or the
- * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
- (void *)(v | MTX_CONTESTED))) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
+ /*
+ * If the mutex isn't already contested and a failure occurs
+ * setting the contested bit, the mutex was either released
+ * or the state of the MTX_RECURSED bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
+ }
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
+ /*
+	 * We definitely must sleep for this lock.
+ */
+ mtx_assert(m, MA_NOTOWNED);
#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ /*
+ * If we're borrowing an interrupted thread's VM context, we
+ * must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "mtx_lock: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
+ }
#endif
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
+ /*
+ * Put us on the list of threads blocked on this mutex.
+ */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
+ }
- p->p_blocked = m; /* Who we're blocked on */
- p->p_mtxname = m->mtx_description;
- p->p_stat = SMTX;
+ /*
+ * Save who we're blocked on.
+ */
+ p->p_blocked = m;
+ p->p_mtxname = m->mtx_description;
+ p->p_stat = SMTX;
#if 0
- propagate_priority(p);
+ propagate_priority(p);
#endif
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p blocked on [%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p free from blocked on [%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (uintptr_t)p) {
- m->mtx_recurse++;
- return;
- }
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (_obtain_lock(m, p))
- break;
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
+ m->mtx_description);
+
+ mi_switch();
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
+ p, m, m->mtx_description);
+
+ mtx_unlock_spin(&sched_lock);
+ }
+
+ return;
+}
+
+/*
+ * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ *
+ * This is only called if we need to actually spin for the lock. Recursion
+ * is handled inline.
+ */
+void
+_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+ int line)
+{
+ int i = 0;
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "mtx_lock_spin: %p spinning", m);
+
+ for (;;) {
+ if (_obtain_lock(m, CURPROC))
+ break;
+
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY(1);
#ifdef DDB
- else if (!db_active)
+ else if (!db_active)
#else
- else
+ else
#endif
- panic(
- "spin lock %s held by %p for > 5 seconds",
- m->mtx_description,
- (void *)m->mtx_lock);
- }
+ panic("spin lock %s held by %p for > 5 seconds",
+ m->mtx_description, (void *)m->mtx_lock);
}
-
-#ifdef MUTEX_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveintr = 0xbeefface;
- else
-#endif
- m->mtx_saveintr = saveintr;
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spin done", m);
- return;
- }
}
+
+ m->mtx_saveintr = mtx_intr;
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+
+ return;
}
+/*
+ * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ *
+ * We are only called here if the lock is recursed or contested (i.e. we
+ * need to wake up a blocked thread).
+ */
void
-mtx_exit_hard(struct mtx *m, int type)
+_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
struct proc *p, *p1;
struct mtx *m1;
int pri;
p = CURPROC;
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (mtx_recursed(m)) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p contested", m);
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- _release_lock_quick(m);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p not held", m);
- } else
- atomic_store_rel_ptr(&m->mtx_lock,
- (void *)MTX_CONTESTED);
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p contested setrunqueue %p", m, p1);
- p1->p_blocked = NULL;
- p1->p_mtxname = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+ MPASS4(mtx_owned(m), "mtx_owned(mpp)", file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", m->mtx_description,
+ m, m->mtx_recurse, file, line);
+
+ if (mtx_recursed(m)) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
+ return;
+ }
+
+ mtx_lock_spin(&sched_lock);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
+
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
+ } else
+ atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
+
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
+ m, p1);
+
+ p1->p_blocked = NULL;
+ p1->p_mtxname = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+
+ if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: 0x%x interruped 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
-#endif
- setrunqueue(p);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p switching out lock=%p",
- m, (void *)m->mtx_lock);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p resuming lock=%p",
- m, (void *)m->mtx_lock);
}
- mtx_exit(&sched_lock, MTX_SPIN);
- break;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveintr != 0xbeefface);
- restore_intr(m->mtx_saveintr);
- }
- break;
- case MTX_SPIN | MTX_TOPHALF:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- break;
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+#endif
+ setrunqueue(p);
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: %p switching out lock=%p", m,
+ (void *)m->mtx_lock);
+
+ mi_switch();
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
+ m, (void *)m->mtx_lock);
}
+
+ mtx_unlock_spin(&sched_lock);
+
+ return;
}
+/*
+ * All the unlocking of MTX_SPIN locks is done inline.
+ * See the _rel_spin_lock() macro for the details.
+ */
+
+/*
+ * The INVARIANTS-enabled mtx_assert()
+ */
#ifdef INVARIANTS
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
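Taken together, the slow paths above back a two-family interface: sleep (MTX_DEF) mutexes use one acquire/release pair and spin (MTX_SPIN) mutexes use another. The fragment below is an illustrative sketch only, not part of the patch; `struct foo' and its fields are invented for the example, while sched_lock and the calls themselves mirror the usage visible elsewhere in these hunks.

#include <sys/param.h>
#include <sys/mutex.h>

/* Hypothetical container used only for this sketch. */
struct foo {
	struct mtx	f_lock;		/* MTX_DEF (sleep) mutex protecting f_count */
	int		f_count;
};

static void
foo_bump(struct foo *fp)
{
	mtx_lock(&fp->f_lock);		/* may block if the lock is contested */
	fp->f_count++;
	mtx_unlock(&fp->f_lock);
}

static void
foo_touch_sched(void)
{
	mtx_lock_spin(&sched_lock);	/* spin locks take the _spin pair */
	/* ... short, non-sleeping critical section ... */
	mtx_unlock_spin(&sched_lock);
}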
@@ -822,6 +650,9 @@ _mtx_assert(struct mtx *m, int what, const char *file, int line)
}
#endif
+/*
+ * The MUTEX_DEBUG-enabled mtx_validate()
+ */
#define MV_DESTROY	0	/* validate before destroy */
#define MV_INIT 1 /* validate before init */
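Where a routine relies on its caller holding a mutex, the INVARIANTS-only _mtx_assert() above is the documented check. A minimal sketch of the caller-side idiom, using the MA_OWNED assertion kind that appears elsewhere in this patch (the helper below is hypothetical):

static void
foo_modify_locked(struct mtx *mp, int *counterp)
{
	mtx_assert(mp, MA_OWNED);	/* compiles away unless INVARIANTS is set */
	(*counterp)++;
}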
@@ -843,7 +674,7 @@ mtx_validate(struct mtx *m, int when)
if (m == &all_mtx || cold)
return 0;
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
/*
* XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
* we can re-enable the kernacc() checks.
@@ -887,50 +718,63 @@ mtx_validate(struct mtx *m, int when)
retval = 1;
}
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
return (retval);
}
#endif
+/*
+ * Mutex initialization routine; initialize lock `m' with the type and
+ * options contained in `opts' and with description `description.'
+ * Place on "all_mtx" queue.
+ */
void
-mtx_init(struct mtx *m, const char *t, int flag)
+mtx_init(struct mtx *m, const char *description, int opts)
{
- if ((flag & MTX_QUIET) == 0)
- CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description);
+
#ifdef MUTEX_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ /* Diagnostic and error correction */
+ if (mtx_validate(m, MV_INIT))
return;
#endif
bzero((void *)m, sizeof *m);
TAILQ_INIT(&m->mtx_blocked);
+
#ifdef WITNESS
if (!witness_cold) {
- /* XXX - should not use DEVBUF */
m->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(m->mtx_debug != NULL);
}
#endif
- m->mtx_description = t;
- m->mtx_flags = flag;
+ m->mtx_description = description;
+ m->mtx_flags = opts;
m->mtx_lock = MTX_UNOWNED;
+
/* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next = &all_mtx;
m->mtx_prev = all_mtx.mtx_prev;
m->mtx_prev->mtx_next = m;
all_mtx.mtx_prev = m;
if (++mtx_cur_cnt > mtx_max_cnt)
mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
+
#ifdef WITNESS
if (!witness_cold)
- witness_init(m, flag);
+ witness_init(m, opts);
#endif
}
+/*
+ * Remove lock `m' from all_mtx queue.
+ */
void
mtx_destroy(struct mtx *m)
{
@@ -939,7 +783,9 @@ mtx_destroy(struct mtx *m)
KASSERT(!witness_cold, ("%s: Cannot destroy while still cold\n",
__FUNCTION__));
#endif
+
CTR2(KTR_LOCK, "mtx_destroy %p (%s)", m, m->mtx_description);
+
#ifdef MUTEX_DEBUG
if (m->mtx_next == NULL)
panic("mtx_destroy: %p (%s) already destroyed",
@@ -950,7 +796,9 @@ mtx_destroy(struct mtx *m)
} else {
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
}
- mtx_validate(m, MV_DESTROY); /* diagnostic */
+
+ /* diagnostic */
+ mtx_validate(m, MV_DESTROY);
#endif
#ifdef WITNESS
@@ -959,25 +807,27 @@ mtx_destroy(struct mtx *m)
#endif /* WITNESS */
/* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next->mtx_prev = m->mtx_prev;
m->mtx_prev->mtx_next = m->mtx_next;
+
#ifdef MUTEX_DEBUG
m->mtx_next = m->mtx_prev = NULL;
#endif
+
#ifdef WITNESS
- free(m->mtx_debug, M_DEVBUF);
+ free(m->mtx_debug, M_WITNESS);
m->mtx_debug = NULL;
#endif
+
mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
}
+
/*
- * The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the WITNESS kernel option being specified.
+ * The WITNESS-enabled diagnostic code.
*/
-
#ifdef WITNESS
static void
witness_fixup(void *dummy __unused)
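With mtx_init() now taking a description string and a single `opts' word, and mtx_destroy() unhooking the lock from the all_mtx queue, the life cycle of a private mutex looks roughly as sketched below. The softc structure and names are hypothetical; MTX_DEF selects an ordinary sleep mutex.

#include <sys/param.h>
#include <sys/mutex.h>

/* Hypothetical softc; only the mutex member matters for this sketch. */
struct mysoftc {
	struct mtx	sc_mtx;
};

static void
mysoftc_setup(struct mysoftc *sc)
{
	/* New signature: lock, description string, option/type flags. */
	mtx_init(&sc->sc_mtx, "mysoftc lock", MTX_DEF);
}

static void
mysoftc_teardown(struct mysoftc *sc)
{
	mtx_destroy(&sc->sc_mtx);	/* removes the lock from the all_mtx queue */
}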
@@ -988,26 +838,26 @@ witness_fixup(void *dummy __unused)
* We have to release Giant before initializing its witness
* structure so that WITNESS doesn't get confused.
*/
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&all_mtx, MTX_DEF);
+
+ mtx_lock(&all_mtx);
/* Iterate through all mutexes and finish up mutex initialization. */
for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- /* XXX - should not use DEVBUF */
mp->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(mp->mtx_debug != NULL);
witness_init(mp, mp->mtx_flags);
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
/* Mark the witness code as being ready for use. */
atomic_store_rel_int(&witness_cold, 0);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL)
@@ -1061,6 +911,9 @@ TUNABLE_INT_DECL("debug.witness_skipspin", 0, witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
"");
+/*
+ * Witness-enabled globals
+ */
static struct mtx w_mtx;
static struct witness *w_free;
static struct witness *w_all;
@@ -1069,20 +922,22 @@ static int witness_dead; /* fatal error, probably no memory */
static struct witness w_data[WITNESS_COUNT];
-static struct witness *enroll __P((const char *description, int flag));
-static int itismychild __P((struct witness *parent, struct witness *child));
-static void removechild __P((struct witness *parent, struct witness *child));
-static int isitmychild __P((struct witness *parent, struct witness *child));
-static int isitmydescendant __P((struct witness *parent, struct witness *child));
-static int dup_ok __P((struct witness *));
-static int blessed __P((struct witness *, struct witness *));
-static void witness_displaydescendants
- __P((void(*)(const char *fmt, ...), struct witness *));
-static void witness_leveldescendents __P((struct witness *parent, int level));
-static void witness_levelall __P((void));
-static struct witness * witness_get __P((void));
-static void witness_free __P((struct witness *m));
-
+/*
+ * Internal witness routine prototypes
+ */
+static struct witness *enroll(const char *description, int flag);
+static int itismychild(struct witness *parent, struct witness *child);
+static void removechild(struct witness *parent, struct witness *child);
+static int isitmychild(struct witness *parent, struct witness *child);
+static int isitmydescendant(struct witness *parent, struct witness *child);
+static int dup_ok(struct witness *);
+static int blessed(struct witness *, struct witness *);
+static void
+ witness_displaydescendants(void(*)(const char *fmt, ...), struct witness *);
+static void witness_leveldescendents(struct witness *parent, int level);
+static void witness_levelall(void);
+static struct witness * witness_get(void);
+static void witness_free(struct witness *m);
static char *ignore_list[] = {
"witness lock",
@@ -1129,7 +984,8 @@ static char *sleep_list[] = {
*/
static struct witness_blessed blessed_list[] = {
};
-static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed);
+static int blessed_count =
+ sizeof(blessed_list) / sizeof(struct witness_blessed);
static void
witness_init(struct mtx *m, int flag)
@@ -1211,17 +1067,17 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
" %s:%d already holding %s:%x",
m->mtx_description, w->w_level, file, line,
spin_order_list[ffs(i)-1], i);
}
PCPU_SET(witness_spin_check, i | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1245,7 +1101,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
if (!mtx_legal2block())
- panic("blockable mtx_enter() of %s when not legal @ %s:%d",
+ panic("blockable mtx_lock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
/*
* Is this the first mutex acquired
@@ -1267,16 +1123,16 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
}
MPASS(!mtx_owned(&w_mtx));
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
/*
* If we have a known higher number just say ok
*/
if (witness_watch > 1 && w->w_level > w1->w_level) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
if (isitmydescendant(m1->mtx_witness, w)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
@@ -1284,7 +1140,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (blessed(w, w1))
goto out;
if (m1 == &Giant) {
@@ -1313,7 +1169,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
m1 = LIST_FIRST(&p->p_heldmtx);
if (!itismychild(m1->mtx_witness, w))
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
out:
#ifdef DDB
@@ -1356,10 +1212,10 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_description, file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1407,10 +1263,10 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) & ~w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return;
}
if ((m->mtx_flags & MTX_SPIN) != 0)
@@ -1426,7 +1282,7 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
}
if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold)
- panic("switchable mtx_exit() of %s when not legal @ %s:%d",
+ panic("switchable mtx_unlock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
LIST_REMOVE(m, mtx_held);
m->mtx_held.le_prev = NULL;
@@ -1497,10 +1353,10 @@ enroll(const char *description, int flag)
}
if ((flag & MTX_SPIN) && witness_skipspin)
return (NULL);
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
for (w = w_all; w; w = w->w_next) {
if (strcmp(description, w->w_description) == 0) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return (w);
}
}
@@ -1509,7 +1365,7 @@ enroll(const char *description, int flag)
w->w_next = w_all;
w_all = w;
w->w_description = description;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (flag & MTX_SPIN) {
w->w_spin = 1;
@@ -1731,7 +1587,7 @@ witness_get()
if ((w = w_free) == NULL) {
witness_dead = 1;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
printf("witness exhausted\n");
return (NULL);
}
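The witness hunks above show how per-call options now travel through the *_flags wrappers rather than being OR-ed into a `type' argument. The fragment below simply restates that pattern as it appears around w_mtx; nothing beyond MTX_QUIET is assumed.

	/* Take and drop a spin lock without emitting KTR trace records. */
	mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
	/* ... examine or update witness state ... */
	mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);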
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index c13dd1d..64d3972 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -31,6 +31,11 @@
*/
/*
+ * Machine independent bits of mutex implementation and implementation of
+ * `witness' structure & related debugging routines.
+ */
+
+/*
* Main Entry: witness
* Pronunciation: 'wit-n&s
* Function: noun
@@ -53,12 +58,6 @@
#include "opt_ddb.h"
#include "opt_witness.h"
-/*
- * Cause non-inlined mtx_*() to be compiled.
- * Must be defined early because other system headers may include mutex.h.
- */
-#define _KERN_MUTEX_C_
-
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
@@ -82,9 +81,8 @@
#include <sys/mutex.h>
/*
- * Machine independent bits of the mutex implementation
+ * The WITNESS-enabled mutex debug structure.
*/
-
#ifdef WITNESS
struct mtx_debug {
struct witness *mtxd_witness;
@@ -100,138 +98,54 @@ struct mtx_debug {
#endif /* WITNESS */
/*
- * Assembly macros
- *------------------------------------------------------------------------------
- */
-
-#define _V(x) __STRING(x)
-
-/*
- * Default, unoptimized mutex micro-operations
+ * Internal utility macros.
*/
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#ifndef _obtain_lock
-/* Actually obtain mtx_lock */
-#define _obtain_lock(mp, tid) \
- atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
-#endif
-
-#ifndef _release_lock
-/* Actually release mtx_lock */
-#define _release_lock(mp, tid) \
- atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _release_lock_quick
-/* Actually release mtx_lock quickly assuming that we own it */
-#define _release_lock_quick(mp) \
- atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _getlock_sleep
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _getlock_spin_block
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_intr = save_intr(); \
- disable_intr(); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \
- else \
- (mp)->mtx_saveintr = _mtx_intr; \
-} while (0)
-#endif
+#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-#ifndef _getlock_norecurse
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-#endif
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-#ifndef _exitlock_norecurse
/*
- * Release a sleep lock assuming we haven't recursed on it, recursion is handled
- * in the hard function.
+ * Early WITNESS-enabled declarations.
*/
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-#endif
+#ifdef WITNESS
-#ifndef _exitlock
/*
- * Release a sleep lock when its likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) { \
- if ((mp)->mtx_lock & MTX_RECURSED) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_ptr(&(mp)->mtx_lock, \
- MTX_RECURSED); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _exitlock_spin
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp) do { \
- if (!mtx_recursed((mp))) { \
- int _mtx_intr = (mp)->mtx_saveintr; \
- \
- _release_lock_quick(mp); \
- restore_intr(_mtx_intr); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-#endif
+ * Internal WITNESS routines which must be prototyped early.
+ *
+ * XXX: When/if witness code is cleaned up, it would be wise to place all
+ * witness prototyping early in this file.
+ */
+static void witness_init(struct mtx *, int flag);
+static void witness_destroy(struct mtx *);
+static void witness_display(void(*)(const char *fmt, ...));
-#ifdef WITNESS
-static void witness_init(struct mtx *, int flag);
-static void witness_destroy(struct mtx *);
-static void witness_display(void(*)(const char *fmt, ...));
+MALLOC_DEFINE(M_WITNESS, "witness", "witness mtx_debug structure");
/* All mutexes in system (used for debug/panic) */
static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0 };
+
/*
- * Set to 0 once mutexes have been fully initialized so that witness code can be
- * safely executed.
+ * This global is set to 0 once it becomes safe to use the witness code.
*/
static int witness_cold = 1;
+
#else /* WITNESS */
-/*
- * flag++ is slezoid way of shutting up unused parameter warning
- * in mtx_init()
+/* XXX XXX XXX
+ * flag++ is a sleazoid way of shutting up the unused parameter warning
*/
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_try_enter(m, t, f, l)
#endif /* WITNESS */
-/* All mutexes in system (used for debug/panic) */
+/*
+ * All mutex locks in the system are kept on the all_mtx list.
+ */
static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
{ NULL, NULL }, &all_mtx, &all_mtx,
@@ -242,19 +156,18 @@ static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
#endif
};
+/*
+ * Global variables for bookkeeping.
+ */
static int mtx_cur_cnt;
static int mtx_max_cnt;
+/*
+ * Prototypes for non-exported routines.
+ *
+ * NOTE: Prototypes for witness routines are placed at the bottom of the file.
+ */
static void propagate_priority(struct proc *);
-static void mtx_enter_hard(struct mtx *, int type, int saveintr);
-static void mtx_exit_hard(struct mtx *, int type);
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
static void
propagate_priority(struct proc *p)
@@ -277,6 +190,7 @@ propagate_priority(struct proc *p)
MPASS(m->mtx_lock == MTX_CONTESTED);
return;
}
+
MPASS(p->p_magic == P_MAGIC);
KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
if (p->p_priority <= pri)
@@ -314,7 +228,7 @@ propagate_priority(struct proc *p)
* quit.
*/
if (p->p_stat == SRUN) {
- printf("XXX: moving process %d(%s) to a new run queue\n",
+ printf("XXX: moving proc %d(%s) to a new run queue\n",
p->p_pid, p->p_comm);
MPASS(p->p_blocked == NULL);
remrunqueue(p);
@@ -338,6 +252,7 @@ propagate_priority(struct proc *p)
printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
p->p_comm, m->mtx_description);
+
/*
* Check if the proc needs to be moved up on
* the blocked chain
@@ -346,10 +261,11 @@ propagate_priority(struct proc *p)
printf("XXX: process at head of run queue\n");
continue;
}
+
p1 = TAILQ_PREV(p, rq, p_procq);
if (p1->p_priority <= pri) {
printf(
- "XXX: previous process %d(%s) has higher priority\n",
+ "XXX: previous process %d(%s) has higher priority\n",
p->p_pid, p->p_comm);
continue;
}
@@ -367,6 +283,7 @@ propagate_priority(struct proc *p)
if (p1->p_priority > pri)
break;
}
+
MPASS(p1 != NULL);
TAILQ_INSERT_BEFORE(p1, p, p_procq);
CTR4(KTR_LOCK,
@@ -376,421 +293,332 @@ propagate_priority(struct proc *p)
}
/*
- * Get lock 'm', the macro handles the easy (and most common cases) and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out.
+ * The important part of mtx_trylock{,_flags}()
+ * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
+ * if we're called, it's because we know we don't already own this lock.
*/
-void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
+int
+_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *mpp = mtxp;
+ int rval;
- /* bits only valid on mtx_exit() */
- MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type, file, line);
+ KASSERT(CURPROC != NULL, ("curproc is NULL in _mtx_trylock"));
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == (uintptr_t)CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
-done:
- WITNESS_ENTER(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
-
-}
+ /*
+ * _mtx_trylock does not accept MTX_NOSWITCH option.
+ */
+ MPASS((opts & MTX_NOSWITCH) == 0);
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
+ rval = _obtain_lock(m, CURTHD);
- rval = _obtain_lock(mpp, CURTHD);
#ifdef WITNESS
- if (rval && mpp->mtx_witness != NULL) {
- MPASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
+ if (rval && m->mtx_witness != NULL) {
+ /*
+ * We do not handle recursion in _mtx_trylock; see the
+ * note at the top of the routine.
+ */
+ MPASS(!mtx_recursed(m));
+ witness_try_enter(m, (opts | m->mtx_flags), file, line);
}
#endif /* WITNESS */
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, rval, file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "TRY_ENTER %s [%p] result=%d at %s:%d",
+ m->mtx_description, m, rval, file, line);
return rval;
}
/*
- * Release lock m.
+ * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ *
+ * We call this if the lock is either contested (i.e. we need to go to
+ * sleep waiting for it), or if we need to recurse on it.
*/
void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
+_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *const mpp = mtxp;
-
- MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line);
- WITNESS_EXIT(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- int mtx_intr = mpp->mtx_saveintr;
-
- MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse,
- file, line);
- _release_lock_quick(mpp);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- restore_intr(mtx_intr);
- }
- } else {
- if (((type & MTX_TOPHALF) == 0) &&
- (type & MTX_FIRST)) {
- ASS_IDIS;
- ASS_SIEN(mpp);
- }
- _exitlock_spin(mpp);
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
+ struct proc *p = CURPROC;
+
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recurse", m);
+ return;
}
-}
-void
-mtx_enter_hard(struct mtx *m, int type, int saveintr)
-{
- struct proc *p = CURPROC;
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK, "mtx_lock: %p contested (lock=%p) [%p]", m,
+ (void *)m->mtx_lock, (void *)RETIP(m));
+
+ /*
+ * Save our priority. Even though p_nativepri is protected by
+ * sched_lock, we don't obtain it here as it can be expensive.
+ * Since this is the only place p_nativepri is set, and since two
+ * CPUs will not be executing the same process concurrently, we know
+ * that no other CPU is going to be messing with this. Also,
+ * p_nativepri is only read when we are blocked on a mutex, so that
+ * can't be happening right now either.
+ */
+ p->p_nativepri = p->p_priority;
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
+ while (!_obtain_lock(m, p)) {
+ uintptr_t v;
+ struct proc *p1;
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
- m->mtx_recurse++;
- atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p recurse", m);
- return;
+ mtx_lock_spin(&sched_lock);
+ /*
+ * Check if the lock has been released while spinning for
+ * the sched_lock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
}
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: %p contested (lock=%p) [%p]",
- m, (void *)m->mtx_lock, (void *)RETIP(m));
/*
- * Save our priority. Even though p_nativepri is protected
- * by sched_lock, we don't obtain it here as it can be
- * expensive. Since this is the only place p_nativepri is
- * set, and since two CPUs will not be executing the same
- * process concurrently, we know that no other CPU is going
- * to be messing with this. Also, p_nativepri is only read
- * when we are blocked on a mutex, so that can't be happening
- * right now either.
+ * The mutex was marked contested on release. This means that
+ * there are processes blocked on it.
*/
- p->p_nativepri = p->p_priority;
- while (!_obtain_lock(m, p)) {
- uintptr_t v;
- struct proc *p1;
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL,
+ ("contested mutex has no contesters"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+
+ if (p1->p_priority < p->p_priority)
+ SET_PRIO(p, p1->p_priority);
+ mtx_unlock_spin(&sched_lock);
+ return;
+ }
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit the
- * mutex was either release or the
- * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
- (void *)(v | MTX_CONTESTED))) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
+ /*
+ * If the mutex isn't already contested and a failure occurs
+ * setting the contested bit, the mutex was either released
+ * or the state of the MTX_RECURSED bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
+ }
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
+ /*
+	 * We definitely must sleep for this lock.
+ */
+ mtx_assert(m, MA_NOTOWNED);
#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ /*
+ * If we're borrowing an interrupted thread's VM context, we
+ * must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "mtx_lock: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
+ }
#endif
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
+ /*
+ * Put us on the list of threads blocked on this mutex.
+ */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
+ }
- p->p_blocked = m; /* Who we're blocked on */
- p->p_mtxname = m->mtx_description;
- p->p_stat = SMTX;
+ /*
+ * Save who we're blocked on.
+ */
+ p->p_blocked = m;
+ p->p_mtxname = m->mtx_description;
+ p->p_stat = SMTX;
#if 0
- propagate_priority(p);
+ propagate_priority(p);
#endif
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p blocked on [%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p free from blocked on [%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (uintptr_t)p) {
- m->mtx_recurse++;
- return;
- }
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (_obtain_lock(m, p))
- break;
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
+ m->mtx_description);
+
+ mi_switch();
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
+ p, m, m->mtx_description);
+
+ mtx_unlock_spin(&sched_lock);
+ }
+
+ return;
+}
+
+/*
+ * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ *
+ * This is only called if we need to actually spin for the lock. Recursion
+ * is handled inline.
+ */
+void
+_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+ int line)
+{
+ int i = 0;
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "mtx_lock_spin: %p spinning", m);
+
+ for (;;) {
+ if (_obtain_lock(m, CURPROC))
+ break;
+
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY(1);
#ifdef DDB
- else if (!db_active)
+ else if (!db_active)
#else
- else
+ else
#endif
- panic(
- "spin lock %s held by %p for > 5 seconds",
- m->mtx_description,
- (void *)m->mtx_lock);
- }
+ panic("spin lock %s held by %p for > 5 seconds",
+ m->mtx_description, (void *)m->mtx_lock);
}
-
-#ifdef MUTEX_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveintr = 0xbeefface;
- else
-#endif
- m->mtx_saveintr = saveintr;
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spin done", m);
- return;
- }
}
+
+ m->mtx_saveintr = mtx_intr;
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+
+ return;
}
+/*
+ * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ *
+ * We are only called here if the lock is recursed or contested (i.e. we
+ * need to wake up a blocked thread).
+ */
void
-mtx_exit_hard(struct mtx *m, int type)
+_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
struct proc *p, *p1;
struct mtx *m1;
int pri;
p = CURPROC;
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (mtx_recursed(m)) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p contested", m);
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- _release_lock_quick(m);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p not held", m);
- } else
- atomic_store_rel_ptr(&m->mtx_lock,
- (void *)MTX_CONTESTED);
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p contested setrunqueue %p", m, p1);
- p1->p_blocked = NULL;
- p1->p_mtxname = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+ MPASS4(mtx_owned(m), "mtx_owned(mpp)", file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", m->mtx_description,
+ m, m->mtx_recurse, file, line);
+
+ if (mtx_recursed(m)) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
+ return;
+ }
+
+ mtx_lock_spin(&sched_lock);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
+
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
+ } else
+ atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
+
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
+ m, p1);
+
+ p1->p_blocked = NULL;
+ p1->p_mtxname = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+
+ if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: 0x%x interruped 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
-#endif
- setrunqueue(p);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p switching out lock=%p",
- m, (void *)m->mtx_lock);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p resuming lock=%p",
- m, (void *)m->mtx_lock);
}
- mtx_exit(&sched_lock, MTX_SPIN);
- break;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveintr != 0xbeefface);
- restore_intr(m->mtx_saveintr);
- }
- break;
- case MTX_SPIN | MTX_TOPHALF:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- break;
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+#endif
+ setrunqueue(p);
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: %p switching out lock=%p", m,
+ (void *)m->mtx_lock);
+
+ mi_switch();
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
+ m, (void *)m->mtx_lock);
}
+
+ mtx_unlock_spin(&sched_lock);
+
+ return;
}
+/*
+ * All the unlocking of MTX_SPIN locks is done inline.
+ * See the _rel_spin_lock() macro for the details.
+ */
+
+/*
+ * The INVARIANTS-enabled mtx_assert()
+ */
#ifdef INVARIANTS
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
@@ -822,6 +650,9 @@ _mtx_assert(struct mtx *m, int what, const char *file, int line)
}
#endif
+/*
+ * The MUTEX_DEBUG-enabled mtx_validate()
+ */
#define MV_DESTROY	0	/* validate before destroy */
#define MV_INIT 1 /* validate before init */
@@ -843,7 +674,7 @@ mtx_validate(struct mtx *m, int when)
if (m == &all_mtx || cold)
return 0;
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
/*
* XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
* we can re-enable the kernacc() checks.
@@ -887,50 +718,63 @@ mtx_validate(struct mtx *m, int when)
retval = 1;
}
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
return (retval);
}
#endif
+/*
+ * Mutex initialization routine; initialize lock `m' with the type and
+ * options contained in `opts' and with description `description.'
+ * Place on "all_mtx" queue.
+ */
void
-mtx_init(struct mtx *m, const char *t, int flag)
+mtx_init(struct mtx *m, const char *description, int opts)
{
- if ((flag & MTX_QUIET) == 0)
- CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description);
+
#ifdef MUTEX_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ /* Diagnostic and error correction */
+ if (mtx_validate(m, MV_INIT))
return;
#endif
bzero((void *)m, sizeof *m);
TAILQ_INIT(&m->mtx_blocked);
+
#ifdef WITNESS
if (!witness_cold) {
- /* XXX - should not use DEVBUF */
m->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(m->mtx_debug != NULL);
}
#endif
- m->mtx_description = t;
- m->mtx_flags = flag;
+ m->mtx_description = description;
+ m->mtx_flags = opts;
m->mtx_lock = MTX_UNOWNED;
+
/* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next = &all_mtx;
m->mtx_prev = all_mtx.mtx_prev;
m->mtx_prev->mtx_next = m;
all_mtx.mtx_prev = m;
if (++mtx_cur_cnt > mtx_max_cnt)
mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
+
#ifdef WITNESS
if (!witness_cold)
- witness_init(m, flag);
+ witness_init(m, opts);
#endif
}
+/*
+ * Remove lock `m' from all_mtx queue.
+ */
void
mtx_destroy(struct mtx *m)
{
@@ -939,7 +783,9 @@ mtx_destroy(struct mtx *m)
KASSERT(!witness_cold, ("%s: Cannot destroy while still cold\n",
__FUNCTION__));
#endif
+
CTR2(KTR_LOCK, "mtx_destroy %p (%s)", m, m->mtx_description);
+
#ifdef MUTEX_DEBUG
if (m->mtx_next == NULL)
panic("mtx_destroy: %p (%s) already destroyed",
@@ -950,7 +796,9 @@ mtx_destroy(struct mtx *m)
} else {
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
}
- mtx_validate(m, MV_DESTROY); /* diagnostic */
+
+ /* diagnostic */
+ mtx_validate(m, MV_DESTROY);
#endif
#ifdef WITNESS
@@ -959,25 +807,27 @@ mtx_destroy(struct mtx *m)
#endif /* WITNESS */
/* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next->mtx_prev = m->mtx_prev;
m->mtx_prev->mtx_next = m->mtx_next;
+
#ifdef MUTEX_DEBUG
m->mtx_next = m->mtx_prev = NULL;
#endif
+
#ifdef WITNESS
- free(m->mtx_debug, M_DEVBUF);
+ free(m->mtx_debug, M_WITNESS);
m->mtx_debug = NULL;
#endif
+
mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
}
+
/*
- * The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the WITNESS kernel option being specified.
+ * The WITNESS-enabled diagnostic code.
*/
-
#ifdef WITNESS
static void
witness_fixup(void *dummy __unused)
@@ -988,26 +838,26 @@ witness_fixup(void *dummy __unused)
* We have to release Giant before initializing its witness
* structure so that WITNESS doesn't get confused.
*/
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&all_mtx, MTX_DEF);
+
+ mtx_lock(&all_mtx);
/* Iterate through all mutexes and finish up mutex initialization. */
for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- /* XXX - should not use DEVBUF */
mp->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(mp->mtx_debug != NULL);
witness_init(mp, mp->mtx_flags);
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
/* Mark the witness code as being ready for use. */
atomic_store_rel_int(&witness_cold, 0);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL)
@@ -1061,6 +911,9 @@ TUNABLE_INT_DECL("debug.witness_skipspin", 0, witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
"");
+/*
+ * Witness-enabled globals
+ */
static struct mtx w_mtx;
static struct witness *w_free;
static struct witness *w_all;
@@ -1069,20 +922,22 @@ static int witness_dead; /* fatal error, probably no memory */
static struct witness w_data[WITNESS_COUNT];
-static struct witness *enroll __P((const char *description, int flag));
-static int itismychild __P((struct witness *parent, struct witness *child));
-static void removechild __P((struct witness *parent, struct witness *child));
-static int isitmychild __P((struct witness *parent, struct witness *child));
-static int isitmydescendant __P((struct witness *parent, struct witness *child));
-static int dup_ok __P((struct witness *));
-static int blessed __P((struct witness *, struct witness *));
-static void witness_displaydescendants
- __P((void(*)(const char *fmt, ...), struct witness *));
-static void witness_leveldescendents __P((struct witness *parent, int level));
-static void witness_levelall __P((void));
-static struct witness * witness_get __P((void));
-static void witness_free __P((struct witness *m));
-
+/*
+ * Internal witness routine prototypes
+ */
+static struct witness *enroll(const char *description, int flag);
+static int itismychild(struct witness *parent, struct witness *child);
+static void removechild(struct witness *parent, struct witness *child);
+static int isitmychild(struct witness *parent, struct witness *child);
+static int isitmydescendant(struct witness *parent, struct witness *child);
+static int dup_ok(struct witness *);
+static int blessed(struct witness *, struct witness *);
+static void
+ witness_displaydescendants(void(*)(const char *fmt, ...), struct witness *);
+static void witness_leveldescendents(struct witness *parent, int level);
+static void witness_levelall(void);
+static struct witness * witness_get(void);
+static void witness_free(struct witness *m);
static char *ignore_list[] = {
"witness lock",
@@ -1129,7 +984,8 @@ static char *sleep_list[] = {
*/
static struct witness_blessed blessed_list[] = {
};
-static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed);
+static int blessed_count =
+ sizeof(blessed_list) / sizeof(struct witness_blessed);
static void
witness_init(struct mtx *m, int flag)
@@ -1211,17 +1067,17 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
" %s:%d already holding %s:%x",
m->mtx_description, w->w_level, file, line,
spin_order_list[ffs(i)-1], i);
}
PCPU_SET(witness_spin_check, i | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1245,7 +1101,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
if (!mtx_legal2block())
- panic("blockable mtx_enter() of %s when not legal @ %s:%d",
+ panic("blockable mtx_lock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
/*
* Is this the first mutex acquired
@@ -1267,16 +1123,16 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
}
MPASS(!mtx_owned(&w_mtx));
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
/*
* If we have a known higher number just say ok
*/
if (witness_watch > 1 && w->w_level > w1->w_level) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
if (isitmydescendant(m1->mtx_witness, w)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
@@ -1284,7 +1140,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (blessed(w, w1))
goto out;
if (m1 == &Giant) {
@@ -1313,7 +1169,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
m1 = LIST_FIRST(&p->p_heldmtx);
if (!itismychild(m1->mtx_witness, w))
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
out:
#ifdef DDB
@@ -1356,10 +1212,10 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_description, file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1407,10 +1263,10 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) & ~w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return;
}
if ((m->mtx_flags & MTX_SPIN) != 0)
@@ -1426,7 +1282,7 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
}
if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold)
- panic("switchable mtx_exit() of %s when not legal @ %s:%d",
+ panic("switchable mtx_unlock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
LIST_REMOVE(m, mtx_held);
m->mtx_held.le_prev = NULL;
@@ -1497,10 +1353,10 @@ enroll(const char *description, int flag)
}
if ((flag & MTX_SPIN) && witness_skipspin)
return (NULL);
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
for (w = w_all; w; w = w->w_next) {
if (strcmp(description, w->w_description) == 0) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return (w);
}
}
@@ -1509,7 +1365,7 @@ enroll(const char *description, int flag)
w->w_next = w_all;
w_all = w;
w->w_description = description;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (flag & MTX_SPIN) {
w->w_spin = 1;
@@ -1731,7 +1587,7 @@ witness_get()
if ((w = w_free) == NULL) {
witness_dead = 1;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
printf("witness exhausted\n");
return (NULL);
}
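
Aside: the witness and kern_mutex.c hunks above all apply the same substitution: mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET) becomes mtx_lock_spin_flags(&w_mtx, MTX_QUIET), and the matching exit becomes mtx_unlock_spin_flags(). The sketch below only illustrates that calling shape; the lock type, flag value and bookkeeping are user-space stand-ins backed by pthreads, not the kernel's real <sys/mutex.h> definitions.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for illustration only; the flag value below is made up. */
struct mtx { pthread_mutex_t backing; };
#define MTX_QUIET	0x01

static void
mtx_lock_spin_flags(struct mtx *m, int flags)
{
	(void)flags;				/* the stand-in ignores the flag */
	pthread_mutex_lock(&m->backing);	/* the real spin lock spins */
}

static void
mtx_unlock_spin_flags(struct mtx *m, int flags)
{
	(void)flags;
	pthread_mutex_unlock(&m->backing);
}

static struct mtx w_mtx = { PTHREAD_MUTEX_INITIALIZER };
static int witness_spin_check;			/* stand-in for the per-CPU field */

int
main(void)
{
	/*
	 * Old: mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
	 * New: a dedicated spin-lock entry point that only carries the
	 * remaining flag.
	 */
	mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
	witness_spin_check |= 0x02;		/* stand-in for the level bookkeeping */
	mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);

	printf("witness_spin_check = 0x%x\n", witness_spin_check);
	return (0);
}
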
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index 4b2c5d8..eb63ee4 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -1025,13 +1025,13 @@ selrecord(selector, sip)
if (sip->si_pid == mypid)
return;
if (sip->si_pid && (p = pfind(sip->si_pid))) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == (caddr_t)&selwait) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
sip->si_flags |= SI_COLL;
return;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
sip->si_pid = mypid;
}
@@ -1055,15 +1055,15 @@ selwakeup(sip)
p = pfind(sip->si_pid);
sip->si_pid = 0;
if (p != NULL) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == (caddr_t)&selwait) {
if (p->p_stat == SSLEEP)
setrunnable(p);
else
unsleep(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
p->p_flag &= ~P_SELECT;
PROC_UNLOCK(p);
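
The sys_generic.c hunks show the spin-lock form of the new interface: sched_lock is taken with mtx_lock_spin(), the wait channel is inspected, and the lock is released on every path out of the check. Below is a user-space sketch of that shape only; a pthread mutex stands in for sched_lock and the process state is trimmed down to the two fields the check touches.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static int selwait;			/* stand-in wait channel */

struct proc_sketch {
	const void	*p_wchan;	/* what the process is sleeping on */
	int		 p_runnable;	/* stand-in for setrunnable() */
};

/*
 * Shape of the converted selwakeup(): take the spin lock, inspect
 * p_wchan, and drop the lock on every path out of the check.
 */
static void
wake_if_selwaiting(struct proc_sketch *p)
{
	pthread_mutex_lock(&sched_lock);	/* was mtx_lock_spin(&sched_lock) */
	if (p->p_wchan == (const void *)&selwait) {
		p->p_runnable = 1;		/* was setrunnable(p)/unsleep(p) */
		pthread_mutex_unlock(&sched_lock);
		return;
	}
	pthread_mutex_unlock(&sched_lock);	/* was mtx_unlock_spin(&sched_lock) */
}

int
main(void)
{
	struct proc_sketch p = { &selwait, 0 };

	wake_if_selwaiting(&p);
	printf("runnable = %d\n", p.p_runnable);
	return (0);
}
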
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 2c03000..d53bf72 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -284,12 +284,12 @@ ptrace(curp, uap)
PROCTREE_LOCK(PT_RELEASE);
/* not currently stopped */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return EBUSY;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/* OK */
break;
@@ -377,13 +377,13 @@ ptrace(curp, uap)
sendsig:
/* deliver or queue signal */
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SSTOP) {
p->p_xstat = uap->data;
setrunnable(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (uap->data) {
mtx_assert(&Giant, MA_OWNED);
psignal(p, uap->data);
@@ -437,14 +437,14 @@ ptrace(curp, uap)
}
error = 0;
PHOLD(p); /* user had damn well better be incore! */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_INMEM) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
fill_kinfo_proc (p, &p->p_addr->u_kproc);
curp->p_retval[0] = *(int *)
((uintptr_t)p->p_addr + (uintptr_t)uap->addr);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
curp->p_retval[0] = 0;
error = EFAULT;
}
@@ -453,13 +453,13 @@ ptrace(curp, uap)
case PT_WRITE_U:
PHOLD(p); /* user had damn well better be incore! */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_INMEM) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
fill_kinfo_proc (p, &p->p_addr->u_kproc);
error = ptrace_write_u(p, (vm_offset_t)uap->addr, uap->data);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
error = EFAULT;
}
PRELE(p);
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index f4fe297..b815e7c 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -2251,7 +2251,7 @@ ttyinfo(tp)
else if ((p = LIST_FIRST(&tp->t_pgrp->pg_members)) == 0)
ttyprintf(tp, "empty foreground process group\n");
else {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
/* Pick interesting process. */
for (pick = NULL; p != 0; p = LIST_NEXT(p, p_pglist))
@@ -2264,7 +2264,7 @@ ttyinfo(tp)
ltmp = pick->p_stat == SIDL || pick->p_stat == SWAIT ||
pick->p_stat == SZOMB ? 0 :
pgtok(vmspace_resident_count(pick->p_vmspace));
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
ttyprintf(tp, " cmd: %s %d [%s] ", pick->p_comm, pick->p_pid,
stmp);
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 1489157..adbfe31 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -152,20 +152,20 @@ mbinit(dummy)
/*
* Perform some initial allocations.
*/
- mtx_enter(&mcntfree.m_mtx, MTX_DEF);
+ mtx_lock(&mcntfree.m_mtx);
if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
goto bad;
- mtx_exit(&mcntfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mcntfree.m_mtx);
- mtx_enter(&mmbfree.m_mtx, MTX_DEF);
+ mtx_lock(&mmbfree.m_mtx);
if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
goto bad;
- mtx_exit(&mmbfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mmbfree.m_mtx);
- mtx_enter(&mclfree.m_mtx, MTX_DEF);
+ mtx_lock(&mclfree.m_mtx);
if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
goto bad;
- mtx_exit(&mclfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mclfree.m_mtx);
return;
bad:
@@ -204,10 +204,10 @@ m_alloc_ref(nmb, how)
*/
nbytes = round_page(nmb * sizeof(union mext_refcnt));
- mtx_exit(&mcntfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mcntfree.m_mtx);
if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
M_WAITOK : M_NOWAIT)) == NULL) {
- mtx_enter(&mcntfree.m_mtx, MTX_DEF);
+ mtx_lock(&mcntfree.m_mtx);
return (0);
}
nmb = nbytes / sizeof(union mext_refcnt);
@@ -216,7 +216,7 @@ m_alloc_ref(nmb, how)
* We don't let go of the mutex in order to avoid a race.
* It is up to the caller to let go of the mutex.
*/
- mtx_enter(&mcntfree.m_mtx, MTX_DEF);
+ mtx_lock(&mcntfree.m_mtx);
for (i = 0; i < nmb; i++) {
((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
mcntfree.m_head = (union mext_refcnt *)p;
@@ -260,13 +260,13 @@ m_mballoc(nmb, how)
nbytes = round_page(nmb * MSIZE);
- mtx_exit(&mmbfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mmbfree.m_mtx);
p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
if (p == 0 && how == M_TRYWAIT) {
atomic_add_long(&mbstat.m_wait, 1);
p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
}
- mtx_enter(&mmbfree.m_mtx, MTX_DEF);
+ mtx_lock(&mmbfree.m_mtx);
/*
* Either the map is now full, or `how' is M_DONTWAIT and there
@@ -318,10 +318,10 @@ m_mballoc_wait(void)
* importantly, to avoid a potential lock order reversal which may
* result in deadlock (See comment above m_reclaim()).
*/
- mtx_exit(&mmbfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mmbfree.m_mtx);
m_reclaim();
- mtx_enter(&mmbfree.m_mtx, MTX_DEF);
+ mtx_lock(&mmbfree.m_mtx);
_MGET(p, M_DONTWAIT);
if (p == NULL) {
@@ -381,11 +381,11 @@ m_clalloc(ncl, how)
}
npg = ncl;
- mtx_exit(&mclfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mclfree.m_mtx);
p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
ncl = ncl * PAGE_SIZE / MCLBYTES;
- mtx_enter(&mclfree.m_mtx, MTX_DEF);
+ mtx_lock(&mclfree.m_mtx);
/*
* Either the map is now full, or `how' is M_DONTWAIT and there
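
The uipc_mbuf.c hunks keep the existing discipline of dropping the free-list mutex around kmem_malloc() and retaking it before the list is touched again; only the spelling of the lock calls changes. A minimal user-space sketch of that shape, with malloc() standing in for kmem_malloc() and a pthread mutex standing in for mcntfree.m_mtx:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
static void *free_head;				/* protected by list_mtx */

/*
 * Mirrors the shape of m_alloc_ref()/m_mballoc(): the caller holds
 * list_mtx, the lock is dropped across the allocation (which may block),
 * and it is retaken before the free list is touched again.
 */
static int
grow_free_list(size_t nbytes)
{
	void *p;

	pthread_mutex_unlock(&list_mtx);	/* was mtx_unlock(&mcntfree.m_mtx) */
	p = malloc(nbytes);			/* stands in for kmem_malloc() */
	pthread_mutex_lock(&list_mtx);		/* was mtx_lock(&mcntfree.m_mtx) */
	if (p == NULL)
		return (0);			/* still locked, as in the kernel */
	free_head = p;				/* stand-in for chaining the new space */
	return (1);
}

int
main(void)
{
	pthread_mutex_lock(&list_mtx);
	if (grow_free_list(4096) == 0)
		fprintf(stderr, "allocation failed\n");
	pthread_mutex_unlock(&list_mtx);
	free(free_head);
	return (0);
}
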
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 983742e..43c6c27 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1418,7 +1418,7 @@ sf_buf_init(void *arg)
int i;
mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", MTX_DEF);
- mtx_enter(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
SLIST_INIT(&sf_freelist.sf_head);
sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
@@ -1428,7 +1428,7 @@ sf_buf_init(void *arg)
SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
}
sf_buf_alloc_want = 0;
- mtx_exit(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_unlock(&sf_freelist.sf_lock);
}
/*
@@ -1439,13 +1439,13 @@ sf_buf_alloc()
{
struct sf_buf *sf;
- mtx_enter(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
sf_buf_alloc_want++;
msleep(&sf_freelist, &sf_freelist.sf_lock, PVM, "sfbufa", 0);
}
SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
- mtx_exit(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_unlock(&sf_freelist.sf_lock);
return (sf);
}
@@ -1475,13 +1475,13 @@ sf_buf_free(caddr_t addr, void *args)
vm_page_free(m);
splx(s);
sf->m = NULL;
- mtx_enter(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
if (sf_buf_alloc_want) {
sf_buf_alloc_want--;
wakeup_one(&sf_freelist);
}
- mtx_exit(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_unlock(&sf_freelist.sf_lock);
}
/*
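
sf_buf_alloc() above sleeps with msleep(&sf_freelist, &sf_freelist.sf_lock, ...), which releases the given mutex while asleep and retakes it before returning, so the empty-list check and the list removal stay under the lock. The closest user-space analogue is a condition-variable wait; the names and the counter below are stand-ins for illustration only.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sf_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sf_cv = PTHREAD_COND_INITIALIZER;
static int sf_free_count;		/* stand-in for the sf_buf free list */

/*
 * Same shape as sf_buf_alloc(): sleep while the list is empty.  The wait
 * releases sf_lock and retakes it before returning, much as msleep()
 * does with the mutex it is handed.
 */
static void
sf_buf_alloc_sketch(void)
{
	pthread_mutex_lock(&sf_lock);	/* was mtx_lock(&sf_freelist.sf_lock) */
	while (sf_free_count == 0)
		pthread_cond_wait(&sf_cv, &sf_lock);
	sf_free_count--;		/* take one buffer off the list */
	pthread_mutex_unlock(&sf_lock);
}

static void
sf_buf_free_sketch(void)
{
	pthread_mutex_lock(&sf_lock);
	sf_free_count++;
	pthread_cond_signal(&sf_cv);	/* was wakeup_one(&sf_freelist) */
	pthread_mutex_unlock(&sf_lock);
}

int
main(void)
{
	sf_buf_free_sketch();		/* make one buffer available ... */
	sf_buf_alloc_sketch();		/* ... so the allocation returns at once */
	printf("free buffers left: %d\n", sf_free_count);
	return (0);
}
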
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 8c4175a..8335264 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -638,7 +638,7 @@ aio_daemon(void *uproc)
struct proc *curcp, *mycp, *userp;
struct vmspace *myvm, *tmpvm;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* Local copies of curproc (cp) and vmspace (myvm)
*/
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index a0d693c..c124559 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1800,7 +1800,7 @@ buf_daemon()
{
int s;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* This process needs to be suspended prior to shutdown sync.
diff --git a/sys/kern/vfs_conf.c b/sys/kern/vfs_conf.c
index 2ca46185..c1447ff 100644
--- a/sys/kern/vfs_conf.c
+++ b/sys/kern/vfs_conf.c
@@ -230,9 +230,9 @@ done:
} else {
/* register with list of mounted filesystems */
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 3a31666..618ce56 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -449,7 +449,7 @@ vop_nolock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK)
- mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
+ mtx_unlock(&ap->a_vp->v_interlock);
return (0);
#endif
}
@@ -471,7 +471,7 @@ vop_nounlock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK)
- mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
+ mtx_unlock(&ap->a_vp->v_interlock);
return (0);
}
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 3abcc82..38c1895 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -390,15 +390,15 @@ vfs_getvfs(fsid)
{
register struct mount *mp;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (mp);
}
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return ((struct mount *) 0);
}
@@ -422,7 +422,7 @@ vfs_getnewfsid(mp)
fsid_t tfsid;
int mtype;
- mtx_enter(&mntid_mtx, MTX_DEF);
+ mtx_lock(&mntid_mtx);
mtype = mp->mnt_vfc->vfc_typenum;
tfsid.val[1] = mtype;
mtype = (mtype & 0xFF) << 24;
@@ -435,7 +435,7 @@ vfs_getnewfsid(mp)
}
mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
- mtx_exit(&mntid_mtx, MTX_DEF);
+ mtx_unlock(&mntid_mtx);
}
/*
@@ -538,7 +538,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
if (wantfreevnodes && freevnodes < wantfreevnodes) {
vp = NULL;
@@ -560,7 +560,7 @@ getnewvnode(tag, mp, vops, vpp)
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
- !mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
+ !mtx_trylock(&vp->v_interlock)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
continue;
@@ -570,7 +570,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
break;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
}
@@ -578,13 +578,13 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_flag |= VDOOMED;
vp->v_flag &= ~VFREE;
freevnodes--;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD) {
vgonel(vp, p);
} else {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
vn_finished_write(vnmp);
@@ -609,7 +609,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_clen = 0;
vp->v_socket = 0;
} else {
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
@@ -646,7 +646,7 @@ insmntque(vp, mp)
register struct mount *mp;
{
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
/*
* Delete from old mount point vnode list, if on one.
*/
@@ -656,11 +656,11 @@ insmntque(vp, mp)
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) == NULL) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
return;
}
LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
}
/*
@@ -785,12 +785,12 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Destroy the copy in the VM cache, too.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
panic("vinvalbuf: flush failed");
@@ -1010,7 +1010,7 @@ sched_sync(void)
int s;
struct proc *p = updateproc;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
SHUTDOWN_PRI_LAST);
@@ -1104,10 +1104,10 @@ int
speedup_syncer()
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (updateproc->p_wchan == &lbolt)
setrunnable(updateproc);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (rushjob < syncdelay / 2) {
rushjob += 1;
stat_rush_requests += 1;
@@ -1407,9 +1407,9 @@ addalias(nvp, dev)
KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
nvp->v_rdev = dev;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
}
/*
@@ -1435,7 +1435,7 @@ vget(vp, flags, p)
* the VXLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
if (vp->v_vxproc == curproc) {
printf("VXLOCK interlock avoided\n");
@@ -1461,15 +1461,15 @@ vget(vp, flags, p)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
return (error);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1479,9 +1479,9 @@ vget(vp, flags, p)
void
vref(struct vnode *vp)
{
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_usecount++;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
/*
@@ -1496,14 +1496,14 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
if (vp->v_usecount > 1) {
vp->v_usecount--;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return;
}
@@ -1525,7 +1525,7 @@ vrele(vp)
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
#endif
panic("vrele: negative ref cnt");
}
@@ -1543,7 +1543,7 @@ vput(vp)
struct proc *p = curproc; /* XXX */
KASSERT(vp != NULL, ("vput: null vp"));
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
if (vp->v_usecount > 1) {
@@ -1564,7 +1564,7 @@ vput(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
} else {
@@ -1633,7 +1633,7 @@ vflush(mp, skipvp, flags)
struct vnode *vp, *nvp;
int busy = 0;
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
/*
@@ -1649,12 +1649,12 @@ loop:
if (vp == skipvp)
continue;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
/*
	 * Skip over vnodes marked VSYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
/*
@@ -1663,7 +1663,7 @@ loop:
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
@@ -1672,9 +1672,9 @@ loop:
* vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
vgonel(vp, p);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
continue;
}
@@ -1684,7 +1684,7 @@ loop:
* all other files, just kill them.
*/
if (flags & FORCECLOSE) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if (vp->v_type != VCHR) {
vgonel(vp, p);
} else {
@@ -1692,17 +1692,17 @@ loop:
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
continue;
}
#ifdef DIAGNOSTIC
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
busy++;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if (busy)
return (EBUSY);
return (0);
@@ -1784,7 +1784,7 @@ vclean(vp, flags, p)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -1794,7 +1794,7 @@ vclean(vp, flags, p)
#endif
vfree(vp);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
cache_purge(vp);
@@ -1847,9 +1847,9 @@ vop_revoke(ap)
}
dev = vp->v_rdev;
for (;;) {
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
vq = SLIST_FIRST(&dev->si_hlist);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
if (!vq)
break;
vgone(vq);
@@ -1868,15 +1868,15 @@ vrecycle(vp, inter_lkp, p)
struct proc *p;
{
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount == 0) {
if (inter_lkp) {
- mtx_exit(inter_lkp, MTX_DEF);
+ mtx_unlock(inter_lkp);
}
vgonel(vp, p);
return (1);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1890,7 +1890,7 @@ vgone(vp)
{
struct proc *p = curproc; /* XXX */
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vgonel(vp, p);
}
@@ -1919,7 +1919,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
/*
* Delete from old mount point vnode list, if on one.
@@ -1931,10 +1931,10 @@ vgonel(vp, p)
* if it is on one.
*/
if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
freedev(vp->v_rdev);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
vp->v_rdev = NULL;
}
@@ -1950,19 +1950,19 @@ vgonel(vp, p)
*/
if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
if (vp->v_flag & VFREE)
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
else
freevnodes++;
vp->v_flag |= VFREE;
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
splx(s);
}
vp->v_type = VBAD;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
/*
@@ -1976,15 +1976,15 @@ vfinddev(dev, type, vpp)
{
struct vnode *vp;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
if (type == vp->v_type) {
*vpp = vp;
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (1);
}
}
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (0);
}
@@ -1999,10 +1999,10 @@ vcount(vp)
int count;
count = 0;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
count += vq->v_usecount;
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (count);
}
@@ -2083,7 +2083,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
struct vnode *vp;
printf("Locked vnodes\n");
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -2093,11 +2093,11 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
}
#endif
@@ -2202,14 +2202,14 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
return (SYSCTL_OUT(req, 0,
(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
again:
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
for (vp = LIST_FIRST(&mp->mnt_vnodelist);
vp != NULL;
vp = nvp) {
@@ -2219,22 +2219,22 @@ again:
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
goto again;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
(error = SYSCTL_OUT(req, vp, VNODESZ)))
return (error);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (0);
}
@@ -2592,7 +2592,7 @@ loop:
continue;
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
@@ -2604,7 +2604,7 @@ loop:
vput(vp);
}
} else {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
}
if (anyio && (--tries > 0))
@@ -2638,7 +2638,7 @@ vfree(vp)
int s;
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
if (vp->v_flag & VAGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@@ -2646,7 +2646,7 @@ vfree(vp)
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
freevnodes++;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~VAGE;
vp->v_flag |= VFREE;
splx(s);
@@ -2662,11 +2662,11 @@ vbusy(vp)
int s;
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~(VFREE|VAGE);
splx(s);
}
@@ -2685,7 +2685,7 @@ vn_pollrecord(vp, p, events)
struct proc *p;
short events;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_revents & events) {
/*
* This leaves events we are not interested
@@ -2697,12 +2697,12 @@ vn_pollrecord(vp, p, events)
events &= vp->v_pollinfo.vpi_revents;
vp->v_pollinfo.vpi_revents &= ~events;
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return events;
}
vp->v_pollinfo.vpi_events |= events;
selrecord(p, &vp->v_pollinfo.vpi_selinfo);
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return 0;
}
@@ -2717,7 +2717,7 @@ vn_pollevent(vp, events)
struct vnode *vp;
short events;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_events & events) {
/*
* We clear vpi_events so that we don't
@@ -2734,7 +2734,7 @@ vn_pollevent(vp, events)
vp->v_pollinfo.vpi_revents |= events;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
/*
@@ -2746,12 +2746,12 @@ void
vn_pollgone(vp)
struct vnode *vp;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_events) {
vp->v_pollinfo.vpi_events = 0;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
@@ -2856,9 +2856,9 @@ sync_fsync(ap)
* Walk the list of vnodes pushing all that are dirty and
* not already on the sync list.
*/
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
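
The mountlist walks above (the lockedvnodes DDB command and sysctl_vnode()) hold mountlist_mtx only long enough to pick up the current and next entries; the per-mount work runs with the list lock dropped, and vfs_busy()/vfs_unbusy() keep the mount from going away in between. The sketch below shows just that lock-juggling shape; the busy/unbusy handshake and the kernel types are omitted, and a pthread mutex stands in for mountlist_mtx.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mountlist_mtx = PTHREAD_MUTEX_INITIALIZER;

struct mount_sketch {
	struct mount_sketch	*next;
	int			 id;
};
static struct mount_sketch *mountlist;	/* protected by mountlist_mtx */

/*
 * The list lock covers only the pointer manipulation; each entry is
 * processed with the lock dropped.  In the kernel the vfs_busy()
 * handshake keeps the entry valid across the unlocked section; that
 * part is omitted here.
 */
static void
walk_mounts(void)
{
	struct mount_sketch *mp, *nmp;

	pthread_mutex_lock(&mountlist_mtx);	/* was mtx_lock(&mountlist_mtx) */
	for (mp = mountlist; mp != NULL; mp = nmp) {
		pthread_mutex_unlock(&mountlist_mtx);
		printf("visiting mount %d\n", mp->id);	/* work, list unlocked */
		pthread_mutex_lock(&mountlist_mtx);
		nmp = mp->next;			/* next entry, list locked again */
	}
	pthread_mutex_unlock(&mountlist_mtx);	/* was mtx_unlock(&mountlist_mtx) */
}

int
main(void)
{
	struct mount_sketch a = { NULL, 1 };
	struct mount_sketch b = { &a, 2 };

	mountlist = &b;
	walk_mounts();
	return (0);
}
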
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 16e8984..178d2a2 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -176,16 +176,16 @@ mount(p, uap)
vput(vp);
return (EBUSY);
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vfs_unbusy(mp, p);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
mp->mnt_flag |= SCARG(uap, flags) &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, p);
@@ -243,15 +243,15 @@ mount(p, uap)
return (ENODEV);
}
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
/*
* Allocate and initialize the filesystem.
@@ -310,9 +310,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, p);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vrele(vp);
return (error);
}
@@ -322,13 +322,13 @@ update:
*/
cache_purge(vp);
if (!error) {
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
vp->v_mountedhere = mp;
- mtx_exit(&vp->v_interlock, MTX_DEF);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
checkdirs(vp);
VOP_UNLOCK(vp, 0, p);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
@@ -337,9 +337,9 @@ update:
if ((error = VFS_START(mp, 0, p)) != 0)
vrele(vp);
} else {
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free((caddr_t)mp, M_MOUNT);
@@ -464,7 +464,7 @@ dounmount(mp, flags, p)
int error;
int async_flag;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
vn_start_write(NULL, &mp, V_WAIT);
@@ -484,7 +484,7 @@ dounmount(mp, flags, p)
error = VFS_UNMOUNT(mp, flags, p);
}
vn_finished_write(mp);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
if (error) {
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
(void) vfs_allocate_syncvnode(mp);
@@ -535,7 +535,7 @@ sync(p, uap)
struct mount *mp, *nmp;
int asyncflag;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -551,11 +551,11 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
#if 0
/*
* XXX don't call vfs_bufstats() yet because that routine
@@ -727,7 +727,7 @@ getfsstat(p, uap)
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -743,7 +743,7 @@ getfsstat(p, uap)
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
@@ -757,11 +757,11 @@ getfsstat(p, uap)
sfsp += sizeof(*sp);
}
count++;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
if (sfsp && count > maxcount)
p->p_retval[0] = maxcount;
else
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 2ca46185..c1447ff 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -230,9 +230,9 @@ done:
} else {
/* register with list of mounted filesystems */
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 3abcc82..38c1895 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -390,15 +390,15 @@ vfs_getvfs(fsid)
{
register struct mount *mp;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (mp);
}
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return ((struct mount *) 0);
}
@@ -422,7 +422,7 @@ vfs_getnewfsid(mp)
fsid_t tfsid;
int mtype;
- mtx_enter(&mntid_mtx, MTX_DEF);
+ mtx_lock(&mntid_mtx);
mtype = mp->mnt_vfc->vfc_typenum;
tfsid.val[1] = mtype;
mtype = (mtype & 0xFF) << 24;
@@ -435,7 +435,7 @@ vfs_getnewfsid(mp)
}
mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
- mtx_exit(&mntid_mtx, MTX_DEF);
+ mtx_unlock(&mntid_mtx);
}
/*
@@ -538,7 +538,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
if (wantfreevnodes && freevnodes < wantfreevnodes) {
vp = NULL;
@@ -560,7 +560,7 @@ getnewvnode(tag, mp, vops, vpp)
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
- !mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
+ !mtx_trylock(&vp->v_interlock)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
continue;
@@ -570,7 +570,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
break;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
}
@@ -578,13 +578,13 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_flag |= VDOOMED;
vp->v_flag &= ~VFREE;
freevnodes--;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD) {
vgonel(vp, p);
} else {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
vn_finished_write(vnmp);
@@ -609,7 +609,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_clen = 0;
vp->v_socket = 0;
} else {
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
@@ -646,7 +646,7 @@ insmntque(vp, mp)
register struct mount *mp;
{
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
/*
* Delete from old mount point vnode list, if on one.
*/
@@ -656,11 +656,11 @@ insmntque(vp, mp)
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) == NULL) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
return;
}
LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
}
/*
@@ -785,12 +785,12 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Destroy the copy in the VM cache, too.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
panic("vinvalbuf: flush failed");
@@ -1010,7 +1010,7 @@ sched_sync(void)
int s;
struct proc *p = updateproc;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
SHUTDOWN_PRI_LAST);
@@ -1104,10 +1104,10 @@ int
speedup_syncer()
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (updateproc->p_wchan == &lbolt)
setrunnable(updateproc);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (rushjob < syncdelay / 2) {
rushjob += 1;
stat_rush_requests += 1;
@@ -1407,9 +1407,9 @@ addalias(nvp, dev)
KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
nvp->v_rdev = dev;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
}
/*
@@ -1435,7 +1435,7 @@ vget(vp, flags, p)
* the VXLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
if (vp->v_vxproc == curproc) {
printf("VXLOCK interlock avoided\n");
@@ -1461,15 +1461,15 @@ vget(vp, flags, p)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
return (error);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1479,9 +1479,9 @@ vget(vp, flags, p)
void
vref(struct vnode *vp)
{
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_usecount++;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
/*
@@ -1496,14 +1496,14 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
if (vp->v_usecount > 1) {
vp->v_usecount--;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return;
}
@@ -1525,7 +1525,7 @@ vrele(vp)
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
#endif
panic("vrele: negative ref cnt");
}
@@ -1543,7 +1543,7 @@ vput(vp)
struct proc *p = curproc; /* XXX */
KASSERT(vp != NULL, ("vput: null vp"));
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
if (vp->v_usecount > 1) {
@@ -1564,7 +1564,7 @@ vput(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
} else {
@@ -1633,7 +1633,7 @@ vflush(mp, skipvp, flags)
struct vnode *vp, *nvp;
int busy = 0;
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
/*
@@ -1649,12 +1649,12 @@ loop:
if (vp == skipvp)
continue;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
/*
	 * Skip over vnodes marked VSYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
/*
@@ -1663,7 +1663,7 @@ loop:
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
@@ -1672,9 +1672,9 @@ loop:
* vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
vgonel(vp, p);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
continue;
}
@@ -1684,7 +1684,7 @@ loop:
* all other files, just kill them.
*/
if (flags & FORCECLOSE) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if (vp->v_type != VCHR) {
vgonel(vp, p);
} else {
@@ -1692,17 +1692,17 @@ loop:
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
continue;
}
#ifdef DIAGNOSTIC
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
busy++;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if (busy)
return (EBUSY);
return (0);
@@ -1784,7 +1784,7 @@ vclean(vp, flags, p)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -1794,7 +1794,7 @@ vclean(vp, flags, p)
#endif
vfree(vp);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
cache_purge(vp);
@@ -1847,9 +1847,9 @@ vop_revoke(ap)
}
dev = vp->v_rdev;
for (;;) {
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
vq = SLIST_FIRST(&dev->si_hlist);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
if (!vq)
break;
vgone(vq);
@@ -1868,15 +1868,15 @@ vrecycle(vp, inter_lkp, p)
struct proc *p;
{
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount == 0) {
if (inter_lkp) {
- mtx_exit(inter_lkp, MTX_DEF);
+ mtx_unlock(inter_lkp);
}
vgonel(vp, p);
return (1);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1890,7 +1890,7 @@ vgone(vp)
{
struct proc *p = curproc; /* XXX */
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vgonel(vp, p);
}
@@ -1919,7 +1919,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
/*
* Delete from old mount point vnode list, if on one.
@@ -1931,10 +1931,10 @@ vgonel(vp, p)
* if it is on one.
*/
if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
freedev(vp->v_rdev);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
vp->v_rdev = NULL;
}
@@ -1950,19 +1950,19 @@ vgonel(vp, p)
*/
if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
if (vp->v_flag & VFREE)
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
else
freevnodes++;
vp->v_flag |= VFREE;
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
splx(s);
}
vp->v_type = VBAD;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
/*
@@ -1976,15 +1976,15 @@ vfinddev(dev, type, vpp)
{
struct vnode *vp;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
if (type == vp->v_type) {
*vpp = vp;
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (1);
}
}
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (0);
}
@@ -1999,10 +1999,10 @@ vcount(vp)
int count;
count = 0;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
count += vq->v_usecount;
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (count);
}
@@ -2083,7 +2083,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
struct vnode *vp;
printf("Locked vnodes\n");
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -2093,11 +2093,11 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
}
#endif
@@ -2202,14 +2202,14 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
return (SYSCTL_OUT(req, 0,
(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
again:
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
for (vp = LIST_FIRST(&mp->mnt_vnodelist);
vp != NULL;
vp = nvp) {
@@ -2219,22 +2219,22 @@ again:
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
goto again;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
(error = SYSCTL_OUT(req, vp, VNODESZ)))
return (error);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (0);
}
@@ -2592,7 +2592,7 @@ loop:
continue;
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
@@ -2604,7 +2604,7 @@ loop:
vput(vp);
}
} else {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
}
if (anyio && (--tries > 0))
@@ -2638,7 +2638,7 @@ vfree(vp)
int s;
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
if (vp->v_flag & VAGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@@ -2646,7 +2646,7 @@ vfree(vp)
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
freevnodes++;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~VAGE;
vp->v_flag |= VFREE;
splx(s);
@@ -2662,11 +2662,11 @@ vbusy(vp)
int s;
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~(VFREE|VAGE);
splx(s);
}
@@ -2685,7 +2685,7 @@ vn_pollrecord(vp, p, events)
struct proc *p;
short events;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_revents & events) {
/*
* This leaves events we are not interested
@@ -2697,12 +2697,12 @@ vn_pollrecord(vp, p, events)
events &= vp->v_pollinfo.vpi_revents;
vp->v_pollinfo.vpi_revents &= ~events;
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return events;
}
vp->v_pollinfo.vpi_events |= events;
selrecord(p, &vp->v_pollinfo.vpi_selinfo);
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return 0;
}
@@ -2717,7 +2717,7 @@ vn_pollevent(vp, events)
struct vnode *vp;
short events;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_events & events) {
/*
* We clear vpi_events so that we don't
@@ -2734,7 +2734,7 @@ vn_pollevent(vp, events)
vp->v_pollinfo.vpi_revents |= events;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
/*
@@ -2746,12 +2746,12 @@ void
vn_pollgone(vp)
struct vnode *vp;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_events) {
vp->v_pollinfo.vpi_events = 0;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
@@ -2856,9 +2856,9 @@ sync_fsync(ap)
* Walk the list of vnodes pushing all that are dirty and
* not already on the sync list.
*/
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 16e8984..178d2a2 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -176,16 +176,16 @@ mount(p, uap)
vput(vp);
return (EBUSY);
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vfs_unbusy(mp, p);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
mp->mnt_flag |= SCARG(uap, flags) &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, p);
@@ -243,15 +243,15 @@ mount(p, uap)
return (ENODEV);
}
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
/*
* Allocate and initialize the filesystem.
@@ -310,9 +310,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, p);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vrele(vp);
return (error);
}
@@ -322,13 +322,13 @@ update:
*/
cache_purge(vp);
if (!error) {
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
vp->v_mountedhere = mp;
- mtx_exit(&vp->v_interlock, MTX_DEF);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
checkdirs(vp);
VOP_UNLOCK(vp, 0, p);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
@@ -337,9 +337,9 @@ update:
if ((error = VFS_START(mp, 0, p)) != 0)
vrele(vp);
} else {
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free((caddr_t)mp, M_MOUNT);
@@ -464,7 +464,7 @@ dounmount(mp, flags, p)
int error;
int async_flag;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
vn_start_write(NULL, &mp, V_WAIT);
@@ -484,7 +484,7 @@ dounmount(mp, flags, p)
error = VFS_UNMOUNT(mp, flags, p);
}
vn_finished_write(mp);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
if (error) {
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
(void) vfs_allocate_syncvnode(mp);
@@ -535,7 +535,7 @@ sync(p, uap)
struct mount *mp, *nmp;
int asyncflag;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -551,11 +551,11 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
#if 0
/*
* XXX don't call vfs_bufstats() yet because that routine
@@ -727,7 +727,7 @@ getfsstat(p, uap)
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -743,7 +743,7 @@ getfsstat(p, uap)
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
@@ -757,11 +757,11 @@ getfsstat(p, uap)
sfsp += sizeof(*sp);
}
count++;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
if (sfsp && count > maxcount)
p->p_retval[0] = maxcount;
else
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 221e9c0..0175123 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -641,10 +641,10 @@ debug_vn_lock(vp, flags, p, filename, line)
do {
if ((flags & LK_INTERLOCK) == 0)
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curproc) {
vp->v_flag |= VXWANT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
error = ENOENT;
} else {
@@ -833,9 +833,9 @@ filt_vnattach(struct knote *kn)
if ((vp)->v_tag != VT_UFS)
return (EOPNOTSUPP);
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return (0);
}
@@ -845,10 +845,10 @@ filt_vndetach(struct knote *kn)
{
struct vnode *vp = (struct vnode *)kn->kn_fp->f_data;
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
kn, knote, kn_selnext);
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
static int
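
One more conversion worth calling out from the vnode code above: mtx_try_enter(&vp->v_interlock, MTX_DEF) became mtx_trylock(&vp->v_interlock), and a vnode whose interlock cannot be taken without blocking is simply skipped. Below is a user-space sketch of that shape using pthread_mutex_trylock(); the vnode structure and the reclaim step are stand-ins.

#include <pthread.h>
#include <stdio.h>

/* Stand-in vnode with its own interlock, as in getnewvnode() above. */
struct vnode_sketch {
	pthread_mutex_t	 v_interlock;
	int		 v_usecount;
};

/*
 * Shape of the converted free-list scan: try to take the interlock
 * without blocking; if that fails, leave the vnode alone.
 */
static int
try_reclaim(struct vnode_sketch *vp)
{
	if (pthread_mutex_trylock(&vp->v_interlock) != 0)
		return (0);		/* contended: skip this vnode */
	vp->v_usecount = 0;		/* stand-in for the recycling work */
	pthread_mutex_unlock(&vp->v_interlock);
	return (1);
}

int
main(void)
{
	struct vnode_sketch v = { PTHREAD_MUTEX_INITIALIZER, 1 };

	printf("reclaimed = %d\n", try_reclaim(&v));
	return (0);
}
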