-rw-r--r--   sys/kern/kern_exit.c      2
-rw-r--r--   sys/kern/kern_kthread.c   2
-rw-r--r--   sys/kern/kern_thr.c       1
-rw-r--r--   sys/kern/kern_thread.c    2
-rw-r--r--   sys/kern/kern_umtx.c     62
-rw-r--r--   sys/kern/subr_witness.c   6
6 files changed, 42 insertions, 33 deletions
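
The diff below converts the global umtx_lock from a spin mutex (MTX_SPIN) to an ordinary sleep mutex (MTX_DEF) and, because the lock can then no longer be taken from under a spin lock, moves the umtx_thread_exit() call out of thread_exit() and into each exit path before PROC_SLOCK(). A minimal sketch of the mtx(9) usage affected by the conversion; the names mirror the diff, but the function itself is illustrative:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx umtx_lock;	/* protects umtx priority-inheritance state */

static void
umtx_lock_sketch(void)
{
	/* Old: a spin mutex, paired with mtx_lock_spin()/mtx_unlock_spin(). */
	/* mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN); */

	/* New: a default (sleep) mutex; contenders may sleep while waiting. */
	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);

	mtx_lock(&umtx_lock);
	/* ... manipulate priority-inheritance queues ... */
	mtx_unlock(&umtx_lock);
}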
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 3b5fc76..07b7c6c 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -73,6 +73,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sdt.h>
#include <sys/shm.h>
#include <sys/sem.h>
+#include <sys/umtx.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
@@ -637,6 +638,7 @@ exit1(struct thread *td, int rv)
wakeup(p->p_pptr);
cv_broadcast(&p->p_pwait);
sched_exit(p->p_pptr, td);
+ umtx_thread_exit(td);
PROC_SLOCK(p);
p->p_state = PRS_ZOMBIE;
PROC_UNLOCK(p->p_pptr);
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index 969c513..ee94de0 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
+#include <sys/umtx.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>
@@ -339,6 +340,7 @@ kthread_exit(void)
}
LIST_REMOVE(curthread, td_hash);
rw_wunlock(&tidhash_lock);
+ umtx_thread_exit(curthread);
PROC_SLOCK(p);
thread_exit();
}
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index b298f33..280bc0b 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -322,6 +322,7 @@ sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
LIST_REMOVE(td, td_hash);
rw_wunlock(&tidhash_lock);
tdsigcleanup(td);
+ umtx_thread_exit(td);
PROC_SLOCK(p);
thread_stopped(p);
thread_exit();
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 62d92b0..0549c07 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -414,7 +414,6 @@ thread_exit(void)
#ifdef AUDIT
AUDIT_SYSCALL_EXIT(0, td);
#endif
- umtx_thread_exit(td);
/*
* drop FPU & debug register state storage, or any other
* architecture specific resources that
@@ -863,6 +862,7 @@ thread_suspend_check(int return_instead)
tidhash_remove(td);
PROC_LOCK(p);
tdsigcleanup(td);
+ umtx_thread_exit(td);
PROC_SLOCK(p);
thread_stopped(p);
thread_exit();
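
The four callers above (exit1(), kthread_exit(), sys_thr_exit() and the single-threading path in thread_suspend_check()) now release the thread's umtx state themselves, and thread_exit() no longer does. The reason is lock order: umtx_thread_exit() reaches umtx_thread_cleanup(), which now takes the sleep mutex umtx_lock, and a sleep mutex may not be acquired while the process spin lock is held, which is the state thread_exit() runs in. A hedged sketch of the resulting ordering; exit_path_sketch() is illustrative, not a function in the tree:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/umtx.h>

static void
exit_path_sketch(struct thread *td, struct proc *p)
{
	/* Drop umtx state while only sleepable locks may be needed. */
	umtx_thread_exit(td);

	/* Only then take the process spin lock and exit the thread. */
	PROC_SLOCK(p);
	thread_exit();		/* no longer calls umtx_thread_exit() itself */
}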
diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c
index 108ac70..bd2b051 100644
--- a/sys/kern/kern_umtx.c
+++ b/sys/kern/kern_umtx.c
@@ -396,7 +396,7 @@ umtxq_sysinit(void *arg __unused)
#ifdef UMTX_PROFILING
umtx_init_profiling();
#endif
- mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
+ mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
EVENTHANDLER_PRI_ANY);
}
@@ -1832,9 +1832,9 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
struct umtx_q *uq, *uq_owner;
uq_owner = owner->td_umtxq;
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (pi->pi_owner == owner) {
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
return (0);
}
@@ -1842,7 +1842,7 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
/*
* userland may have already messed the mutex, sigh.
*/
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
return (EPERM);
}
umtx_pi_setowner(pi, owner);
@@ -1856,7 +1856,7 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
sched_lend_user_prio(owner, pri);
thread_unlock(owner);
}
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
return (0);
}
@@ -1871,7 +1871,7 @@ umtx_pi_adjust(struct thread *td, u_char oldpri)
struct umtx_pi *pi;
uq = td->td_umtxq;
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
/*
* Pick up the lock that td is blocked on.
*/
@@ -1880,7 +1880,7 @@ umtx_pi_adjust(struct thread *td, u_char oldpri)
umtx_pi_adjust_thread(pi, td);
umtx_repropagate_priority(pi);
}
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
/*
@@ -1902,12 +1902,12 @@ umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
UMTXQ_LOCKED_ASSERT(uc);
KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
umtxq_insert(uq);
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (pi->pi_owner == NULL) {
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
/* XXX Only look up thread in current process. */
td1 = tdfind(owner, curproc->p_pid);
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (td1 != NULL) {
if (pi->pi_owner == NULL)
umtx_pi_setowner(pi, td1);
@@ -1931,20 +1931,20 @@ umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
td->td_flags |= TDF_UPIBLOCKED;
thread_unlock(td);
umtx_propagate_priority(td);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
umtxq_unbusy(&uq->uq_key);
error = umtxq_sleep(uq, wmesg, timo);
umtxq_remove(uq);
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
uq->uq_pi_blocked = NULL;
thread_lock(td);
td->td_flags &= ~TDF_UPIBLOCKED;
thread_unlock(td);
TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
umtx_repropagate_priority(pi);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
umtxq_unlock(&uq->uq_key);
return (error);
@@ -1976,7 +1976,7 @@ umtx_pi_unref(struct umtx_pi *pi)
UMTXQ_LOCKED_ASSERT(uc);
KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
if (--pi->pi_refcount == 0) {
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (pi->pi_owner != NULL) {
TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
pi, pi_link);
@@ -1984,7 +1984,7 @@ umtx_pi_unref(struct umtx_pi *pi)
}
KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
("blocked queue not empty"));
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
umtx_pi_free(pi);
}
@@ -2238,11 +2238,11 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
umtxq_busy(&key);
count = umtxq_count_pi(&key, &uq_first);
if (uq_first != NULL) {
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
pi = uq_first->uq_pi_blocked;
KASSERT(pi != NULL, ("pi == NULL?"));
if (pi->pi_owner != curthread) {
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
umtxq_unbusy(&key);
umtxq_unlock(&key);
umtx_key_release(&key);
@@ -2268,7 +2268,7 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
thread_lock(curthread);
sched_lend_user_prio(curthread, pri);
thread_unlock(curthread);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
if (uq_first)
umtxq_signal_thread(uq_first);
} else {
@@ -2285,10 +2285,10 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
* umtx_pi, and unlocked the umtxq.
* If the current thread owns it, it must disown it.
*/
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (pi->pi_owner == td)
umtx_pi_disown(pi);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
}
umtxq_unlock(&key);
@@ -2351,9 +2351,9 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
goto out;
}
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
error = EINVAL;
goto out;
}
@@ -2364,7 +2364,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
sched_lend_user_prio(td, uq->uq_inherited_pri);
thread_unlock(td);
}
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
rv = casueword32(&m->m_owner,
UMUTEX_CONTESTED, &owner, id | UMUTEX_CONTESTED);
@@ -2405,7 +2405,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
umtxq_remove(uq);
umtxq_unlock(&uq->uq_key);
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
uq->uq_inherited_pri = old_inherited_pri;
pri = PRI_MAX;
TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
@@ -2420,11 +2420,11 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
thread_lock(td);
sched_lend_user_prio(td, pri);
thread_unlock(td);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
if (error != 0) {
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
uq->uq_inherited_pri = old_inherited_pri;
pri = PRI_MAX;
TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
@@ -2439,7 +2439,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
thread_lock(td);
sched_lend_user_prio(td, pri);
thread_unlock(td);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
out:
@@ -2511,7 +2511,7 @@ do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
if (error == -1)
error = EFAULT;
else {
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (su != 0)
uq->uq_inherited_pri = new_inherited_pri;
pri = PRI_MAX;
@@ -2527,7 +2527,7 @@ do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
thread_lock(td);
sched_lend_user_prio(td, pri);
thread_unlock(td);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
umtx_key_release(&key);
return (error);
@@ -4102,13 +4102,13 @@ umtx_thread_cleanup(struct thread *td)
if ((uq = td->td_umtxq) == NULL)
return;
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
uq->uq_inherited_pri = PRI_MAX;
while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
pi->pi_owner = NULL;
TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
}
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
thread_lock(td);
sched_lend_user_prio(td, PRI_MAX);
thread_unlock(td);
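
Inside kern_umtx.c the change is mechanical: every mtx_lock_spin()/mtx_unlock_spin() pair on umtx_lock becomes mtx_lock()/mtx_unlock(). The nesting with thread_lock() seen in several hunks remains valid because a spin lock (the per-thread lock) may be acquired while a sleep mutex is held; only the reverse order is forbidden. A minimal sketch of the post-change pattern, loosely modelled on the priority-lending sections above (pi_adjust_sketch() and its arguments are illustrative):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>

extern struct mtx umtx_lock;

static void
pi_adjust_sketch(struct thread *td, u_char pri)
{
	mtx_lock(&umtx_lock);		/* was mtx_lock_spin() */
	thread_lock(td);		/* spin lock nests inside the sleep mutex */
	sched_lend_user_prio(td, pri);
	thread_unlock(td);
	mtx_unlock(&umtx_lock);		/* was mtx_unlock_spin() */
}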
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index d1087bc..1fa3020 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -491,6 +491,11 @@ static struct witness_order_list_entry order_lists[] = {
{ "time lock", &lock_class_mtx_sleep },
{ NULL, NULL },
/*
+ * umtx
+ */
+ { "umtx lock", &lock_class_mtx_sleep },
+ { NULL, NULL },
+ /*
* Sockets
*/
{ "accept", &lock_class_mtx_sleep },
@@ -642,7 +647,6 @@ static struct witness_order_list_entry order_lists[] = {
#endif
{ "process slock", &lock_class_mtx_spin },
{ "sleepq chain", &lock_class_mtx_spin },
- { "umtx lock", &lock_class_mtx_spin },
{ "rm_spinlock", &lock_class_mtx_spin },
{ "turnstile chain", &lock_class_mtx_spin },
{ "turnstile lock", &lock_class_mtx_spin },