author:    kib <kib@FreeBSD.org>  2015-03-21 01:39:44 +0000
committer: kib <kib@FreeBSD.org>  2015-03-21 01:39:44 +0000
commit:    48a0d48fb5a8382e19f93ab4c1faa34d06cfa1e7 (patch)
tree:      bec0d412be0a696c8494b879c8e6ddcf97edbe1d /sys/kern/kern_umtx.c
parent:    fe7bfd664683289b1b664debb31903b1cac6c5a3 (diff)
MFC r279390:
Change umtx_lock to be a sleepable mutex.
Diffstat (limited to 'sys/kern/kern_umtx.c')
-rw-r--r--  sys/kern/kern_umtx.c | 62
1 file changed, 31 insertions(+), 31 deletions(-)
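
Before the diff itself, a minimal sketch of what the change amounts to. It is not part of the commit; the lock name example_lock and the example_*() functions are hypothetical, and the fragment only contrasts the two mutex(9) flavors that umtx_lock switches between.

/*
 * Hypothetical illustration, not from kern_umtx.c: a spin mutex versus a
 * default (blocking) mutex.  The commit performs exactly this substitution
 * on umtx_lock and on every lock/unlock call that takes it.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_lock;		/* stand-in for umtx_lock */

static void
example_init(void)
{
	/* Old: spin mutex; waiters busy-wait with interrupts disabled. */
	/* mtx_init(&example_lock, "example lock", NULL, MTX_SPIN); */

	/* New: default mutex; contending threads may block (sleep). */
	mtx_init(&example_lock, "example lock", NULL, MTX_DEF);
}

static void
example_critical_section(void)
{
	mtx_lock(&example_lock);	/* was mtx_lock_spin() under MTX_SPIN */
	/* ... state protected by the lock ... */
	mtx_unlock(&example_lock);	/* was mtx_unlock_spin() under MTX_SPIN */
}

A MTX_SPIN mutex may only be manipulated with mtx_lock_spin()/mtx_unlock_spin() and keeps waiters spinning with interrupts disabled; a MTX_DEF mutex uses mtx_lock()/mtx_unlock() and lets contending threads be descheduled, which is what the commit message calls a sleepable mutex.
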
diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c
index 108ac70..bd2b051 100644
--- a/sys/kern/kern_umtx.c
+++ b/sys/kern/kern_umtx.c
@@ -396,7 +396,7 @@ umtxq_sysinit(void *arg __unused)
#ifdef UMTX_PROFILING
umtx_init_profiling();
#endif
- mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
+ mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
EVENTHANDLER_PRI_ANY);
}
@@ -1832,9 +1832,9 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
struct umtx_q *uq, *uq_owner;
uq_owner = owner->td_umtxq;
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (pi->pi_owner == owner) {
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
return (0);
}
@@ -1842,7 +1842,7 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
/*
* userland may have already messed the mutex, sigh.
*/
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
return (EPERM);
}
umtx_pi_setowner(pi, owner);
@@ -1856,7 +1856,7 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
sched_lend_user_prio(owner, pri);
thread_unlock(owner);
}
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
return (0);
}
@@ -1871,7 +1871,7 @@ umtx_pi_adjust(struct thread *td, u_char oldpri)
struct umtx_pi *pi;
uq = td->td_umtxq;
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
/*
* Pick up the lock that td is blocked on.
*/
@@ -1880,7 +1880,7 @@ umtx_pi_adjust(struct thread *td, u_char oldpri)
umtx_pi_adjust_thread(pi, td);
umtx_repropagate_priority(pi);
}
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
/*
@@ -1902,12 +1902,12 @@ umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
UMTXQ_LOCKED_ASSERT(uc);
KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
umtxq_insert(uq);
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (pi->pi_owner == NULL) {
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
/* XXX Only look up thread in current process. */
td1 = tdfind(owner, curproc->p_pid);
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (td1 != NULL) {
if (pi->pi_owner == NULL)
umtx_pi_setowner(pi, td1);
@@ -1931,20 +1931,20 @@ umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
td->td_flags |= TDF_UPIBLOCKED;
thread_unlock(td);
umtx_propagate_priority(td);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
umtxq_unbusy(&uq->uq_key);
error = umtxq_sleep(uq, wmesg, timo);
umtxq_remove(uq);
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
uq->uq_pi_blocked = NULL;
thread_lock(td);
td->td_flags &= ~TDF_UPIBLOCKED;
thread_unlock(td);
TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
umtx_repropagate_priority(pi);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
umtxq_unlock(&uq->uq_key);
return (error);
@@ -1976,7 +1976,7 @@ umtx_pi_unref(struct umtx_pi *pi)
UMTXQ_LOCKED_ASSERT(uc);
KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
if (--pi->pi_refcount == 0) {
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (pi->pi_owner != NULL) {
TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
pi, pi_link);
@@ -1984,7 +1984,7 @@ umtx_pi_unref(struct umtx_pi *pi)
}
KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
("blocked queue not empty"));
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
umtx_pi_free(pi);
}
@@ -2238,11 +2238,11 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
umtxq_busy(&key);
count = umtxq_count_pi(&key, &uq_first);
if (uq_first != NULL) {
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
pi = uq_first->uq_pi_blocked;
KASSERT(pi != NULL, ("pi == NULL?"));
if (pi->pi_owner != curthread) {
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
umtxq_unbusy(&key);
umtxq_unlock(&key);
umtx_key_release(&key);
@@ -2268,7 +2268,7 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
thread_lock(curthread);
sched_lend_user_prio(curthread, pri);
thread_unlock(curthread);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
if (uq_first)
umtxq_signal_thread(uq_first);
} else {
@@ -2285,10 +2285,10 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
* umtx_pi, and unlocked the umtxq.
* If the current thread owns it, it must disown it.
*/
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (pi->pi_owner == td)
umtx_pi_disown(pi);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
}
umtxq_unlock(&key);
@@ -2351,9 +2351,9 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
goto out;
}
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
error = EINVAL;
goto out;
}
@@ -2364,7 +2364,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
sched_lend_user_prio(td, uq->uq_inherited_pri);
thread_unlock(td);
}
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
rv = casueword32(&m->m_owner,
UMUTEX_CONTESTED, &owner, id | UMUTEX_CONTESTED);
@@ -2405,7 +2405,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
umtxq_remove(uq);
umtxq_unlock(&uq->uq_key);
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
uq->uq_inherited_pri = old_inherited_pri;
pri = PRI_MAX;
TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
@@ -2420,11 +2420,11 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
thread_lock(td);
sched_lend_user_prio(td, pri);
thread_unlock(td);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
if (error != 0) {
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
uq->uq_inherited_pri = old_inherited_pri;
pri = PRI_MAX;
TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
@@ -2439,7 +2439,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
thread_lock(td);
sched_lend_user_prio(td, pri);
thread_unlock(td);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
out:
@@ -2511,7 +2511,7 @@ do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
if (error == -1)
error = EFAULT;
else {
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
if (su != 0)
uq->uq_inherited_pri = new_inherited_pri;
pri = PRI_MAX;
@@ -2527,7 +2527,7 @@ do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
thread_lock(td);
sched_lend_user_prio(td, pri);
thread_unlock(td);
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
}
umtx_key_release(&key);
return (error);
@@ -4102,13 +4102,13 @@ umtx_thread_cleanup(struct thread *td)
if ((uq = td->td_umtxq) == NULL)
return;
- mtx_lock_spin(&umtx_lock);
+ mtx_lock(&umtx_lock);
uq->uq_inherited_pri = PRI_MAX;
while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
pi->pi_owner = NULL;
TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
}
- mtx_unlock_spin(&umtx_lock);
+ mtx_unlock(&umtx_lock);
thread_lock(td);
sched_lend_user_prio(td, PRI_MAX);
thread_unlock(td);
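
Most hunks above share one pattern: the priority-inheritance bookkeeping is updated while holding umtx_lock, and the actual priority lending happens under the target thread's lock. A thread lock is a spin lock, and a spin lock may still be acquired while a default mutex is held, so the lock ordering survives the MTX_SPIN to MTX_DEF switch; only the lock and unlock calls on umtx_lock change. A condensed, hypothetical rendering of that pattern follows (example_lend_user_prio() and the local umtx_lock stand-in are illustrative, not code from the file):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>

static struct mtx umtx_lock;		/* stand-in for the lock kern_umtx.c defines */

static void
example_lend_user_prio(struct thread *td, u_char pri)
{
	mtx_lock(&umtx_lock);		/* formerly mtx_lock_spin(&umtx_lock) */
	/* ... 'pri' would normally be derived from the contested PI chain here ... */
	thread_lock(td);		/* spin lock; still legal under a MTX_DEF mutex */
	sched_lend_user_prio(td, pri);	/* requires the thread lock to be held */
	thread_unlock(td);
	mtx_unlock(&umtx_lock);		/* formerly mtx_unlock_spin(&umtx_lock) */
}
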