path: root/sys/kern/kern_mutex.c
author		jeff <jeff@FreeBSD.org>	2007-06-04 23:51:44 +0000
committer	jeff <jeff@FreeBSD.org>	2007-06-04 23:51:44 +0000
commit		15c2dd7a1fd67c01c85a5ed9bea3c15a0d5a4d0b (patch)
tree		98103ce3dfc26a461168bb27d35de8de7593d97e /sys/kern/kern_mutex.c
parent		ea7c909871b08e0c77c4e21ea7490715d243d562 (diff)
Commit 3/14 of sched_lock decomposition.
- Add a per-turnstile spinlock to solve potential priority propagation
  deadlocks that are possible with thread_lock().
- The turnstile lock order is defined as the exact opposite of the lock
  order used with the sleep locks they represent.  This allows us to walk
  in reverse order in priority_propagate and this is the only place we
  wish to multiply acquire turnstile locks.
- Use the turnstile_chain lock to protect assigning mutexes to turnstiles.
- Change the turnstile interface to pass back turnstile pointers to the
  consumers.  This allows us to reduce some locking and makes it easier
  to cancel turnstile assignment while the turnstile chain lock is held.

Tested by:	kris, current@
Tested on:	i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with:	kris, attilio, kmacy, jhb, julian, bde (small parts each)
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r--	sys/kern/kern_mutex.c	| 149
1 file changed, 122 insertions(+), 27 deletions(-)
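The interface change in the last bullet shapes most of the diff below: turnstile_lock()/turnstile_release() give way to turnstile_trywait()/turnstile_cancel(), which hand the turnstile pointer back to the caller. A compilable toy model of that shape follows; the function bodies are stand-ins, not kernel code, and only the calling convention mirrors the diff:

        #include <stdio.h>

        struct lock_object { const char *lo_name; };
        struct turnstile { struct lock_object *ts_lockobj; };

        static struct turnstile ts_storage;

        /* Reserve this lock's turnstile; the real function also leaves
         * the turnstile chain locked for the caller. */
        static struct turnstile *
        turnstile_trywait(struct lock_object *lock)
        {
                ts_storage.ts_lockobj = lock;
                return (&ts_storage);
        }

        /* Back out: the lock went away (or the race was lost) before we
         * could block, so drop the reservation and let the caller retry. */
        static void
        turnstile_cancel(struct turnstile *ts)
        {
                ts->ts_lockobj = NULL;
        }

        int
        main(void)
        {
                struct lock_object lo = { "demo lock" };
                struct turnstile *ts;

                ts = turnstile_trywait(&lo);    /* was: turnstile_lock(&lo) */
                /* ...recheck the lock word; on a lost race: */
                turnstile_cancel(ts);           /* was: turnstile_release(&lo) */
                printf("trywait/cancel round trip on %s\n", lo.lo_name);
                return (0);
        }

As the diff shows, turnstile_claim(ts) and turnstile_wait(ts, owner, queue) take the same pointer, so the waiter commits without a second hash lookup.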
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index fa498b5..ae4a36b 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -127,6 +127,7 @@ struct lock_class lock_class_mtx_spin = {
/*
* System-wide mutexes
*/
+struct mtx blocked_lock;
struct mtx sched_lock;
struct mtx Giant;
@@ -305,6 +306,7 @@ void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
int line)
{
+ struct turnstile *ts;
#ifdef ADAPTIVE_MUTEXES
volatile struct thread *owner;
#endif
@@ -334,7 +336,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
while (!_obtain_lock(m, tid)) {
- turnstile_lock(&m->lock_object);
+ ts = turnstile_trywait(&m->lock_object);
v = m->mtx_lock;
/*
@@ -342,7 +344,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
* the turnstile chain lock.
*/
if (v == MTX_UNOWNED) {
- turnstile_release(&m->lock_object);
+ turnstile_cancel(ts);
cpu_spinwait();
continue;
}
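This hunk and the CAS hunk further down back out with turnstile_cancel() whenever the lock word changes underneath the waiter, then simply retry. A minimal userland analogue of that recheck-and-retry acquire, using C11 atomics in place of the kernel's _obtain_lock/atomic_cmpset_ptr (names and the MTX_UNOWNED value of 0 are toy assumptions):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define MTX_UNOWNED 0   /* toy value */

        static _Atomic uintptr_t lock_word = MTX_UNOWNED;

        /* One acquire attempt: succeeds only if the word is unowned. */
        static bool
        try_acquire(uintptr_t tid)
        {
                uintptr_t v = MTX_UNOWNED;

                return (atomic_compare_exchange_strong(&lock_word, &v, tid));
        }

        int
        main(void)
        {
                uintptr_t tid = 1;

                while (!try_acquire(tid)) {
                        /* Kernel path: turnstile_trywait(), re-read the
                         * word, turnstile_cancel() on a stale view, then
                         * cpu_spinwait() and retry. */
                }
                printf("owner is now %#jx\n", (uintmax_t)atomic_load(&lock_word));
                return (0);
        }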
@@ -358,7 +360,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
*/
if (v == MTX_CONTESTED) {
m->mtx_lock = tid | MTX_CONTESTED;
- turnstile_claim(&m->lock_object);
+ turnstile_claim(ts);
break;
}
#endif
@@ -370,7 +372,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
*/
if ((v & MTX_CONTESTED) == 0 &&
!atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
- turnstile_release(&m->lock_object);
+ turnstile_cancel(ts);
cpu_spinwait();
continue;
}
@@ -387,7 +389,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
if (m != &Giant && TD_IS_RUNNING(owner))
#endif
{
- turnstile_release(&m->lock_object);
+ turnstile_cancel(ts);
while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
cpu_spinwait();
}
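This is the ADAPTIVE_MUTEXES path: rather than blocking immediately, keep spinning while the owner is on a CPU, since it is likely to release soon. A compilable userland sketch, with an atomic flag standing in for TD_IS_RUNNING(owner):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>
        #include <unistd.h>

        static _Atomic int lock_held = 1;       /* the owner starts with the lock */
        static _Atomic int owner_on_cpu = 1;    /* stand-in for TD_IS_RUNNING() */

        static void *
        owner(void *arg)
        {
                (void)arg;
                usleep(1000);                   /* hold the lock briefly */
                atomic_store(&lock_held, 0);    /* release */
                atomic_store(&owner_on_cpu, 0);
                return (NULL);
        }

        int
        main(void)
        {
                pthread_t td;
                unsigned long spins = 0;

                pthread_create(&td, NULL, owner, NULL);
                while (atomic_load(&lock_held)) {
                        if (!atomic_load(&owner_on_cpu))
                                break;  /* kernel: fall through to turnstile_wait() */
                        spins++;        /* kernel: cpu_spinwait() */
                }
                pthread_join(td, NULL);
                printf("spun %lu times while the owner ran\n", spins);
                return (0);
        }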
@@ -414,8 +416,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
/*
* Block on the turnstile.
*/
- turnstile_wait(&m->lock_object, mtx_owner(m),
- TS_EXCLUSIVE_QUEUE);
+ turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
}
#ifdef KTR
if (cont_logged) {
@@ -428,7 +429,25 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
waittime, (file), (line));
}
+static void
+_mtx_lock_spin_failed(struct mtx *m)
+{
+ struct thread *td;
+
+ td = mtx_owner(m);
+
+ /* If the mutex is unlocked, try again. */
+ if (td == NULL)
+ return;
+ printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
+ m, m->lock_object.lo_name, td, td->td_tid);
+#ifdef WITNESS
+ witness_display_spinlock(&m->lock_object, td);
+#endif
+ panic("spin lock held too long");
+}
+
#ifdef SMP
/*
* _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
*
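_mtx_lock_spin_failed() centralizes the "spun too long" diagnostics that were previously open-coded inside _mtx_lock_spin() and that thread_lock now needs as well. A userland sketch of the same shape, with stand-in types and abort() standing in for panic():

        #include <stdio.h>
        #include <stdlib.h>

        struct toy_mtx {
                const char      *name;
                long            owner_tid;      /* 0 means unowned */
        };

        /* Report the apparent owner and abort; with no owner, just return
         * so the caller retries (the lock was released while we looked). */
        static void
        spin_failed(struct toy_mtx *m)
        {
                if (m->owner_tid == 0)
                        return;
                fprintf(stderr, "spin lock %p (%s) held by tid %ld too long\n",
                    (void *)m, m->name, m->owner_tid);
                abort();        /* kernel: panic("spin lock held too long") */
        }

        int
        main(void)
        {
                struct toy_mtx m = { "demo", 0 };

                spin_failed(&m);        /* owner vanished: caller respins */
                printf("lock came free; retrying instead of panicking\n");
                return (0);
        }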
@@ -440,7 +459,6 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
int line)
{
int i = 0, contested = 0;
- struct thread *td;
uint64_t waittime = 0;
if (LOCK_LOG_TEST(&m->lock_object, opts))
@@ -458,20 +476,8 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
}
if (i < 60000000 || kdb_active || panicstr != NULL)
DELAY(1);
- else {
- td = mtx_owner(m);
-
- /* If the mutex is unlocked, try again. */
- if (td == NULL)
- continue;
- printf(
- "spin lock %p (%s) held by %p (tid %d) too long\n",
- m, m->lock_object.lo_name, td, td->td_tid);
-#ifdef WITNESS
- witness_display_spinlock(&m->lock_object, td);
-#endif
- panic("spin lock held too long");
- }
+ else
+ _mtx_lock_spin_failed(m);
cpu_spinwait();
}
spinlock_enter();
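The spin policy escalates in tiers: raw spins first, then one-microsecond delays, then the failure path (the kdb_active/panicstr checks, which suppress the panic while debugging or panicking, are omitted here). A compilable sketch with the thresholds from the diff and portable stand-ins for cpu_spinwait() and DELAY():

        #include <sched.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <unistd.h>

        /* One iteration of the tiered wait while a spin lock stays busy. */
        static void
        spin_wait_step(long *i)
        {
                if ((*i)++ < 10000000)
                        sched_yield();  /* stand-in for cpu_spinwait() */
                else if (*i < 60000000)
                        usleep(1);      /* stand-in for DELAY(1) */
                else {
                        fprintf(stderr, "spin lock held too long\n");
                        abort();        /* stand-in for _mtx_lock_spin_failed() */
                }
        }

        int
        main(void)
        {
                long i = 0;

                for (int n = 0; n < 3; n++)
                        spin_wait_step(&i);
                printf("waited %ld steps without escalating\n", i);
                return (0);
        }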
@@ -482,10 +488,87 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
lock_profile_obtain_lock_success(&m->lock_object, contested,
waittime, (file), (line));
-
}
#endif /* SMP */
+void
+_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
+{
+ struct mtx *m;
+ uintptr_t tid;
+ int i;
+
+ i = 0;
+ tid = (uintptr_t)curthread;
+ for (;;) {
+retry:
+ spinlock_enter();
+ m = __DEVOLATILE(struct mtx *, td->td_lock);
+ WITNESS_CHECKORDER(&m->lock_object,
+ opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line);
+ while (!_obtain_lock(m, tid)) {
+ if (m->mtx_lock == tid) {
+ m->mtx_recurse++;
+ break;
+ }
+ /* Give interrupts a chance while we spin. */
+ spinlock_exit();
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 10000000)
+ cpu_spinwait();
+ else if (i < 60000000 ||
+ kdb_active || panicstr != NULL)
+ DELAY(1);
+ else
+ _mtx_lock_spin_failed(m);
+ cpu_spinwait();
+ if (m != td->td_lock)
+ goto retry;
+ }
+ spinlock_enter();
+ }
+ if (m == td->td_lock)
+ break;
+ _rel_spin_lock(m); /* does spinlock_exit() */
+ }
+ WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
+}
+
+struct mtx *
+thread_lock_block(struct thread *td)
+{
+ struct mtx *lock;
+
+ spinlock_enter();
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ lock = __DEVOLATILE(struct mtx *, td->td_lock);
+ td->td_lock = &blocked_lock;
+ mtx_unlock_spin(lock);
+
+ return (lock);
+}
+
+void
+thread_lock_unblock(struct thread *td, struct mtx *new)
+{
+ mtx_assert(new, MA_OWNED);
+ MPASS(td->td_lock == &blocked_lock);
+ atomic_store_rel_ptr((void *)&td->td_lock, (uintptr_t)new);
+ spinlock_exit();
+}
+
+void
+thread_lock_set(struct thread *td, struct mtx *new)
+{
+ struct mtx *lock;
+
+ mtx_assert(new, MA_OWNED);
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ lock = __DEVOLATILE(struct mtx *, td->td_lock);
+ td->td_lock = new;
+ mtx_unlock_spin(lock);
+}
+
/*
* _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
*
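The heart of the decomposition is that a thread's lock is now a pointer, td_lock, and thread_lock_block()/thread_lock_unblock() hand a thread off between spin mutexes by parking td_lock on blocked_lock, which can never be acquired, so contenders spin until the new lock is published. A compilable model with stand-in (toy_*) types, using C11 release semantics as the diff's atomic_store_rel_ptr() does:

        #include <stdatomic.h>
        #include <stdio.h>

        struct toy_mtx { const char *name; };

        static struct toy_mtx blocked_lock = { "blocked lock" };
        static struct toy_mtx sched_lock_a = { "runq A" };
        static struct toy_mtx sched_lock_b = { "runq B" };

        struct toy_thread { _Atomic(struct toy_mtx *) td_lock; };

        /* Detach the thread from its current lock; contenders now spin. */
        static struct toy_mtx *
        toy_thread_lock_block(struct toy_thread *td)
        {
                struct toy_mtx *old = atomic_load(&td->td_lock);

                atomic_store(&td->td_lock, &blocked_lock);
                return (old);
        }

        /* Publish the new lock with release semantics, mirroring the
         * diff's atomic_store_rel_ptr(). */
        static void
        toy_thread_lock_unblock(struct toy_thread *td, struct toy_mtx *new)
        {
                atomic_store_explicit(&td->td_lock, new, memory_order_release);
        }

        int
        main(void)
        {
                struct toy_thread td = { &sched_lock_a };
                struct toy_mtx *old;

                old = toy_thread_lock_block(&td);       /* e.g. during migration */
                toy_thread_lock_unblock(&td, &sched_lock_b);
                printf("moved from %s to %s\n", old->name,
                    atomic_load(&td.td_lock)->name);
                return (0);
        }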
@@ -508,7 +591,11 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
return;
}
- turnstile_lock(&m->lock_object);
+ /*
+ * We have to lock the chain before the turnstile so this turnstile
+ * can be removed from the hash list if it is empty.
+ */
+ turnstile_chain_lock(&m->lock_object);
ts = turnstile_lookup(&m->lock_object);
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
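The new comment states the ordering rule: turnstiles hang off a hash chain keyed by lock object, and only the chain lock makes it safe to unhash an emptied turnstile. A toy lookup under a pthread mutex standing in for turnstile_chain_lock() (all names here are stand-ins, not the kernel's):

        #include <pthread.h>
        #include <stddef.h>
        #include <stdio.h>

        struct toy_ts { const void *obj; struct toy_ts *next; };

        static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;
        static struct toy_ts *chain_head;

        /* Find the turnstile for a lock object; the caller holds the
         * chain lock, so an empty turnstile could be unhashed safely. */
        static struct toy_ts *
        ts_lookup(const void *obj)
        {
                struct toy_ts *ts;

                for (ts = chain_head; ts != NULL; ts = ts->next)
                        if (ts->obj == obj)
                                return (ts);
                return (NULL);
        }

        int
        main(void)
        {
                int lockobj;
                struct toy_ts ts = { &lockobj, NULL };

                chain_head = &ts;
                pthread_mutex_lock(&chain_lock);        /* turnstile_chain_lock() */
                printf("found: %p\n", (void *)ts_lookup(&lockobj));
                pthread_mutex_unlock(&chain_lock);      /* turnstile_chain_unlock() */
                return (0);
        }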
@@ -518,7 +605,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
_release_lock_quick(m);
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
- turnstile_release(&m->lock_object);
+ turnstile_chain_unlock(&m->lock_object);
return;
}
#else
@@ -543,7 +630,12 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
m);
}
#endif
+ /*
+ * This turnstile is no longer associated with the mutex. We can
+ * unlock the chain lock so a new turnstile may take its place.
+ */
turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
+ turnstile_chain_unlock(&m->lock_object);
#ifndef PREEMPTION
/*
@@ -557,7 +649,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
return;
- mtx_lock_spin(&sched_lock);
+ thread_lock(td1);
if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
if (td->td_ithd != NULL) {
@@ -582,7 +674,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
m, (void *)m->mtx_lock);
}
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td1);
#endif
}
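With sched_lock decomposed, code that used to take the global lock now takes thread_lock(td1), which must chase td_lock and revalidate after acquiring, exactly as _thread_lock_flags() above does. A compilable miniature of that retry loop (stand-in types; the kernel version also handles recursion, spinlock_enter/exit, and WITNESS):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>

        struct toy_thread { _Atomic(pthread_mutex_t *) td_lock; };

        static void
        toy_thread_lock(struct toy_thread *td)
        {
                pthread_mutex_t *m;

                for (;;) {
                        m = atomic_load(&td->td_lock);
                        pthread_mutex_lock(m);
                        if (atomic_load(&td->td_lock) == m)
                                return;                 /* still the right lock */
                        pthread_mutex_unlock(m);        /* thread moved; retry */
                }
        }

        int
        main(void)
        {
                pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
                struct toy_thread td = { &m };

                toy_thread_lock(&td);
                printf("thread lock held\n");
                pthread_mutex_unlock(atomic_load(&td.td_lock));
                return (0);
        }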
@@ -761,7 +853,10 @@ mutex_init(void)
*/
mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
+ mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
+ blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked. */
mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
+ mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
mtx_lock(&Giant);
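blocked_lock is made permanently unacquirable by seeding its lock word with a bogus owner (0xdeadc0de), so any thread that reaches it through td_lock spins until the handoff completes. A toy demonstration that an acquire CAS against such a word always fails (MTX_UNOWNED modeled as 0 here; the kernel's actual unowned cookie differs):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define MTX_UNOWNED 0   /* toy value */

        static _Atomic uintptr_t blocked_word = 0xdeadc0de;     /* "always blocked" */

        /* The acquire path expects an unowned word; against blocked_lock
         * the CAS can never succeed, so spinners keep waiting. */
        static bool
        obtain_lock(_Atomic uintptr_t *word, uintptr_t tid)
        {
                uintptr_t v = MTX_UNOWNED;

                return (atomic_compare_exchange_strong(word, &v, tid));
        }

        int
        main(void)
        {
                printf("acquiring blocked_lock %s\n",
                    obtain_lock(&blocked_word, 42) ? "succeeded?!" : "fails, by design");
                return (0);
        }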