summaryrefslogtreecommitdiffstats
path: root/sys/kern/subr_turnstile.c
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2004-10-12 18:36:20 +0000
committerjhb <jhb@FreeBSD.org>2004-10-12 18:36:20 +0000
commita8c1c80ef5a33d82995b1cd3c3100a4b0af157b5 (patch)
tree81c7d98eb60f7036658dff8a8da6c4a5f54833aa /sys/kern/subr_turnstile.c
parent30fc565c2db8d56e60ce56f26d783cf95ddad314 (diff)
downloadFreeBSD-src-a8c1c80ef5a33d82995b1cd3c3100a4b0af157b5.zip
FreeBSD-src-a8c1c80ef5a33d82995b1cd3c3100a4b0af157b5.tar.gz
Refine the turnstile and sleep queue interfaces just a bit:
- Add a new _lock() call to each API that locks the associated chain lock for a lock_object pointer or wait channel. The _lookup() functions now require that the chain lock be locked via _lock() when they are called. - Change sleepq_add(), turnstile_wait() and turnstile_claim() to lookup the associated queue structure internally via _lookup() rather than accepting a pointer from the caller. For turnstiles, this means that the actual lookup of the turnstile in the hash table is only done when the thread actually blocks rather than being done on each loop iteration in _mtx_lock_sleep(). For sleep queues, this means that sleepq_lookup() is no longer used outside of the sleep queue code except to implement an assertion in cv_destroy(). - Change sleepq_broadcast() and sleepq_signal() to require that the chain lock is already locked. For condition variables, this lets the cv_broadcast() and cv_signal() functions lock the sleep queue chain lock while testing the waiters count. This means that the waiters count internal to condition variables is no longer protected by the interlock mutex and cv_broadcast() and cv_signal() now no longer require that the interlock be held when they are called. This lets consumers of condition variables drop the lock before waking other threads which can result in fewer context switches. MFC after: 1 month
Diffstat (limited to 'sys/kern/subr_turnstile.c')
-rw-r--r--sys/kern/subr_turnstile.c46
1 files changed, 34 insertions, 12 deletions
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 980a517..3bb6e94 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -397,9 +397,21 @@ turnstile_free(struct turnstile *ts)
}
/*
+ * Lock the turnstile chain associated with the specified lock.
+ */
+void
+turnstile_lock(struct lock_object *lock)
+{
+ struct turnstile_chain *tc;
+
+ tc = TC_LOOKUP(lock);
+ mtx_lock_spin(&tc->tc_lock);
+}
+
+/*
* Look up the turnstile for a lock in the hash table locking the associated
- * turnstile chain along the way. Return with the turnstile chain locked.
- * If no turnstile is found in the hash table, NULL is returned.
+ * turnstile chain along the way. If no turnstile is found in the hash
+ * table, NULL is returned.
*/
struct turnstile *
turnstile_lookup(struct lock_object *lock)
@@ -408,7 +420,7 @@ turnstile_lookup(struct lock_object *lock)
struct turnstile *ts;
tc = TC_LOOKUP(lock);
- mtx_lock_spin(&tc->tc_lock);
+ mtx_assert(&tc->tc_lock, MA_OWNED);
LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
if (ts->ts_lockobj == lock)
return (ts);
@@ -432,13 +444,16 @@ turnstile_release(struct lock_object *lock)
* owner appropriately.
*/
void
-turnstile_claim(struct turnstile *ts)
+turnstile_claim(struct lock_object *lock)
{
struct turnstile_chain *tc;
+ struct turnstile *ts;
struct thread *td, *owner;
- tc = TC_LOOKUP(ts->ts_lockobj);
+ tc = TC_LOOKUP(lock);
mtx_assert(&tc->tc_lock, MA_OWNED);
+ ts = turnstile_lookup(lock);
+ MPASS(ts != NULL);
owner = curthread;
mtx_lock_spin(&td_contested_lock);
@@ -460,16 +475,16 @@ turnstile_claim(struct turnstile *ts)
}
/*
- * Block the current thread on the turnstile ts. This function will context
- * switch and not return until this thread has been woken back up. This
- * function must be called with the appropriate turnstile chain locked and
- * will return with it unlocked.
+ * Block the current thread on the turnstile associated with 'lock'. This
+ * function will context switch and not return until this thread has been
+ * woken back up. This function must be called with the appropriate
+ * turnstile chain locked and will return with it unlocked.
*/
void
-turnstile_wait(struct turnstile *ts, struct lock_object *lock,
- struct thread *owner)
+turnstile_wait(struct lock_object *lock, struct thread *owner)
{
struct turnstile_chain *tc;
+ struct turnstile *ts;
struct thread *td, *td1;
td = curthread;
@@ -479,7 +494,14 @@ turnstile_wait(struct turnstile *ts, struct lock_object *lock,
MPASS(owner != NULL);
MPASS(owner->td_proc->p_magic == P_MAGIC);
- /* If the passed in turnstile is NULL, use this thread's turnstile. */
+ /* Look up the turnstile associated with the lock 'lock'. */
+ ts = turnstile_lookup(lock);
+
+ /*
+ * If the lock does not already have a turnstile, use this thread's
+ * turnstile. Otherwise insert the current thread into the
+ * turnstile already in use by this lock.
+ */
if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
tc->tc_depth++;
OpenPOWER on IntegriCloud