From a8c1c80ef5a33d82995b1cd3c3100a4b0af157b5 Mon Sep 17 00:00:00 2001 From: jhb Date: Tue, 12 Oct 2004 18:36:20 +0000 Subject: Refine the turnstile and sleep queue interfaces just a bit: - Add a new _lock() call to each API that locks the associated chain lock for a lock_object pointer or wait channel. The _lookup() functions now require that the chain lock be locked via _lock() when they are called. - Change sleepq_add(), turnstile_wait() and turnstile_claim() to lookup the associated queue structure internally via _lookup() rather than accepting a pointer from the caller. For turnstiles, this means that the actual lookup of the turnstile in the hash table is only done when the thread actually blocks rather than being done on each loop iteration in _mtx_lock_sleep(). For sleep queues, this means that sleepq_lookup() is no longer used outside of the sleep queue code except to implement an assertion in cv_destroy(). - Change sleepq_broadcast() and sleepq_signal() to require that the chain lock is already held. For condition variables, this lets the cv_broadcast() and cv_signal() functions lock the sleep queue chain lock while testing the waiters count. This means that the waiters count internal to condition variables is no longer protected by the interlock mutex and cv_broadcast() and cv_signal() now no longer require that the interlock be held when they are called. This lets consumers of condition variables drop the lock before waking other threads which can result in fewer context switches. 
MFC after: 1 month --- sys/kern/kern_condvar.c | 31 ++++++++++++++++--------------- sys/kern/kern_mutex.c | 9 ++++----- sys/kern/kern_synch.c | 7 ++++--- sys/kern/subr_sleepqueue.c | 45 +++++++++++++++++++++++++++++++-------------- sys/kern/subr_turnstile.c | 46 ++++++++++++++++++++++++++++++++++------------ 5 files changed, 89 insertions(+), 49 deletions(-) (limited to 'sys/kern') diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c index fc0f799..8ccea3a 100644 --- a/sys/kern/kern_condvar.c +++ b/sys/kern/kern_condvar.c @@ -76,8 +76,9 @@ void cv_destroy(struct cv *cvp) { #ifdef INVARIANTS - struct sleepqueue *sq; + struct sleepqueue *sq; + sleepq_lock(cvp); sq = sleepq_lookup(cvp); sleepq_release(cvp); KASSERT(sq == NULL, ("%s: associated sleep queue non-empty", __func__)); @@ -94,7 +95,6 @@ cv_destroy(struct cv *cvp) void cv_wait(struct cv *cvp, struct mtx *mp) { - struct sleepqueue *sq; struct thread *td; WITNESS_SAVE_DECL(mp); @@ -118,13 +118,13 @@ cv_wait(struct cv *cvp, struct mtx *mp) return; } - sq = sleepq_lookup(cvp); + sleepq_lock(cvp); cvp->cv_waiters++; DROP_GIANT(); mtx_unlock(mp); - sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR); + sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR); sleepq_wait(cvp); #ifdef KTRACE @@ -145,7 +145,6 @@ cv_wait(struct cv *cvp, struct mtx *mp) int cv_wait_sig(struct cv *cvp, struct mtx *mp) { - struct sleepqueue *sq; struct thread *td; struct proc *p; int rval, sig; @@ -172,7 +171,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp) return (0); } - sq = sleepq_lookup(cvp); + sleepq_lock(cvp); /* * Don't bother sleeping if we are exiting and not the exiting @@ -190,7 +189,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp) DROP_GIANT(); mtx_unlock(mp); - sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR | + sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR | SLEEPQ_INTERRUPTIBLE); sig = sleepq_catch_signals(cvp); rval = sleepq_wait_sig(cvp); @@ -216,7 +215,6 @@ cv_wait_sig(struct cv 
*cvp, struct mtx *mp) int cv_timedwait(struct cv *cvp, struct mtx *mp, int timo) { - struct sleepqueue *sq; struct thread *td; int rval; WITNESS_SAVE_DECL(mp); @@ -242,13 +240,13 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo) return 0; } - sq = sleepq_lookup(cvp); + sleepq_lock(cvp); cvp->cv_waiters++; DROP_GIANT(); mtx_unlock(mp); - sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR); + sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR); sleepq_set_timeout(cvp, timo); rval = sleepq_timedwait(cvp); @@ -272,7 +270,6 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo) int cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo) { - struct sleepqueue *sq; struct thread *td; struct proc *p; int rval; @@ -301,7 +298,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo) return 0; } - sq = sleepq_lookup(cvp); + sleepq_lock(cvp); /* * Don't bother sleeping if we are exiting and not the exiting @@ -319,7 +316,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo) DROP_GIANT(); mtx_unlock(mp); - sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR | + sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR | SLEEPQ_INTERRUPTIBLE); sleepq_set_timeout(cvp, timo); sig = sleepq_catch_signals(cvp); @@ -349,10 +346,12 @@ void cv_signal(struct cv *cvp) { + sleepq_lock(cvp); if (cvp->cv_waiters > 0) { cvp->cv_waiters--; sleepq_signal(cvp, SLEEPQ_CONDVAR, -1); - } + } else + sleepq_release(cvp); } /* @@ -363,8 +362,10 @@ void cv_broadcastpri(struct cv *cvp, int pri) { + sleepq_lock(cvp); if (cvp->cv_waiters > 0) { cvp->cv_waiters = 0; sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri); - } + } else + sleepq_release(cvp); } diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index 012f304..af14067 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -440,7 +440,6 @@ void _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file, int line) { - struct turnstile *ts; #if defined(SMP) && 
!defined(NO_ADAPTIVE_MUTEXES) struct thread *owner; #endif @@ -476,7 +475,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file, contested = 1; atomic_add_int(&m->mtx_contest_holding, 1); #endif - ts = turnstile_lookup(&m->mtx_object); + turnstile_lock(&m->mtx_object); v = m->mtx_lock; /* @@ -499,9 +498,8 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file, * necessary. */ if (v == MTX_CONTESTED) { - MPASS(ts != NULL); m->mtx_lock = (uintptr_t)td | MTX_CONTESTED; - turnstile_claim(ts); + turnstile_claim(&m->mtx_object); break; } #endif @@ -557,7 +555,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file, /* * Block on the turnstile. */ - turnstile_wait(ts, &m->mtx_object, mtx_owner(m)); + turnstile_wait(&m->mtx_object, mtx_owner(m)); } #ifdef KTR @@ -645,6 +643,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) return; } + turnstile_lock(&m->mtx_object); ts = turnstile_lookup(&m->mtx_object); if (LOCK_LOG_TEST(&m->mtx_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 52863e3..d67887a 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -122,7 +122,6 @@ msleep(ident, mtx, priority, wmesg, timo) int priority, timo; const char *wmesg; { - struct sleepqueue *sq; struct thread *td; struct proc *p; int catch, rval, sig, flags; @@ -165,7 +164,7 @@ msleep(ident, mtx, priority, wmesg, timo) if (TD_ON_SLEEPQ(td)) sleepq_remove(td, td->td_wchan); - sq = sleepq_lookup(ident); + sleepq_lock(ident); if (catch) { /* * Don't bother sleeping if we are exiting and not the exiting @@ -201,7 +200,7 @@ msleep(ident, mtx, priority, wmesg, timo) flags = SLEEPQ_MSLEEP; if (catch) flags |= SLEEPQ_INTERRUPTIBLE; - sleepq_add(sq, ident, mtx, wmesg, flags); + sleepq_add(ident, mtx, wmesg, flags); if (timo) sleepq_set_timeout(ident, timo); if (catch) { @@ -250,6 +249,7 @@ wakeup(ident) 
register void *ident; { + sleepq_lock(ident); sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1); } @@ -263,6 +263,7 @@ wakeup_one(ident) register void *ident; { + sleepq_lock(ident); sleepq_signal(ident, SLEEPQ_MSLEEP, -1); } diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c index 7e86e22..eeb8859 100644 --- a/sys/kern/subr_sleepqueue.c +++ b/sys/kern/subr_sleepqueue.c @@ -113,8 +113,8 @@ struct sleepqueue { LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */ LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */ void *sq_wchan; /* (c) Wait channel. */ - int sq_type; /* (c) Queue type. */ #ifdef INVARIANTS + int sq_type; /* (c) Queue type. */ struct mtx *sq_lock; /* (c) Associated lock. */ #endif }; @@ -208,9 +208,21 @@ sleepq_free(struct sleepqueue *sq) } /* + * Lock the sleep queue chain associated with the specified wait channel. + */ +void +sleepq_lock(void *wchan) +{ + struct sleepqueue_chain *sc; + + sc = SC_LOOKUP(wchan); + mtx_lock_spin(&sc->sc_lock); +} + +/* * Look up the sleep queue associated with a given wait channel in the hash - * table locking the associated sleep queue chain. Return holdind the sleep - * queue chain lock. If no queue is found in the table, NULL is returned. + * table locking the associated sleep queue chain. If no queue is found in + * the table, NULL is returned. */ struct sleepqueue * sleepq_lookup(void *wchan) @@ -220,7 +232,7 @@ sleepq_lookup(void *wchan) KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); sc = SC_LOOKUP(wchan); - mtx_lock_spin(&sc->sc_lock); + mtx_assert(&sc->sc_lock, MA_OWNED); LIST_FOREACH(sq, &sc->sc_queues, sq_hash) if (sq->sq_wchan == wchan) return (sq); @@ -246,10 +258,10 @@ sleepq_release(void *wchan) * woken up. 
*/ void -sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock, - const char *wmesg, int flags) +sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags) { struct sleepqueue_chain *sc; + struct sleepqueue *sq; struct thread *td, *td1; td = curthread; @@ -258,7 +270,14 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock, MPASS(td->td_sleepqueue != NULL); MPASS(wchan != NULL); - /* If the passed in sleep queue is NULL, use this thread's queue. */ + /* Look up the sleep queue associated with the wait channel 'wchan'. */ + sq = sleepq_lookup(wchan); + + /* + * If the wait channel does not already have a sleep queue, use + * this thread's sleep queue. Otherwise, insert the current thread + * into the sleep queue already in use by this wait channel. + */ if (sq == NULL) { #ifdef SLEEPQUEUE_PROFILING sc->sc_depth++; @@ -278,12 +297,13 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock, sq->sq_wchan = wchan; #ifdef INVARIANTS sq->sq_lock = lock; -#endif sq->sq_type = flags & SLEEPQ_TYPE; +#endif TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq); } else { MPASS(wchan == sq->sq_wchan); MPASS(lock == sq->sq_lock); + MPASS((flags & SLEEPQ_TYPE) == sq->sq_type); TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq) if (td1->td_priority > td->td_priority) break; @@ -368,6 +388,7 @@ sleepq_catch_signals(void *wchan) * thread was removed from the sleep queue while we were blocked * above, then clear TDF_SINTR before returning. */ + sleepq_lock(wchan); sq = sleepq_lookup(wchan); mtx_lock_spin(&sched_lock); if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) { @@ -665,9 +686,6 @@ sleepq_signal(void *wchan, int flags, int pri) } KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), ("%s: mismatch between sleep/wakeup and cv_*", __func__)); - /* XXX: Do for all sleep queues eventually. */ - if (flags & SLEEPQ_CONDVAR) - mtx_assert(sq->sq_lock, MA_OWNED); /* Remove first thread from queue and awaken it. 
*/ td = TAILQ_FIRST(&sq->sq_blocked); @@ -695,9 +713,6 @@ sleepq_broadcast(void *wchan, int flags, int pri) } KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), ("%s: mismatch between sleep/wakeup and cv_*", __func__)); - /* XXX: Do for all sleep queues eventually. */ - if (flags & SLEEPQ_CONDVAR) - mtx_assert(sq->sq_lock, MA_OWNED); /* Move blocked threads from the sleep queue to a temporary list. */ TAILQ_INIT(&list); @@ -739,6 +754,7 @@ sleepq_timeout(void *arg) if (TD_ON_SLEEPQ(td)) { wchan = td->td_wchan; mtx_unlock_spin(&sched_lock); + sleepq_lock(wchan); sq = sleepq_lookup(wchan); mtx_lock_spin(&sched_lock); } else { @@ -802,6 +818,7 @@ sleepq_remove(struct thread *td, void *wchan) * bail. */ MPASS(wchan != NULL); + sleepq_lock(wchan); sq = sleepq_lookup(wchan); mtx_lock_spin(&sched_lock); if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) { diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c index 980a517..3bb6e94 100644 --- a/sys/kern/subr_turnstile.c +++ b/sys/kern/subr_turnstile.c @@ -397,9 +397,21 @@ turnstile_free(struct turnstile *ts) } /* + * Lock the turnstile chain associated with the specified lock. + */ +void +turnstile_lock(struct lock_object *lock) +{ + struct turnstile_chain *tc; + + tc = TC_LOOKUP(lock); + mtx_lock_spin(&tc->tc_lock); +} + +/* * Look up the turnstile for a lock in the hash table locking the associated - * turnstile chain along the way. Return with the turnstile chain locked. - * If no turnstile is found in the hash table, NULL is returned. + * turnstile chain along the way. If no turnstile is found in the hash + * table, NULL is returned. 
*/ struct turnstile * turnstile_lookup(struct lock_object *lock) { @@ -408,7 +420,7 @@ turnstile_lookup(struct lock_object *lock) struct turnstile_chain *tc; struct turnstile *ts; tc = TC_LOOKUP(lock); - mtx_lock_spin(&tc->tc_lock); + mtx_assert(&tc->tc_lock, MA_OWNED); LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash) if (ts->ts_lockobj == lock) return (ts); @@ -432,13 +444,16 @@ turnstile_release(struct lock_object *lock) * owner appropriately. */ void -turnstile_claim(struct turnstile *ts) +turnstile_claim(struct lock_object *lock) { struct turnstile_chain *tc; + struct turnstile *ts; struct thread *td, *owner; - tc = TC_LOOKUP(ts->ts_lockobj); + tc = TC_LOOKUP(lock); mtx_assert(&tc->tc_lock, MA_OWNED); + ts = turnstile_lookup(lock); + MPASS(ts != NULL); owner = curthread; mtx_lock_spin(&td_contested_lock); @@ -460,16 +475,16 @@ turnstile_claim(struct turnstile *ts) } /* - * Block the current thread on the turnstile ts. This function will context - * switch and not return until this thread has been woken back up. This - * function must be called with the appropriate turnstile chain locked and - * will return with it unlocked. + * Block the current thread on the turnstile associated with 'lock'. This + * function will context switch and not return until this thread has been + * woken back up. This function must be called with the appropriate + * turnstile chain locked and will return with it unlocked. */ void -turnstile_wait(struct turnstile *ts, struct lock_object *lock, - struct thread *owner) +turnstile_wait(struct lock_object *lock, struct thread *owner) { struct turnstile_chain *tc; + struct turnstile *ts; struct thread *td, *td1; td = curthread; @@ -479,7 +494,14 @@ turnstile_wait(struct turnstile *ts, struct lock_object *lock, MPASS(owner != NULL); MPASS(owner->td_proc->p_magic == P_MAGIC); - /* If the passed in turnstile is NULL, use this thread's turnstile. */ + /* Look up the turnstile associated with the lock 'lock'. 
*/ + ts = turnstile_lookup(lock); + + /* + * If the lock does not already have a turnstile, use this thread's + * turnstile. Otherwise insert the current thread into the + * turnstile already in use by this lock. + */ if (ts == NULL) { #ifdef TURNSTILE_PROFILING tc->tc_depth++; -- cgit v1.1