-rw-r--r--   sys/kern/kern_condvar.c       31
-rw-r--r--   sys/kern/kern_mutex.c          9
-rw-r--r--   sys/kern/kern_synch.c          7
-rw-r--r--   sys/kern/subr_sleepqueue.c    45
-rw-r--r--   sys/kern/subr_turnstile.c     46
-rw-r--r--   sys/sys/sleepqueue.h          23
-rw-r--r--   sys/sys/turnstile.h           33
7 files changed, 117 insertions, 77 deletions
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index fc0f799..8ccea3a 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -76,8 +76,9 @@ void
cv_destroy(struct cv *cvp)
{
#ifdef INVARIANTS
- struct sleepqueue *sq;
+ struct sleepqueue *sq;
+ sleepq_lock(cvp);
sq = sleepq_lookup(cvp);
sleepq_release(cvp);
KASSERT(sq == NULL, ("%s: associated sleep queue non-empty", __func__));
@@ -94,7 +95,6 @@ cv_destroy(struct cv *cvp)
void
cv_wait(struct cv *cvp, struct mtx *mp)
{
- struct sleepqueue *sq;
struct thread *td;
WITNESS_SAVE_DECL(mp);
@@ -118,13 +118,13 @@ cv_wait(struct cv *cvp, struct mtx *mp)
return;
}
- sq = sleepq_lookup(cvp);
+ sleepq_lock(cvp);
cvp->cv_waiters++;
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
sleepq_wait(cvp);
#ifdef KTRACE
@@ -145,7 +145,6 @@ cv_wait(struct cv *cvp, struct mtx *mp)
int
cv_wait_sig(struct cv *cvp, struct mtx *mp)
{
- struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int rval, sig;
@@ -172,7 +171,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
return (0);
}
- sq = sleepq_lookup(cvp);
+ sleepq_lock(cvp);
/*
* Don't bother sleeping if we are exiting and not the exiting
@@ -190,7 +189,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
+ sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
SLEEPQ_INTERRUPTIBLE);
sig = sleepq_catch_signals(cvp);
rval = sleepq_wait_sig(cvp);
@@ -216,7 +215,6 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
int
cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
{
- struct sleepqueue *sq;
struct thread *td;
int rval;
WITNESS_SAVE_DECL(mp);
@@ -242,13 +240,13 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
return 0;
}
- sq = sleepq_lookup(cvp);
+ sleepq_lock(cvp);
cvp->cv_waiters++;
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
sleepq_set_timeout(cvp, timo);
rval = sleepq_timedwait(cvp);
@@ -272,7 +270,6 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
int
cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
{
- struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int rval;
@@ -301,7 +298,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
return 0;
}
- sq = sleepq_lookup(cvp);
+ sleepq_lock(cvp);
/*
* Don't bother sleeping if we are exiting and not the exiting
@@ -319,7 +316,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
+ sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
SLEEPQ_INTERRUPTIBLE);
sleepq_set_timeout(cvp, timo);
sig = sleepq_catch_signals(cvp);
@@ -349,10 +346,12 @@ void
cv_signal(struct cv *cvp)
{
+ sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters--;
sleepq_signal(cvp, SLEEPQ_CONDVAR, -1);
- }
+ } else
+ sleepq_release(cvp);
}
/*
@@ -363,8 +362,10 @@ void
cv_broadcastpri(struct cv *cvp, int pri)
{
+ sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters = 0;
sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri);
- }
+ } else
+ sleepq_release(cvp);
}
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 012f304..af14067 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -440,7 +440,6 @@ void
_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
int line)
{
- struct turnstile *ts;
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
struct thread *owner;
#endif
@@ -476,7 +475,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
contested = 1;
atomic_add_int(&m->mtx_contest_holding, 1);
#endif
- ts = turnstile_lookup(&m->mtx_object);
+ turnstile_lock(&m->mtx_object);
v = m->mtx_lock;
/*
@@ -499,9 +498,8 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
* necessary.
*/
if (v == MTX_CONTESTED) {
- MPASS(ts != NULL);
m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
- turnstile_claim(ts);
+ turnstile_claim(&m->mtx_object);
break;
}
#endif
@@ -557,7 +555,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
/*
* Block on the turnstile.
*/
- turnstile_wait(ts, &m->mtx_object, mtx_owner(m));
+ turnstile_wait(&m->mtx_object, mtx_owner(m));
}
#ifdef KTR
@@ -645,6 +643,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
return;
}
+ turnstile_lock(&m->mtx_object);
ts = turnstile_lookup(&m->mtx_object);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
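
An illustrative sketch (not part of the patch itself) of the contested-acquire path that _mtx_lock_sleep() now follows with the lock-object-based turnstile calls. The function name example_contested_acquire and its explicit 'owner' parameter are hypothetical; retry logic, adaptive spinning, and tracing are omitted.

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/turnstile.h>

/*
 * Hypothetical, condensed rendering of the contested path: 'td' is the
 * current thread and 'owner' is the thread currently holding 'm'
 * (derived from m->mtx_lock by the real code).
 */
static void
example_contested_acquire(struct mtx *m, struct thread *td, struct thread *owner)
{
	turnstile_lock(&m->mtx_object);		/* lock the turnstile chain */

	if (m->mtx_lock == MTX_CONTESTED) {
		/* The lock was released to us; inherit its turnstile. */
		m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
		turnstile_claim(&m->mtx_object);
		return;
	}

	/* Block until woken; turnstile_wait() returns with the chain unlocked. */
	turnstile_wait(&m->mtx_object, owner);
}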
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 52863e3..d67887a 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -122,7 +122,6 @@ msleep(ident, mtx, priority, wmesg, timo)
int priority, timo;
const char *wmesg;
{
- struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int catch, rval, sig, flags;
@@ -165,7 +164,7 @@ msleep(ident, mtx, priority, wmesg, timo)
if (TD_ON_SLEEPQ(td))
sleepq_remove(td, td->td_wchan);
- sq = sleepq_lookup(ident);
+ sleepq_lock(ident);
if (catch) {
/*
* Don't bother sleeping if we are exiting and not the exiting
@@ -201,7 +200,7 @@ msleep(ident, mtx, priority, wmesg, timo)
flags = SLEEPQ_MSLEEP;
if (catch)
flags |= SLEEPQ_INTERRUPTIBLE;
- sleepq_add(sq, ident, mtx, wmesg, flags);
+ sleepq_add(ident, mtx, wmesg, flags);
if (timo)
sleepq_set_timeout(ident, timo);
if (catch) {
@@ -250,6 +249,7 @@ wakeup(ident)
register void *ident;
{
+ sleepq_lock(ident);
sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1);
}
@@ -263,6 +263,7 @@ wakeup_one(ident)
register void *ident;
{
+ sleepq_lock(ident);
sleepq_signal(ident, SLEEPQ_MSLEEP, -1);
}
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index 7e86e22..eeb8859 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -113,8 +113,8 @@ struct sleepqueue {
LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */
LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */
void *sq_wchan; /* (c) Wait channel. */
- int sq_type; /* (c) Queue type. */
#ifdef INVARIANTS
+ int sq_type; /* (c) Queue type. */
struct mtx *sq_lock; /* (c) Associated lock. */
#endif
};
@@ -208,9 +208,21 @@ sleepq_free(struct sleepqueue *sq)
}
/*
+ * Lock the sleep queue chain associated with the specified wait channel.
+ */
+void
+sleepq_lock(void *wchan)
+{
+ struct sleepqueue_chain *sc;
+
+ sc = SC_LOOKUP(wchan);
+ mtx_lock_spin(&sc->sc_lock);
+}
+
+/*
* Look up the sleep queue associated with a given wait channel in the hash
- * table locking the associated sleep queue chain. Return holdind the sleep
- * queue chain lock. If no queue is found in the table, NULL is returned.
+ * table locking the associated sleep queue chain. If no queue is found in
+ * the table, NULL is returned.
*/
struct sleepqueue *
sleepq_lookup(void *wchan)
@@ -220,7 +232,7 @@ sleepq_lookup(void *wchan)
KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
sc = SC_LOOKUP(wchan);
- mtx_lock_spin(&sc->sc_lock);
+ mtx_assert(&sc->sc_lock, MA_OWNED);
LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
if (sq->sq_wchan == wchan)
return (sq);
@@ -246,10 +258,10 @@ sleepq_release(void *wchan)
* woken up.
*/
void
-sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
- const char *wmesg, int flags)
+sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags)
{
struct sleepqueue_chain *sc;
+ struct sleepqueue *sq;
struct thread *td, *td1;
td = curthread;
@@ -258,7 +270,14 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
MPASS(td->td_sleepqueue != NULL);
MPASS(wchan != NULL);
- /* If the passed in sleep queue is NULL, use this thread's queue. */
+ /* Look up the sleep queue associated with the wait channel 'wchan'. */
+ sq = sleepq_lookup(wchan);
+
+ /*
+ * If the wait channel does not already have a sleep queue, use
+ * this thread's sleep queue. Otherwise, insert the current thread
+ * into the sleep queue already in use by this wait channel.
+ */
if (sq == NULL) {
#ifdef SLEEPQUEUE_PROFILING
sc->sc_depth++;
@@ -278,12 +297,13 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
sq->sq_wchan = wchan;
#ifdef INVARIANTS
sq->sq_lock = lock;
-#endif
sq->sq_type = flags & SLEEPQ_TYPE;
+#endif
TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
} else {
MPASS(wchan == sq->sq_wchan);
MPASS(lock == sq->sq_lock);
+ MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq)
if (td1->td_priority > td->td_priority)
break;
@@ -368,6 +388,7 @@ sleepq_catch_signals(void *wchan)
* thread was removed from the sleep queue while we were blocked
* above, then clear TDF_SINTR before returning.
*/
+ sleepq_lock(wchan);
sq = sleepq_lookup(wchan);
mtx_lock_spin(&sched_lock);
if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
@@ -665,9 +686,6 @@ sleepq_signal(void *wchan, int flags, int pri)
}
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
- /* XXX: Do for all sleep queues eventually. */
- if (flags & SLEEPQ_CONDVAR)
- mtx_assert(sq->sq_lock, MA_OWNED);
/* Remove first thread from queue and awaken it. */
td = TAILQ_FIRST(&sq->sq_blocked);
@@ -695,9 +713,6 @@ sleepq_broadcast(void *wchan, int flags, int pri)
}
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
- /* XXX: Do for all sleep queues eventually. */
- if (flags & SLEEPQ_CONDVAR)
- mtx_assert(sq->sq_lock, MA_OWNED);
/* Move blocked threads from the sleep queue to a temporary list. */
TAILQ_INIT(&list);
@@ -739,6 +754,7 @@ sleepq_timeout(void *arg)
if (TD_ON_SLEEPQ(td)) {
wchan = td->td_wchan;
mtx_unlock_spin(&sched_lock);
+ sleepq_lock(wchan);
sq = sleepq_lookup(wchan);
mtx_lock_spin(&sched_lock);
} else {
@@ -802,6 +818,7 @@ sleepq_remove(struct thread *td, void *wchan)
* bail.
*/
MPASS(wchan != NULL);
+ sleepq_lock(wchan);
sq = sleepq_lookup(wchan);
mtx_lock_spin(&sched_lock);
if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
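
An illustrative sketch (not part of the patch itself): sleepq_lookup() now only asserts that the chain lock is held, so every lookup must be bracketed by sleepq_lock() and sleepq_release(), as cv_destroy() does above. The helper name example_sleepq_empty is hypothetical.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>

/* Hypothetical helper: report whether any thread is asleep on 'wchan'. */
static int
example_sleepq_empty(void *wchan)
{
	struct sleepqueue *sq;

	sleepq_lock(wchan);		/* lock the chain for this wait channel */
	sq = sleepq_lookup(wchan);	/* NULL means no sleepers */
	sleepq_release(wchan);		/* drop the chain lock */
	return (sq == NULL);
}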
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 980a517..3bb6e94 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -397,9 +397,21 @@ turnstile_free(struct turnstile *ts)
}
/*
+ * Lock the turnstile chain associated with the specified lock.
+ */
+void
+turnstile_lock(struct lock_object *lock)
+{
+ struct turnstile_chain *tc;
+
+ tc = TC_LOOKUP(lock);
+ mtx_lock_spin(&tc->tc_lock);
+}
+
+/*
* Look up the turnstile for a lock in the hash table locking the associated
- * turnstile chain along the way. Return with the turnstile chain locked.
- * If no turnstile is found in the hash table, NULL is returned.
+ * turnstile chain along the way. If no turnstile is found in the hash
+ * table, NULL is returned.
*/
struct turnstile *
turnstile_lookup(struct lock_object *lock)
@@ -408,7 +420,7 @@ turnstile_lookup(struct lock_object *lock)
struct turnstile *ts;
tc = TC_LOOKUP(lock);
- mtx_lock_spin(&tc->tc_lock);
+ mtx_assert(&tc->tc_lock, MA_OWNED);
LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
if (ts->ts_lockobj == lock)
return (ts);
@@ -432,13 +444,16 @@ turnstile_release(struct lock_object *lock)
* owner appropriately.
*/
void
-turnstile_claim(struct turnstile *ts)
+turnstile_claim(struct lock_object *lock)
{
struct turnstile_chain *tc;
+ struct turnstile *ts;
struct thread *td, *owner;
- tc = TC_LOOKUP(ts->ts_lockobj);
+ tc = TC_LOOKUP(lock);
mtx_assert(&tc->tc_lock, MA_OWNED);
+ ts = turnstile_lookup(lock);
+ MPASS(ts != NULL);
owner = curthread;
mtx_lock_spin(&td_contested_lock);
@@ -460,16 +475,16 @@ turnstile_claim(struct turnstile *ts)
}
/*
- * Block the current thread on the turnstile ts. This function will context
- * switch and not return until this thread has been woken back up. This
- * function must be called with the appropriate turnstile chain locked and
- * will return with it unlocked.
+ * Block the current thread on the turnstile associated with 'lock'. This
+ * function will context switch and not return until this thread has been
+ * woken back up. This function must be called with the appropriate
+ * turnstile chain locked and will return with it unlocked.
*/
void
-turnstile_wait(struct turnstile *ts, struct lock_object *lock,
- struct thread *owner)
+turnstile_wait(struct lock_object *lock, struct thread *owner)
{
struct turnstile_chain *tc;
+ struct turnstile *ts;
struct thread *td, *td1;
td = curthread;
@@ -479,7 +494,14 @@ turnstile_wait(struct turnstile *ts, struct lock_object *lock,
MPASS(owner != NULL);
MPASS(owner->td_proc->p_magic == P_MAGIC);
- /* If the passed in turnstile is NULL, use this thread's turnstile. */
+ /* Look up the turnstile associated with the lock 'lock'. */
+ ts = turnstile_lookup(lock);
+
+ /*
+ * If the lock does not already have a turnstile, use this thread's
+ * turnstile. Otherwise insert the current thread into the
+ * turnstile already in use by this lock.
+ */
if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
tc->tc_depth++;
diff --git a/sys/sys/sleepqueue.h b/sys/sys/sleepqueue.h
index 3314ddc..de1a85f 100644
--- a/sys/sys/sleepqueue.h
+++ b/sys/sys/sleepqueue.h
@@ -36,15 +36,13 @@
* Sleep queue interface. Sleep/wakeup and condition variables use a sleep
* queue for the queue of threads blocked on a sleep channel.
*
- * A thread calls sleepq_lookup() to look up the proper sleep queue in the
- * hash table that is associated with a specified wait channel. This
- * function returns a pointer to the queue and locks the associated sleep
- * queue chain. A thread calls sleepq_add() to add themself onto a sleep
- * queue and calls one of the sleepq_wait() functions to actually go to
- * sleep. If a thread needs to abort a sleep operation it should call
- * sleepq_release() to unlock the associated sleep queue chain lock. If
- * the thread also needs to remove itself from a queue it just enqueued
- * itself on, it can use sleepq_remove().
+ * A thread calls sleepq_lock() to lock the sleep queue chain associated
+ * with a given wait channel. A thread can then call sleepq_add() to
+ * add themself onto a sleep queue and call one of the sleepq_wait()
+ * functions to actually go to sleep. If a thread needs to abort a sleep
+ * operation it should call sleepq_release() to unlock the associated sleep
+ * queue chain lock. If the thread also needs to remove itself from a queue
+ * it just enqueued itself on, it can use sleepq_remove() instead.
*
* If the thread only wishes to sleep for a limited amount of time, it can
* call sleepq_set_timeout() after sleepq_add() to setup a timeout. It
@@ -64,7 +62,8 @@
* on the specified wait channel. A thread sleeping in an interruptible
* sleep can be interrupted by calling sleepq_abort(). A thread can also
* be removed from a specified sleep queue using the sleepq_remove()
- * function.
+ * function. Note that the sleep queue chain must first be locked via
+ * sleepq_lock() when calling sleepq_signal() and sleepq_broadcast().
*
* Each thread allocates a sleep queue at thread creation via sleepq_alloc()
* and releases it at thread destruction via sleepq_free(). Note that
@@ -89,13 +88,13 @@ struct thread;
void init_sleepqueues(void);
void sleepq_abort(struct thread *td);
-void sleepq_add(struct sleepqueue *, void *, struct mtx *, const char *,
- int);
+void sleepq_add(void *, struct mtx *, const char *, int);
struct sleepqueue *sleepq_alloc(void);
void sleepq_broadcast(void *, int, int);
int sleepq_calc_signal_retval(int sig);
int sleepq_catch_signals(void *wchan);
void sleepq_free(struct sleepqueue *);
+void sleepq_lock(void *);
struct sleepqueue *sleepq_lookup(void *);
void sleepq_release(void *);
void sleepq_remove(struct thread *, void *);
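
An illustrative sketch (not part of the patch itself) of the sequence the comment above describes, for a hypothetical wait channel 'chan' protected by mutex 'lock'. The names example_sleep and example_wakeup are made up; the sleepq_* calls and argument order follow the kern_condvar.c and kern_synch.c hunks earlier in this diff.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>

/* Hypothetical sleeper: block on 'chan' until example_wakeup() runs. */
static void
example_sleep(void *chan, struct mtx *lock)
{
	sleepq_lock(chan);			/* lock the sleep queue chain */
	mtx_unlock(lock);			/* drop the interlock before sleeping */
	sleepq_add(chan, lock, "example", SLEEPQ_MSLEEP);
	sleepq_wait(chan);			/* sleep until awakened */
	mtx_lock(lock);				/* re-take the interlock afterwards */
}

/* Hypothetical waker: the chain must be locked before sleepq_broadcast(). */
static void
example_wakeup(void *chan)
{
	sleepq_lock(chan);
	sleepq_broadcast(chan, SLEEPQ_MSLEEP, -1);
}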
diff --git a/sys/sys/turnstile.h b/sys/sys/turnstile.h
index 68b7e79..64d3bd3 100644
--- a/sys/sys/turnstile.h
+++ b/sys/sys/turnstile.h
@@ -36,20 +36,21 @@
* Turnstile interface. Non-sleepable locks use a turnstile for the
* queue of threads blocked on them when they are contested.
*
- * A thread calls turnstile_lookup() to look up the proper turnstile in
- * the hash table. This function returns a pointer to the turnstile and
- * locks the associated turnstile chain. A thread calls turnstile_wait()
- * when the lock is contested to be put on the queue and block. If a
- * thread needs to retry a lock operation instead of blocking, it should
- * call turnstile_release() to unlock the associated turnstile chain lock.
+ * A thread calls turnstile_lock() to lock the turnstile chain associated
+ * with a given lock. A thread calls turnstile_wait() when the lock is
+ * contested to be put on the queue and block. If a thread needs to retry
+ * a lock operation instead of blocking, it should call turnstile_release()
+ * to unlock the associated turnstile chain lock.
*
- * When a lock is released, either turnstile_signal() or turnstile_broadcast()
- * is called to mark blocked threads for a pending wakeup.
- * turnstile_signal() marks the highest priority blocked thread while
- * turnstile_broadcast() marks all blocked threads. The turnstile_signal()
- * function returns true if the turnstile became empty as a result. After
- * the higher level code finishes releasing the lock, turnstile_unpend()
- * must be called to wakeup the pending thread(s).
+ * When a lock is released, the thread calls turnstile_lookup() to look
+ * up the turnstile associated with the given lock in the hash table. Then
+ * it calls either turnstile_signal() or turnstile_broadcast() to mark
+ * blocked threads for a pending wakeup. turnstile_signal() marks the
+ * highest priority blocked thread while turnstile_broadcast() marks all
+ * blocked threads. The turnstile_signal() function returns true if the
+ * turnstile became empty as a result. After the higher level code finishes
+ * releasing the lock, turnstile_unpend() must be called to wake up the
+ * pending thread(s).
*
* When a lock is acquired that already has at least one thread contested
* on it, the new owner of the lock must claim ownership of the turnstile
@@ -75,16 +76,16 @@ struct turnstile;
void init_turnstiles(void);
struct turnstile *turnstile_alloc(void);
void turnstile_broadcast(struct turnstile *);
-void turnstile_claim(struct turnstile *);
+void turnstile_claim(struct lock_object *);
int turnstile_empty(struct turnstile *);
void turnstile_free(struct turnstile *);
struct thread *turnstile_head(struct turnstile *);
+void turnstile_lock(struct lock_object *);
struct turnstile *turnstile_lookup(struct lock_object *);
void turnstile_release(struct lock_object *);
int turnstile_signal(struct turnstile *);
void turnstile_unpend(struct turnstile *);
-void turnstile_wait(struct turnstile *, struct lock_object *,
- struct thread *);
+void turnstile_wait(struct lock_object *, struct thread *);
#endif /* _KERNEL */
#endif /* _SYS_TURNSTILE_H_ */
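
An illustrative sketch (not part of the patch itself) of the release-side sequence the comment above describes, for a hypothetical lock object 'lo' that its owner is in the process of releasing. The name example_wakeup_waiters is made up; the call sequence follows the revised interface comment and the _mtx_unlock_sleep() hunk earlier in this diff.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/turnstile.h>

/*
 * Hypothetical release path: wake every thread blocked on 'lo'.
 * turnstile_lookup() now only asserts the chain lock, so the caller
 * must take it first with turnstile_lock().
 */
static void
example_wakeup_waiters(struct lock_object *lo)
{
	struct turnstile *ts;

	turnstile_lock(lo);		/* lock the turnstile chain */
	ts = turnstile_lookup(lo);	/* find the turnstile, if any */
	if (ts == NULL) {
		turnstile_release(lo);	/* no waiters: just drop the chain lock */
		return;
	}
	turnstile_broadcast(ts);	/* mark all blocked threads for wakeup */
	/* ... the lock's release code completes here ... */
	turnstile_unpend(ts);		/* wake the pending threads */
}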