Diffstat (limited to 'sys/kern/kern_condvar.c')
-rw-r--r--    sys/kern/kern_condvar.c    341
1 file changed, 46 insertions(+), 295 deletions(-)
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 580334a..0816213 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <sys/condvar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
+#include <sys/sleepqueue.h>
#include <sys/resourcevar.h>
#ifdef KTRACE
#include <sys/uio.h>
@@ -56,35 +57,6 @@ __FBSDID("$FreeBSD$");
mtx_assert((mp), MA_OWNED | MA_NOTRECURSED); \
} while (0)
-#ifdef INVARIANTS
-#define CV_WAIT_VALIDATE(cvp, mp) do { \
- if (TAILQ_EMPTY(&(cvp)->cv_waitq)) { \
- /* Only waiter. */ \
- (cvp)->cv_mtx = (mp); \
- } else { \
- /* \
- * Other waiter; assert that we're using the \
- * same mutex. \
- */ \
- KASSERT((cvp)->cv_mtx == (mp), \
- ("%s: Multiple mutexes", __func__)); \
- } \
-} while (0)
-
-#define CV_SIGNAL_VALIDATE(cvp) do { \
- if (!TAILQ_EMPTY(&(cvp)->cv_waitq)) { \
- KASSERT(mtx_owned((cvp)->cv_mtx), \
- ("%s: Mutex not owned", __func__)); \
- } \
-} while (0)
-
-#else
-#define CV_WAIT_VALIDATE(cvp, mp)
-#define CV_SIGNAL_VALIDATE(cvp)
-#endif
-
-static void cv_timedwait_end(void *arg);
-
/*
* Initialize a condition variable. Must be called before use.
*/
@@ -92,8 +64,6 @@ void
cv_init(struct cv *cvp, const char *desc)
{
- TAILQ_INIT(&cvp->cv_waitq);
- cvp->cv_mtx = NULL;
cvp->cv_description = desc;
}
@@ -104,82 +74,13 @@ cv_init(struct cv *cvp, const char *desc)
void
cv_destroy(struct cv *cvp)
{
+#ifdef INVARIANTS
+ struct sleepqueue *sq;
- KASSERT(cv_waitq_empty(cvp), ("%s: cv_waitq non-empty", __func__));
-}
-
-/*
- * Common code for cv_wait* functions. All require sched_lock.
- */
-
-/*
- * Switch context.
- */
-static __inline void
-cv_switch(struct thread *td)
-{
- TD_SET_SLEEPING(td);
- mi_switch(SW_VOL);
- CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)", td,
- td->td_proc->p_pid, td->td_proc->p_comm);
-}
-
-/*
- * Switch context, catching signals.
- */
-static __inline int
-cv_switch_catch(struct thread *td)
-{
- struct proc *p;
- int sig;
-
- /*
- * We put ourselves on the sleep queue and start our timeout before
- * calling cursig, as we could stop there, and a wakeup or a SIGCONT (or
- * both) could occur while we were stopped. A SIGCONT would cause us to
- * be marked as TDS_SLP without resuming us, thus we must be ready for
- * sleep when cursig is called. If the wakeup happens while we're
- * stopped, td->td_wchan will be 0 upon return from cursig,
- * and TD_ON_SLEEPQ() will return false.
- */
- td->td_flags |= TDF_SINTR;
- mtx_unlock_spin(&sched_lock);
- p = td->td_proc;
- PROC_LOCK(p);
- mtx_lock(&p->p_sigacts->ps_mtx);
- sig = cursig(td);
- mtx_unlock(&p->p_sigacts->ps_mtx);
- if (thread_suspend_check(1))
- sig = SIGSTOP;
- mtx_lock_spin(&sched_lock);
- PROC_UNLOCK(p);
- if (sig != 0) {
- if (TD_ON_SLEEPQ(td))
- cv_waitq_remove(td);
- TD_SET_RUNNING(td);
- } else if (TD_ON_SLEEPQ(td)) {
- cv_switch(td);
- }
- td->td_flags &= ~TDF_SINTR;
-
- return sig;
-}
-
-/*
- * Add a thread to the wait queue of a condition variable.
- */
-static __inline void
-cv_waitq_add(struct cv *cvp, struct thread *td)
-{
-
- td->td_flags |= TDF_CVWAITQ;
- TD_SET_ON_SLEEPQ(td);
- td->td_wchan = cvp;
- td->td_wmesg = cvp->cv_description;
- CTR3(KTR_PROC, "cv_waitq_add: thread %p (pid %d, %s)", td,
- td->td_proc->p_pid, td->td_proc->p_comm);
- TAILQ_INSERT_TAIL(&cvp->cv_waitq, td, td_slpq);
- sched_sleep(td, td->td_priority);
+ sq = sleepq_lookup(cvp);
+ sleepq_release(cvp);
+ KASSERT(sq == NULL, ("%s: associated sleep queue non-empty", __func__));
+#endif
}
/*
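The converted cv_destroy() above no longer tears down a private wait queue; under INVARIANTS it only asserts, via sleepq_lookup()/sleepq_release(), that no sleep queue is still keyed on the condition variable. From a consumer's point of view the lifecycle is unchanged. A minimal sketch, assuming only the standard condvar(9) and mutex(9) interfaces (the example_* names and the include list are hypothetical, not part of this commit):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

static struct mtx example_mtx;          /* hypothetical lock guarding the shared state */
static struct cv  example_cv;           /* hypothetical condition variable */
static int        example_ready;        /* the predicate waiters sleep on */

static void
example_setup(void)
{
        mtx_init(&example_mtx, "example lock", NULL, MTX_DEF);
        cv_init(&example_cv, "example");
}

static void
example_teardown(void)
{
        /*
         * All waiters must be gone by now; with INVARIANTS the new
         * cv_destroy() asserts that no sleep queue is still associated
         * with the cv.
         */
        cv_destroy(&example_cv);
        mtx_destroy(&example_mtx);
}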
@@ -192,6 +93,7 @@ cv_waitq_add(struct cv *cvp, struct thread *td)
void
cv_wait(struct cv *cvp, struct mtx *mp)
{
+ struct sleepqueue *sq;
struct thread *td;
WITNESS_SAVE_DECL(mp);
@@ -205,7 +107,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
"Waiting on \"%s\"", cvp->cv_description);
WITNESS_SAVE(&mp->mtx_object, mp);
- if (cold ) {
+ if (cold || panicstr) {
/*
* During autoconfiguration, just give interrupts
* a chance, then just return. Don't run any other
@@ -215,17 +117,14 @@ cv_wait(struct cv *cvp, struct mtx *mp)
return;
}
- mtx_lock_spin(&sched_lock);
-
- CV_WAIT_VALIDATE(cvp, mp);
+ sq = sleepq_lookup(cvp);
DROP_GIANT();
mtx_unlock(mp);
- cv_waitq_add(cvp, td);
- cv_switch(td);
+ sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_wait(cvp);
- mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
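With the hunk above, cv_wait() reduces to: look up the sleep queue keyed on the cv, drop Giant and the caller's mutex, enqueue with sleepq_add() tagged SLEEPQ_CONDVAR, and block in sleepq_wait(). cv_wait() still returns with the mutex reacquired, so the usual predicate loop on the caller side is unchanged. A sketch, continuing the hypothetical example_* objects introduced above:

/* Consumer side: sleep until the predicate becomes true (hypothetical names). */
static void
example_consume(void)
{
        mtx_lock(&example_mtx);
        while (example_ready == 0)
                cv_wait(&example_cv, &example_mtx);     /* drops and reacquires example_mtx */
        example_ready = 0;
        mtx_unlock(&example_mtx);
}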
@@ -244,10 +143,10 @@ cv_wait(struct cv *cvp, struct mtx *mp)
int
cv_wait_sig(struct cv *cvp, struct mtx *mp)
{
+ struct sleepqueue *sq;
struct thread *td;
struct proc *p;
- int rval;
- int sig;
+ int rval, sig;
WITNESS_SAVE_DECL(mp);
td = curthread;
@@ -272,32 +171,25 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
return 0;
}
- mtx_lock_spin(&sched_lock);
+ sq = sleepq_lookup(cvp);
- CV_WAIT_VALIDATE(cvp, mp);
+ /* XXX: Missing the threading checks from msleep! */
DROP_GIANT();
mtx_unlock(mp);
- cv_waitq_add(cvp, td);
- sig = cv_switch_catch(td);
-
- mtx_unlock_spin(&sched_lock);
+ sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sig = sleepq_catch_signals(cvp);
+ /*
+ * XXX: Missing magic return value handling for no signal
+ * caught but thread woken up during check.
+ */
+ rval = sleepq_wait_sig(cvp);
+ if (rval == 0)
+ rval = sleepq_calc_signal_retval(sig);
+ /* XXX: Part of missing threading checks? */
PROC_LOCK(p);
- mtx_lock(&p->p_sigacts->ps_mtx);
- if (sig == 0) {
- sig = cursig(td); /* XXXKSE */
- if (sig == 0 && td->td_flags & TDF_INTERRUPT)
- rval = td->td_intrval;
- }
- if (sig != 0) {
- if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
- rval = EINTR;
- else
- rval = ERESTART;
- }
- mtx_unlock(&p->p_sigacts->ps_mtx);
if (p->p_flag & P_WEXIT)
rval = EINTR;
PROC_UNLOCK(p);
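In cv_wait_sig() the old open-coded signal handling (cursig() and the SIGISMEMBER() test against ps_sigintr) is replaced by sleepq_catch_signals(), sleepq_wait_sig(), and sleepq_calc_signal_retval(). The visible contract stays the same: 0 on a normal wakeup, EINTR or ERESTART when a signal cut the sleep short. A caller-side sketch under the same hypothetical example_* assumptions:

/* Interruptible consumer: a caught signal aborts the wait (hypothetical names). */
static int
example_consume_sig(void)
{
        int error;

        mtx_lock(&example_mtx);
        while (example_ready == 0) {
                error = cv_wait_sig(&example_cv, &example_mtx);
                if (error != 0) {               /* EINTR or ERESTART */
                        mtx_unlock(&example_mtx);
                        return (error);
                }
        }
        example_ready = 0;
        mtx_unlock(&example_mtx);
        return (0);
}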
@@ -321,6 +213,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
int
cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
{
+ struct sleepqueue *sq;
struct thread *td;
int rval;
WITNESS_SAVE_DECL(mp);
@@ -346,34 +239,15 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
return 0;
}
- mtx_lock_spin(&sched_lock);
-
- CV_WAIT_VALIDATE(cvp, mp);
+ sq = sleepq_lookup(cvp);
DROP_GIANT();
mtx_unlock(mp);
- cv_waitq_add(cvp, td);
- callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
- cv_switch(td);
-
- if (td->td_flags & TDF_TIMEOUT) {
- td->td_flags &= ~TDF_TIMEOUT;
- rval = EWOULDBLOCK;
- } else if (td->td_flags & TDF_TIMOFAIL)
- td->td_flags &= ~TDF_TIMOFAIL;
- else if (callout_stop(&td->td_slpcallout) == 0) {
- /*
- * Work around race with cv_timedwait_end similar to that
- * between msleep and endtsleep.
- * Go back to sleep.
- */
- TD_SET_SLEEPING(td);
- mi_switch(SW_INVOL);
- td->td_flags &= ~TDF_TIMOFAIL;
- }
+ sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_set_timeout(sq, cvp, timo);
+ rval = sleepq_timedwait(cvp, 0);
- mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
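The timed wait no longer arms td_slpcallout by hand or juggles TDF_TIMEOUT/TDF_TIMOFAIL; sleepq_set_timeout() arms the timeout and sleepq_timedwait() reports EWOULDBLOCK when it fires, with the callout race now handled inside the sleepqueue code. Callers see the same EWOULDBLOCK-on-timeout behaviour as before. A sketch (hypothetical names, same assumptions as above):

/* Bounded consumer: give up after timo ticks (hypothetical names). */
static int
example_consume_timed(int timo)
{
        int error;

        mtx_lock(&example_mtx);
        error = 0;
        while (example_ready == 0) {
                error = cv_timedwait(&example_cv, &example_mtx, timo);
                if (error == EWOULDBLOCK)       /* timeout expired before a wakeup */
                        break;
        }
        if (error == 0)
                example_ready = 0;
        mtx_unlock(&example_mtx);
        return (error);
}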
@@ -394,6 +268,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
int
cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
{
+ struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int rval;
@@ -422,48 +297,24 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
return 0;
}
- mtx_lock_spin(&sched_lock);
-
- CV_WAIT_VALIDATE(cvp, mp);
+ sq = sleepq_lookup(cvp);
DROP_GIANT();
mtx_unlock(mp);
- cv_waitq_add(cvp, td);
- callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
- sig = cv_switch_catch(td);
-
- if (td->td_flags & TDF_TIMEOUT) {
- td->td_flags &= ~TDF_TIMEOUT;
- rval = EWOULDBLOCK;
- } else if (td->td_flags & TDF_TIMOFAIL)
- td->td_flags &= ~TDF_TIMOFAIL;
- else if (callout_stop(&td->td_slpcallout) == 0) {
- /*
- * Work around race with cv_timedwait_end similar to that
- * between msleep and endtsleep.
- * Go back to sleep.
- */
- TD_SET_SLEEPING(td);
- mi_switch(SW_INVOL);
- td->td_flags &= ~TDF_TIMOFAIL;
- }
- mtx_unlock_spin(&sched_lock);
+ sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_set_timeout(sq, cvp, timo);
+ sig = sleepq_catch_signals(cvp);
+ /*
+ * XXX: Missing magic return value handling for no signal
+ * caught but thread woken up during check.
+ */
+ rval = sleepq_timedwait_sig(cvp, sig != 0);
+ if (rval == 0)
+ rval = sleepq_calc_signal_retval(sig);
+ /* XXX: Part of missing threading checks? */
PROC_LOCK(p);
- mtx_lock(&p->p_sigacts->ps_mtx);
- if (sig == 0) {
- sig = cursig(td);
- if (sig == 0 && td->td_flags & TDF_INTERRUPT)
- rval = td->td_intrval;
- }
- if (sig != 0) {
- if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
- rval = EINTR;
- else
- rval = ERESTART;
- }
- mtx_unlock(&p->p_sigacts->ps_mtx);
if (p->p_flag & P_WEXIT)
rval = EINTR;
PROC_UNLOCK(p);
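cv_timedwait_sig() combines the two cases above, so a caller can now see 0, EWOULDBLOCK (timeout), EINTR, or ERESTART, with the choice between the last two made by sleepq_calc_signal_retval() from the ps_sigintr membership that the removed block used to test inline. A sketch handling all of them (hypothetical names):

/* Bounded and interruptible consumer (hypothetical names). */
static int
example_consume_timed_sig(int timo)
{
        int error;

        mtx_lock(&example_mtx);
        while (example_ready == 0) {
                error = cv_timedwait_sig(&example_cv, &example_mtx, timo);
                switch (error) {
                case 0:                         /* woken by cv_signal()/cv_broadcast() */
                        continue;               /* recheck the predicate */
                case EWOULDBLOCK:               /* timo ticks elapsed first */
                case EINTR:                     /* caught signal listed in ps_sigintr */
                case ERESTART:                  /* other caught signal; syscall restarts */
                default:
                        mtx_unlock(&example_mtx);
                        return (error);
                }
        }
        example_ready = 0;
        mtx_unlock(&example_mtx);
        return (0);
}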
@@ -480,24 +331,6 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
}
/*
- * Common code for signal and broadcast. Assumes waitq is not empty. Must be
- * called with sched_lock held.
- */
-static __inline void
-cv_wakeup(struct cv *cvp)
-{
- struct thread *td;
-
- mtx_assert(&sched_lock, MA_OWNED);
- td = TAILQ_FIRST(&cvp->cv_waitq);
- KASSERT(td->td_wchan == cvp, ("%s: bogus wchan", __func__));
- KASSERT(td->td_flags & TDF_CVWAITQ, ("%s: not on waitq", __func__));
- cv_waitq_remove(td);
- TD_CLR_SLEEPING(td);
- setrunnable(td);
-}
-
-/*
* Signal a condition variable, wakes up one waiting thread. Will also wakeup
* the swapper if the process is not in memory, so that it can bring the
* sleeping process in. Note that this may also result in additional threads
@@ -508,13 +341,7 @@ void
cv_signal(struct cv *cvp)
{
- KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
- mtx_lock_spin(&sched_lock);
- if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
- CV_SIGNAL_VALIDATE(cvp);
- cv_wakeup(cvp);
- }
- mtx_unlock_spin(&sched_lock);
+ sleepq_signal(cvp, SLEEPQ_CONDVAR, -1);
}
/*
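On the wakeup side, cv_signal() is now a single sleepq_signal() call on the cv's wait channel, and cv_broadcastpri() below becomes sleepq_broadcast(); picking a waiter and making it runnable happen inside the sleepqueue code rather than in cv_wakeup(). The producer half of the hypothetical example, for completeness:

/* Producer side: publish the change, then wake one waiter (hypothetical names). */
static void
example_produce(void)
{
        mtx_lock(&example_mtx);
        example_ready = 1;
        cv_signal(&example_cv);         /* cv_broadcast() would wake all waiters */
        mtx_unlock(&example_mtx);
}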
@@ -524,82 +351,6 @@ cv_signal(struct cv *cvp)
void
cv_broadcastpri(struct cv *cvp, int pri)
{
- struct thread *td;
-
- KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
- mtx_lock_spin(&sched_lock);
- CV_SIGNAL_VALIDATE(cvp);
- while (!TAILQ_EMPTY(&cvp->cv_waitq)) {
- if (pri >= PRI_MIN && pri <= PRI_MAX) {
- td = TAILQ_FIRST(&cvp->cv_waitq);
- if (td->td_priority > pri)
- td->td_priority = pri;
- }
- cv_wakeup(cvp);
- }
- mtx_unlock_spin(&sched_lock);
-}
-/*
- * Remove a thread from the wait queue of its condition variable. This may be
- * called externally.
- */
-void
-cv_waitq_remove(struct thread *td)
-{
- struct cv *cvp;
-
- mtx_assert(&sched_lock, MA_OWNED);
- if ((cvp = td->td_wchan) != NULL && td->td_flags & TDF_CVWAITQ) {
- TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
- td->td_flags &= ~TDF_CVWAITQ;
- td->td_wmesg = NULL;
- TD_CLR_ON_SLEEPQ(td);
- }
+ sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri);
}
-
-/*
- * Timeout function for cv_timedwait. Put the thread on the runqueue and set
- * its timeout flag.
- */
-static void
-cv_timedwait_end(void *arg)
-{
- struct thread *td;
-
- td = arg;
- CTR3(KTR_PROC, "cv_timedwait_end: thread %p (pid %d, %s)",
- td, td->td_proc->p_pid, td->td_proc->p_comm);
- mtx_lock_spin(&sched_lock);
- if (TD_ON_SLEEPQ(td)) {
- cv_waitq_remove(td);
- td->td_flags |= TDF_TIMEOUT;
- } else {
- td->td_flags |= TDF_TIMOFAIL;
- }
- TD_CLR_SLEEPING(td);
- setrunnable(td);
- mtx_unlock_spin(&sched_lock);
-}
-
-/*
- * For now only abort interruptable waits.
- * The others will have to either complete on their own or have a timeout.
- */
-void
-cv_abort(struct thread *td)
-{
-
- CTR3(KTR_PROC, "cv_abort: thread %p (pid %d, %s)", td,
- td->td_proc->p_pid, td->td_proc->p_comm);
- mtx_lock_spin(&sched_lock);
- if ((td->td_flags & (TDF_SINTR|TDF_TIMEOUT)) == TDF_SINTR) {
- if (TD_ON_SLEEPQ(td)) {
- cv_waitq_remove(td);
- }
- TD_CLR_SLEEPING(td);
- setrunnable(td);
- }
- mtx_unlock_spin(&sched_lock);
-}
-
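With cv_timedwait_end() and cv_abort() removed, timeouts and sleep aborts are no longer condvar-specific; everything is expressed through the sleepqueue calls that the new wait paths use. Distilled to a skeleton that uses only the interfaces appearing in this diff (a sketch, not the verbatim implementation; KTRACE and WITNESS bookkeeping omitted, and the function name is hypothetical):

/* Rough shape of the new timed-wait path (sketch only). */
static int
cv_timedwait_skeleton(struct cv *cvp, struct mtx *mp, int timo)
{
        struct sleepqueue *sq;
        int rval;

        sq = sleepq_lookup(cvp);                /* sleep queue keyed on the cv */
        DROP_GIANT();
        mtx_unlock(mp);                         /* drop the interlock before sleeping */
        sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
        sleepq_set_timeout(sq, cvp, timo);      /* replaces the old td_slpcallout callout */
        rval = sleepq_timedwait(cvp, 0);        /* 0 on wakeup, EWOULDBLOCK on timeout */
        PICKUP_GIANT();
        mtx_lock(mp);                           /* cv_timedwait() returns with mp held */
        return (rval);
}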