path: root/sys/kern/kern_synch.c
Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r--  sys/kern/kern_synch.c | 298
1 file changed, 81 insertions(+), 217 deletions(-)
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 9ffe5a9..e988e2c 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
+#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
@@ -95,7 +96,6 @@ static fixpt_t cexp[3] = {
static int fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
-static void endtsleep(void *);
static void loadav(void *arg);
static void lboltcb(void *arg);
@@ -116,6 +116,7 @@ sleepinit(void)
hogticks = (hz / 10) * 2; /* Default only. */
for (i = 0; i < TABLESIZE; i++)
TAILQ_INIT(&slpque[i]);
+ init_sleepqueues();
}
/*
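
Both the old and new implementations hash the opaque wait-channel pointer into a fixed-size bucket table, which is why sleepinit() above keeps initializing slpque[] while also calling the new init_sleepqueues(). A minimal userland sketch of that hashing idea, with illustrative constants rather than the kernel's exact values:

/*
 * Hash an opaque pointer (a "wait channel") into a bucket index, as both
 * slpque[] and the sleepqueue chains do.  TABLESIZE and the shift count
 * here are assumptions for illustration, not the kernel's actual values.
 */
#include <stdint.h>
#include <stdio.h>

#define	TABLESIZE	128	/* must be a power of two */
#define	LOOKUP(x)	((((uintptr_t)(x)) >> 8) & (TABLESIZE - 1))

int
main(void)
{
	int chan_a, chan_b;	/* stand-ins for arbitrary kernel objects */

	printf("&chan_a -> bucket %zu\n", (size_t)LOOKUP(&chan_a));
	printf("&chan_b -> bucket %zu\n", (size_t)LOOKUP(&chan_b));
	return (0);
}

The shift discards low-order bits that aligned structures share, so distinct channels spread across buckets.
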
@@ -141,21 +142,52 @@ msleep(ident, mtx, priority, wmesg, timo)
int priority, timo;
const char *wmesg;
{
- struct thread *td = curthread;
- struct proc *p = td->td_proc;
- int sig, catch = priority & PCATCH;
- int rval = 0;
+ struct sleepqueue *sq;
+ struct thread *td;
+ struct proc *p;
+ int catch, rval, sig;
WITNESS_SAVE_DECL(mtx);
+ td = curthread;
+ p = td->td_proc;
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
- /* XXX: mtx == NULL ?? */
- WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mtx->mtx_object,
- "Sleeping on \"%s\"", wmesg);
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, mtx == NULL ? NULL :
+ &mtx->mtx_object, "Sleeping on \"%s\"", wmesg);
KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
("sleeping without a mutex"));
+ KASSERT(p != NULL, ("msleep1"));
+ KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
+
+ if (cold) {
+ /*
+ * During autoconfiguration, just return;
+ * don't run any other procs or panic below,
+ * in case this is the idle process and already asleep.
+ * XXX: this used to do "s = splhigh(); splx(safepri);
+ * splx(s);" to give interrupts a chance, but there is
+ * no way to give interrupts a chance now.
+ */
+ if (mtx != NULL && priority & PDROP)
+ mtx_unlock(mtx);
+ return (0);
+ }
+ catch = priority & PCATCH;
+ rval = 0;
+
+ /*
+ * If we are already on a sleep queue, then remove us from that
+ * sleep queue first. We have to do this to handle recursive
+ * sleeps.
+ */
+ if (TD_ON_SLEEPQ(td))
+ sleepq_remove(td, td->td_wchan);
+
+ sq = sleepq_lookup(ident);
+ mtx_lock_spin(&sched_lock);
+
/*
* If we are capable of async syscalls and there isn't already
* another one ready to return, start a new thread
@@ -163,7 +195,6 @@ msleep(ident, mtx, priority, wmesg, timo)
* because we need to make sure that we don't sleep allocating
* the thread (recursion here might be bad).
*/
- mtx_lock_spin(&sched_lock);
if (p->p_flag & P_SA || p->p_numthreads > 1) {
/*
* Just don't bother if we are exiting
@@ -173,28 +204,20 @@ msleep(ident, mtx, priority, wmesg, timo)
if (catch) {
if ((p->p_flag & P_WEXIT) && p->p_singlethread != td) {
mtx_unlock_spin(&sched_lock);
+ sleepq_release(ident);
return (EINTR);
}
if (td->td_flags & TDF_INTERRUPT) {
mtx_unlock_spin(&sched_lock);
+ sleepq_release(ident);
return (td->td_intrval);
}
}
}
- if (cold ) {
- /*
- * During autoconfiguration, just return;
- * don't run any other procs or panic below,
- * in case this is the idle process and already asleep.
- * XXX: this used to do "s = splhigh(); splx(safepri);
- * splx(s);" to give interrupts a chance, but there is
- * no way to give interrupts a chance now.
- */
- if (mtx != NULL && priority & PDROP)
- mtx_unlock(mtx);
- mtx_unlock_spin(&sched_lock);
- return (0);
- }
+ mtx_unlock_spin(&sched_lock);
+ CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
+ td, p->p_pid, p->p_comm, wmesg, ident);
+
DROP_GIANT();
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
@@ -203,101 +226,55 @@ msleep(ident, mtx, priority, wmesg, timo)
if (priority & PDROP)
mtx = NULL;
}
- KASSERT(p != NULL, ("msleep1"));
- KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
-
- CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
- td, p->p_pid, p->p_comm, wmesg, ident);
- td->td_wchan = ident;
- td->td_wmesg = wmesg;
- TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
- TD_SET_ON_SLEEPQ(td);
- if (timo)
- callout_reset(&td->td_slpcallout, timo, endtsleep, td);
/*
* We put ourselves on the sleep queue and start our timeout
- * before calling thread_suspend_check, as we could stop there, and
- * a wakeup or a SIGCONT (or both) could occur while we were stopped.
- * without resuming us, thus we must be ready for sleep
- * when cursig is called. If the wakeup happens while we're
- * stopped, td->td_wchan will be 0 upon return from cursig.
+ * before calling thread_suspend_check, as we could stop there,
+ * and a wakeup or a SIGCONT (or both) could occur while we were
+ * stopped without resuming us. Thus, we must be ready for sleep
+ * when cursig() is called. If the wakeup happens while we're
+ * stopped, then td will no longer be on a sleep queue upon
+ * return from cursig().
*/
+ sleepq_add(sq, ident, mtx, wmesg, 0);
+ if (timo)
+ sleepq_set_timeout(sq, ident, timo);
if (catch) {
- CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
- p->p_pid, p->p_comm);
- td->td_flags |= TDF_SINTR;
- mtx_unlock_spin(&sched_lock);
- PROC_LOCK(p);
- mtx_lock(&p->p_sigacts->ps_mtx);
- sig = cursig(td);
- mtx_unlock(&p->p_sigacts->ps_mtx);
- if (sig == 0 && thread_suspend_check(1))
- sig = SIGSTOP;
- mtx_lock_spin(&sched_lock);
- PROC_UNLOCK(p);
- if (sig != 0) {
- if (TD_ON_SLEEPQ(td))
- unsleep(td);
- } else if (!TD_ON_SLEEPQ(td))
+ sig = sleepq_catch_signals(ident);
+ if (sig == 0 && !TD_ON_SLEEPQ(td)) {
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_SINTR;
+ mtx_unlock_spin(&sched_lock);
catch = 0;
+ }
} else
sig = 0;
/*
- * Let the scheduler know we're about to voluntarily go to sleep.
+ * Adjust this thread's priority.
+ *
+ * XXX: Do we need to save priority in td_base_pri?
*/
- sched_sleep(td, priority & PRIMASK);
+ mtx_lock_spin(&sched_lock);
+ sched_prio(td, priority & PRIMASK);
+ mtx_unlock_spin(&sched_lock);
- if (TD_ON_SLEEPQ(td)) {
- TD_SET_SLEEPING(td);
- mi_switch(SW_VOL);
+ if (timo && catch)
+ rval = sleepq_timedwait_sig(ident, sig != 0);
+ else if (timo)
+ rval = sleepq_timedwait(ident, sig != 0);
+ else if (catch)
+ rval = sleepq_wait_sig(ident);
+ else {
+ sleepq_wait(ident);
+ rval = 0;
}
+
/*
* We're awake from voluntary sleep.
*/
- CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
- p->p_comm);
- KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
- td->td_flags &= ~TDF_SINTR;
- if (td->td_flags & TDF_TIMEOUT) {
- td->td_flags &= ~TDF_TIMEOUT;
- if (sig == 0)
- rval = EWOULDBLOCK;
- } else if (td->td_flags & TDF_TIMOFAIL) {
- td->td_flags &= ~TDF_TIMOFAIL;
- } else if (timo && callout_stop(&td->td_slpcallout) == 0) {
- /*
- * This isn't supposed to be pretty. If we are here, then
- * the endtsleep() callout is currently executing on another
- * CPU and is either spinning on the sched_lock or will be
- * soon. If we don't synchronize here, there is a chance
- * that this process may msleep() again before the callout
- * has a chance to run and the callout may end up waking up
- * the wrong msleep(). Yuck.
- */
- TD_SET_SLEEPING(td);
- mi_switch(SW_INVOL);
- td->td_flags &= ~TDF_TIMOFAIL;
- }
- if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
- (rval == 0)) {
- rval = td->td_intrval;
- }
- mtx_unlock_spin(&sched_lock);
- if (rval == 0 && catch) {
- PROC_LOCK(p);
- /* XXX: shouldn't we always be calling cursig()? */
- mtx_lock(&p->p_sigacts->ps_mtx);
- if (sig != 0 || (sig = cursig(td))) {
- if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
- rval = EINTR;
- else
- rval = ERESTART;
- }
- mtx_unlock(&p->p_sigacts->ps_mtx);
- PROC_UNLOCK(p);
- }
+ if (rval == 0 && catch)
+ rval = sleepq_calc_signal_retval(sig);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
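
Taken together, the hunks above reduce msleep() to a fixed sequence of sleepqueue calls. Below is a condensed sketch of that consumer-side control flow, using only functions visible in this diff; the Giant/PDROP mutex handling, the catch-reset path, the sched_prio() adjustment, and the KTRACE hooks are deliberately omitted, so treat it as an illustration rather than a drop-in implementation:

/*
 * Sketch of the sleepqueue consumer pattern the new msleep() follows.
 * Locking of the caller's mutex and the signal/suspend corner cases
 * are left out for clarity.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>

static int
sleepq_pattern_sketch(void *ident, struct mtx *mtx, int catch, int timo,
    const char *wmesg)
{
	struct sleepqueue *sq;
	int rval, sig;

	sq = sleepq_lookup(ident);		/* also locks the sleepq chain */
	sleepq_add(sq, ident, mtx, wmesg, 0);	/* queue before signal checks */
	if (timo)
		sleepq_set_timeout(sq, ident, timo);
	sig = catch ? sleepq_catch_signals(ident) : 0;

	/* Four wait variants cover the timeout/signal combinations. */
	if (timo && catch)
		rval = sleepq_timedwait_sig(ident, sig != 0);
	else if (timo)
		rval = sleepq_timedwait(ident, sig != 0);
	else if (catch)
		rval = sleepq_wait_sig(ident);
	else {
		sleepq_wait(ident);
		rval = 0;
	}
	if (rval == 0 && catch)
		rval = sleepq_calc_signal_retval(sig);
	return (rval);
}

The ordering matters: the thread is queued and its timeout armed before sleepq_catch_signals() can stop it, so a wakeup or SIGCONT delivered while the thread is stopped is not lost. That is the same lost-wakeup reasoning spelled out in the comment above sleepq_add(), and it also replaces the removed endtsleep()/TDF_TIMOFAIL callout race handling: timeout synchronization now lives behind the sleepq_timedwait*() calls.
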
@@ -311,109 +288,14 @@ msleep(ident, mtx, priority, wmesg, timo)
}
/*
- * Implement timeout for msleep().
- *
- * If process hasn't been awakened (wchan non-zero),
- * set timeout flag and undo the sleep. If proc
- * is stopped, just unsleep so it will remain stopped.
- * MP-safe, called without the Giant mutex.
- */
-static void
-endtsleep(arg)
- void *arg;
-{
- register struct thread *td;
-
- td = (struct thread *)arg;
- CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
- td, td->td_proc->p_pid, td->td_proc->p_comm);
- mtx_lock_spin(&sched_lock);
- /*
- * This is the other half of the synchronization with msleep()
- * described above. If the TDS_TIMEOUT flag is set, we lost the
- * race and just need to put the process back on the runqueue.
- */
- if (TD_ON_SLEEPQ(td)) {
- TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
- TD_CLR_ON_SLEEPQ(td);
- td->td_flags |= TDF_TIMEOUT;
- td->td_wmesg = NULL;
- } else
- td->td_flags |= TDF_TIMOFAIL;
- TD_CLR_SLEEPING(td);
- setrunnable(td);
- mtx_unlock_spin(&sched_lock);
-}
-
-/*
- * Abort a thread, as if an interrupt had occured. Only abort
- * interruptable waits (unfortunatly it isn't only safe to abort others).
- * This is about identical to cv_abort().
- * Think about merging them?
- * Also, whatever the signal code does...
- */
-void
-abortsleep(struct thread *td)
-{
-
- mtx_assert(&sched_lock, MA_OWNED);
- /*
- * If the TDF_TIMEOUT flag is set, just leave. A
- * timeout is scheduled anyhow.
- */
- if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
- if (TD_ON_SLEEPQ(td)) {
- unsleep(td);
- TD_CLR_SLEEPING(td);
- setrunnable(td);
- }
- }
-}
-
-/*
- * Remove a process from its wait queue
- */
-void
-unsleep(struct thread *td)
-{
-
- mtx_lock_spin(&sched_lock);
- if (TD_ON_SLEEPQ(td)) {
- TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
- TD_CLR_ON_SLEEPQ(td);
- td->td_wmesg = NULL;
- }
- mtx_unlock_spin(&sched_lock);
-}
-
-/*
* Make all processes sleeping on the specified identifier runnable.
*/
void
wakeup(ident)
register void *ident;
{
- register struct slpquehead *qp;
- register struct thread *td;
- struct thread *ntd;
- struct proc *p;
- mtx_lock_spin(&sched_lock);
- qp = &slpque[LOOKUP(ident)];
-restart:
- for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
- ntd = TAILQ_NEXT(td, td_slpq);
- if (td->td_wchan == ident) {
- unsleep(td);
- TD_CLR_SLEEPING(td);
- setrunnable(td);
- p = td->td_proc;
- CTR3(KTR_PROC,"wakeup: thread %p (pid %d, %s)",
- td, p->p_pid, p->p_comm);
- goto restart;
- }
- }
- mtx_unlock_spin(&sched_lock);
+ sleepq_broadcast(ident, 0, -1);
}
/*
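
wakeup() is now a one-line wrapper around sleepq_broadcast(), but the rendezvous it implements looks unchanged to callers. A hypothetical driver snippet follows; the softc layout and all names are invented for illustration, and the address of sc_flags serves as the wait channel on both sides:

/*
 * Classic msleep()/wakeup() handshake.  MYDEV_BUSY, the softc layout,
 * and the function names are assumptions made up for this sketch.
 */
#include <sys/param.h>
#include <sys/systm.h>		/* msleep(), wakeup() */
#include <sys/lock.h>
#include <sys/mutex.h>

struct mydev_softc {
	struct mtx	sc_mtx;
	int		sc_flags;
#define	MYDEV_BUSY	0x01
};

/* Consumer: recheck the predicate in a loop; sleepers can wake spuriously. */
static int
mydev_wait(struct mydev_softc *sc)
{
	int error;

	mtx_lock(&sc->sc_mtx);
	while (sc->sc_flags & MYDEV_BUSY) {
		error = msleep(&sc->sc_flags, &sc->sc_mtx, PRIBIO | PCATCH,
		    "mydev", 0);
		if (error != 0) {
			mtx_unlock(&sc->sc_mtx);
			return (error);
		}
	}
	mtx_unlock(&sc->sc_mtx);
	return (0);
}

/* Producer: clear the flag, then wake every thread sleeping on the channel. */
static void
mydev_done(struct mydev_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	sc->sc_flags &= ~MYDEV_BUSY;
	wakeup(&sc->sc_flags);
	mtx_unlock(&sc->sc_mtx);
}
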
@@ -425,26 +307,8 @@ void
wakeup_one(ident)
register void *ident;
{
- register struct proc *p;
- register struct slpquehead *qp;
- register struct thread *td;
- struct thread *ntd;
- mtx_lock_spin(&sched_lock);
- qp = &slpque[LOOKUP(ident)];
- for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
- ntd = TAILQ_NEXT(td, td_slpq);
- if (td->td_wchan == ident) {
- unsleep(td);
- TD_CLR_SLEEPING(td);
- setrunnable(td);
- p = td->td_proc;
- CTR3(KTR_PROC,"wakeup1: thread %p (pid %d, %s)",
- td, p->p_pid, p->p_comm);
- break;
- }
- }
- mtx_unlock_spin(&sched_lock);
+ sleepq_signal(ident, 0, -1);
}
/*
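
The wrapper above wakes only a single thread: wakeup_one() maps to sleepq_signal() just as wakeup() maps to sleepq_broadcast(). Waking one waiter is only safe when any single waiter can consume the event; a one-token resource is the canonical case. A sketch under that assumption, with all names invented for illustration:

/*
 * One-token pool: exactly one sleeper can make progress per token, so
 * wakeup_one() is sufficient and avoids a thundering herd.
 */
#include <sys/param.h>
#include <sys/systm.h>		/* msleep(), wakeup_one() */
#include <sys/lock.h>
#include <sys/mutex.h>

struct token_pool {
	struct mtx	tp_mtx;
	int		tp_ntokens;
};

static void
token_get(struct token_pool *tp)
{
	mtx_lock(&tp->tp_mtx);
	while (tp->tp_ntokens == 0)
		msleep(&tp->tp_ntokens, &tp->tp_mtx, PRIBIO, "token", 0);
	tp->tp_ntokens--;
	mtx_unlock(&tp->tp_mtx);
}

static void
token_put(struct token_pool *tp)
{
	mtx_lock(&tp->tp_mtx);
	tp->tp_ntokens++;
	wakeup_one(&tp->tp_ntokens);	/* one token, so wake one waiter */
	mtx_unlock(&tp->tp_mtx);
}
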