summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2004-08-19 11:31:42 +0000
committerjhb <jhb@FreeBSD.org>2004-08-19 11:31:42 +0000
commit9e08178eb7f76d4c0238edff1e39930e0d6ee70e (patch)
tree0e4978383f95ccaee9fad964201d8694bc8eaf63 /sys/kern
parent4b7b5c6f6a763532ffe2c98abd2a233bb1484224 (diff)
downloadFreeBSD-src-9e08178eb7f76d4c0238edff1e39930e0d6ee70e.zip
FreeBSD-src-9e08178eb7f76d4c0238edff1e39930e0d6ee70e.tar.gz
Now that the return value semantics of cv's for multithreaded processes
have been unified with that of msleep(9), further refine the sleepq interface and consolidate some duplicated code: - Move the pre-sleep checks for threaded processes into a thread_sleep_check() function in kern_thread.c. - Move all handling of TDF_SINTR to be internal to subr_sleepqueue.c. Specifically, if a thread is awakened by something other than a signal while checking for signals before going to sleep, clear TDF_SINTR in sleepq_catch_signals(). This removes a sched_lock lock/unlock combo in that edge case during an interruptible sleep. Also, fix sleepq_check_signals() to properly handle the condition if TDF_SINTR is clear rather than requiring the callers of the sleepq API to notice this edge case and call a non-_sig variant of sleepq_wait(). - Clarify the flags arguments to sleepq_add(), sleepq_signal() and sleepq_broadcast() by creating an explicit submask for sleepq types. Also, add an explicit SLEEPQ_MSLEEP type rather than a magic number of 0. Also, add a SLEEPQ_INTERRUPTIBLE flag for use with sleepq_add() and move the setting of TDF_SINTR to sleepq_add() if this flag is set rather than sleepq_catch_signals(). Note that it is the caller's responsibility to ensure that sleepq_catch_signals() is called if and only if this flag is passed to the preceding sleepq_add(). Note that this also removes a sched_lock lock/unlock pair from sleepq_catch_signals(). It also ensures that for an interruptible sleep, TDF_SINTR is always set when TD_ON_SLEEPQ() is true.
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_condvar.c55
-rw-r--r--sys/kern/kern_synch.c44
-rw-r--r--sys/kern/kern_thread.c19
-rw-r--r--sys/kern/subr_sleepqueue.c32
4 files changed, 74 insertions, 76 deletions
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index ff619ec..fc0f799 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -153,7 +153,6 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
td = curthread;
p = td->td_proc;
- rval = 0;
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
@@ -180,32 +179,21 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
* thread or if our thread is marked as interrupted.
*/
mtx_lock_spin(&sched_lock);
- if (p->p_flag & P_SA || p->p_numthreads > 1) {
- if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td)
- rval = EINTR;
- else if (td->td_flags & TDF_INTERRUPT)
- rval = td->td_intrval;
- if (rval != 0) {
- mtx_unlock_spin(&sched_lock);
- sleepq_release(cvp);
- return (rval);
- }
- }
+ rval = thread_sleep_check(td);
mtx_unlock_spin(&sched_lock);
+ if (rval != 0) {
+ sleepq_release(cvp);
+ return (rval);
+ }
cvp->cv_waiters++;
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
+ SLEEPQ_INTERRUPTIBLE);
sig = sleepq_catch_signals(cvp);
- if (sig == 0 && !TD_ON_SLEEPQ(td)) {
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_SINTR;
- mtx_unlock_spin(&sched_lock);
- sleepq_wait(cvp);
- } else
- rval = sleepq_wait_sig(cvp);
+ rval = sleepq_wait_sig(cvp);
if (rval == 0)
rval = sleepq_calc_signal_retval(sig);
@@ -320,33 +308,22 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
* thread or if our thread is marked as interrupted.
*/
mtx_lock_spin(&sched_lock);
- if (p->p_flag & P_SA || p->p_numthreads > 1) {
- if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td)
- rval = EINTR;
- else if (td->td_flags & TDF_INTERRUPT)
- rval = td->td_intrval;
- if (rval != 0) {
- mtx_unlock_spin(&sched_lock);
- sleepq_release(cvp);
- return (rval);
- }
- }
+ rval = thread_sleep_check(td);
mtx_unlock_spin(&sched_lock);
+ if (rval != 0) {
+ sleepq_release(cvp);
+ return (rval);
+ }
cvp->cv_waiters++;
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
+ SLEEPQ_INTERRUPTIBLE);
sleepq_set_timeout(cvp, timo);
sig = sleepq_catch_signals(cvp);
- if (sig == 0 && !TD_ON_SLEEPQ(td)) {
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_SINTR;
- mtx_unlock_spin(&sched_lock);
- rval = sleepq_timedwait(cvp);
- } else
- rval = sleepq_timedwait_sig(cvp, sig != 0);
+ rval = sleepq_timedwait_sig(cvp, sig != 0);
if (rval == 0)
rval = sleepq_calc_signal_retval(sig);
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 7830521..aae9d95 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -125,7 +125,7 @@ msleep(ident, mtx, priority, wmesg, timo)
struct sleepqueue *sq;
struct thread *td;
struct proc *p;
- int catch, rval, sig;
+ int catch, rval, sig, flags;
WITNESS_SAVE_DECL(mtx);
td = curthread;
@@ -166,28 +166,19 @@ msleep(ident, mtx, priority, wmesg, timo)
sleepq_remove(td, td->td_wchan);
sq = sleepq_lookup(ident);
- mtx_lock_spin(&sched_lock);
-
- if (p->p_flag & P_SA || p->p_numthreads > 1) {
+ if (catch) {
/*
- * Just don't bother if we are exiting
- * and not the exiting thread or thread was marked as
- * interrupted.
+ * Don't bother sleeping if we are exiting and not the exiting
+ * thread or if our thread is marked as interrupted.
*/
- if (catch) {
- if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td) {
- mtx_unlock_spin(&sched_lock);
- sleepq_release(ident);
- return (EINTR);
- }
- if (td->td_flags & TDF_INTERRUPT) {
- mtx_unlock_spin(&sched_lock);
- sleepq_release(ident);
- return (td->td_intrval);
- }
+ mtx_lock_spin(&sched_lock);
+ rval = thread_sleep_check(td);
+ mtx_unlock_spin(&sched_lock);
+ if (rval != 0) {
+ sleepq_release(ident);
+ return (rval);
}
}
- mtx_unlock_spin(&sched_lock);
CTR5(KTR_PROC, "msleep: thread %p (pid %ld, %s) on %s (%p)",
(void *)td, (long)p->p_pid, p->p_comm, wmesg, ident);
@@ -207,17 +198,14 @@ msleep(ident, mtx, priority, wmesg, timo)
* stopped, then td will no longer be on a sleep queue upon
* return from cursig().
*/
- sleepq_add(sq, ident, mtx, wmesg, 0);
+ flags = SLEEPQ_MSLEEP;
+ if (catch)
+ flags |= SLEEPQ_INTERRUPTIBLE;
+ sleepq_add(sq, ident, mtx, wmesg, flags);
if (timo)
sleepq_set_timeout(ident, timo);
if (catch) {
sig = sleepq_catch_signals(ident);
- if (sig == 0 && !TD_ON_SLEEPQ(td)) {
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_SINTR;
- mtx_unlock_spin(&sched_lock);
- catch = 0;
- }
} else
sig = 0;
@@ -262,7 +250,7 @@ wakeup(ident)
register void *ident;
{
- sleepq_broadcast(ident, 0, -1);
+ sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1);
}
/*
@@ -275,7 +263,7 @@ wakeup_one(ident)
register void *ident;
{
- sleepq_signal(ident, 0, -1);
+ sleepq_signal(ident, SLEEPQ_MSLEEP, -1);
}
/*
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 99b692b..f16e2f6 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -1110,3 +1110,22 @@ thread_single_end(void)
mtx_unlock_spin(&sched_lock);
}
+/*
+ * Called before going into an interruptible sleep to see if we have been
+ * interrupted or requested to exit.
+ */
+int
+thread_sleep_check(struct thread *td)
+{
+ struct proc *p;
+
+ p = td->td_proc;
+ mtx_assert(&sched_lock, MA_OWNED);
+ if (p->p_flag & P_SA || p->p_numthreads > 1) {
+ if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td)
+ return (EINTR);
+ if (td->td_flags & TDF_INTERRUPT)
+ return (td->td_intrval);
+ }
+ return (0);
+}
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index a7a07d5..5c92695 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -113,7 +113,7 @@ struct sleepqueue {
LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */
LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */
void *sq_wchan; /* (c) Wait channel. */
- int sq_flags; /* (c) Flags. */
+ int sq_type; /* (c) Queue type. */
#ifdef INVARIANTS
struct mtx *sq_lock; /* (c) Associated lock. */
#endif
@@ -279,7 +279,7 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
#ifdef INVARIANTS
sq->sq_lock = lock;
#endif
- sq->sq_flags = flags;
+ sq->sq_type = flags & SLEEPQ_TYPE;
TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
} else {
MPASS(wchan == sq->sq_wchan);
@@ -297,6 +297,8 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
mtx_lock_spin(&sched_lock);
td->td_wchan = wchan;
td->td_wmesg = wmesg;
+ if (flags & SLEEPQ_INTERRUPTIBLE)
+ td->td_flags |= TDF_SINTR;
mtx_unlock_spin(&sched_lock);
}
@@ -345,10 +347,8 @@ sleepq_catch_signals(void *wchan)
(void *)td, (long)p->p_pid, p->p_comm);
/* Mark thread as being in an interruptible sleep. */
- mtx_lock_spin(&sched_lock);
+ MPASS(td->td_flags & TDF_SINTR);
MPASS(TD_ON_SLEEPQ(td));
- td->td_flags |= TDF_SINTR;
- mtx_unlock_spin(&sched_lock);
sleepq_release(wchan);
/* See if there are any pending signals for this thread. */
@@ -364,15 +364,20 @@ sleepq_catch_signals(void *wchan)
/*
* If there were pending signals and this thread is still on
- * the sleep queue, remove it from the sleep queue.
+ * the sleep queue, remove it from the sleep queue. If the
+ * thread was removed from the sleep queue while we were blocked
+ * above, then clear TDF_SINTR before returning.
*/
sq = sleepq_lookup(wchan);
mtx_lock_spin(&sched_lock);
if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
mtx_unlock_spin(&sched_lock);
sleepq_remove_thread(sq, td);
- } else
+ } else {
+ if (!TD_ON_SLEEPQ(td) && sig == 0)
+ td->td_flags &= ~TDF_SINTR;
mtx_unlock_spin(&sched_lock);
+ }
return (sig);
}
@@ -465,6 +470,13 @@ sleepq_check_signals(void)
mtx_assert(&sched_lock, MA_OWNED);
td = curthread;
+ /*
+ * If TDF_SINTR is clear, then we were awakened while executing
+ * sleepq_catch_signals().
+ */
+ if (!(td->td_flags & TDF_SINTR))
+ return (0);
+
/* We are no longer in an interruptible sleep. */
td->td_flags &= ~TDF_SINTR;
@@ -513,6 +525,7 @@ void
sleepq_wait(void *wchan)
{
+ MPASS(!(curthread->td_flags & TDF_SINTR));
sleepq_switch(wchan);
mtx_unlock_spin(&sched_lock);
}
@@ -541,6 +554,7 @@ sleepq_timedwait(void *wchan)
{
int rval;
+ MPASS(!(curthread->td_flags & TDF_SINTR));
sleepq_switch(wchan);
rval = sleepq_check_timeout();
mtx_unlock_spin(&sched_lock);
@@ -649,7 +663,7 @@ sleepq_signal(void *wchan, int flags, int pri)
sleepq_release(wchan);
return;
}
- KASSERT(sq->sq_flags == flags,
+ KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
/* XXX: Do for all sleep queues eventually. */
if (flags & SLEEPQ_CONDVAR)
@@ -679,7 +693,7 @@ sleepq_broadcast(void *wchan, int flags, int pri)
sleepq_release(wchan);
return;
}
- KASSERT(sq->sq_flags == flags,
+ KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
/* XXX: Do for all sleep queues eventually. */
if (flags & SLEEPQ_CONDVAR)
OpenPOWER on IntegriCloud