summaryrefslogtreecommitdiffstats
path: root/sys/kern/subr_sleepqueue.c
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2004-08-19 11:31:42 +0000
committerjhb <jhb@FreeBSD.org>2004-08-19 11:31:42 +0000
commit9e08178eb7f76d4c0238edff1e39930e0d6ee70e (patch)
tree0e4978383f95ccaee9fad964201d8694bc8eaf63 /sys/kern/subr_sleepqueue.c
parent4b7b5c6f6a763532ffe2c98abd2a233bb1484224 (diff)
downloadFreeBSD-src-9e08178eb7f76d4c0238edff1e39930e0d6ee70e.zip
FreeBSD-src-9e08178eb7f76d4c0238edff1e39930e0d6ee70e.tar.gz
Now that the return value semantics of cv's for multithreaded processes
have been unified with that of msleep(9), further refine the sleepq interface and consolidate some duplicated code: - Move the pre-sleep checks for threaded processes into a thread_sleep_check() function in kern_thread.c. - Move all handling of TDF_SINTR to be internal to subr_sleepqueue.c. Specifically, if a thread is awakened by something other than a signal while checking for signals before going to sleep, clear TDF_SINTR in sleepq_catch_signals(). This removes a sched_lock lock/unlock combo in that edge case during an interruptible sleep. Also, fix sleepq_check_signals() to properly handle the condition if TDF_SINTR is clear rather than requiring the callers of the sleepq API to notice this edge case and call a non-_sig variant of sleepq_wait(). - Clarify the flags arguments to sleepq_add(), sleepq_signal() and sleepq_broadcast() by creating an explicit submask for sleepq types. Also, add an explicit SLEEPQ_MSLEEP type rather than a magic number of 0. Also, add a SLEEPQ_INTERRUPTIBLE flag for use with sleepq_add() and move the setting of TDF_SINTR to sleepq_add() if this flag is set rather than sleepq_catch_signals(). Note that it is the caller's responsibility to ensure that sleepq_catch_signals() is called if and only if this flag is passed to the preceding sleepq_add(). Note that this also removes a sched_lock lock/unlock pair from sleepq_catch_signals(). It also ensures that for an interruptible sleep, TDF_SINTR is always set when TD_ON_SLEEPQ() is true.
Diffstat (limited to 'sys/kern/subr_sleepqueue.c')
-rw-r--r--sys/kern/subr_sleepqueue.c32
1 files changed, 23 insertions, 9 deletions
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index a7a07d5..5c92695 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -113,7 +113,7 @@ struct sleepqueue {
LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */
LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */
void *sq_wchan; /* (c) Wait channel. */
- int sq_flags; /* (c) Flags. */
+ int sq_type; /* (c) Queue type. */
#ifdef INVARIANTS
struct mtx *sq_lock; /* (c) Associated lock. */
#endif
@@ -279,7 +279,7 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
#ifdef INVARIANTS
sq->sq_lock = lock;
#endif
- sq->sq_flags = flags;
+ sq->sq_type = flags & SLEEPQ_TYPE;
TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
} else {
MPASS(wchan == sq->sq_wchan);
@@ -297,6 +297,8 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
mtx_lock_spin(&sched_lock);
td->td_wchan = wchan;
td->td_wmesg = wmesg;
+ if (flags & SLEEPQ_INTERRUPTIBLE)
+ td->td_flags |= TDF_SINTR;
mtx_unlock_spin(&sched_lock);
}
@@ -345,10 +347,8 @@ sleepq_catch_signals(void *wchan)
(void *)td, (long)p->p_pid, p->p_comm);
/* Mark thread as being in an interruptible sleep. */
- mtx_lock_spin(&sched_lock);
+ MPASS(td->td_flags & TDF_SINTR);
MPASS(TD_ON_SLEEPQ(td));
- td->td_flags |= TDF_SINTR;
- mtx_unlock_spin(&sched_lock);
sleepq_release(wchan);
/* See if there are any pending signals for this thread. */
@@ -364,15 +364,20 @@ sleepq_catch_signals(void *wchan)
/*
* If there were pending signals and this thread is still on
- * the sleep queue, remove it from the sleep queue.
+ * the sleep queue, remove it from the sleep queue. If the
+ * thread was removed from the sleep queue while we were blocked
+ * above, then clear TDF_SINTR before returning.
*/
sq = sleepq_lookup(wchan);
mtx_lock_spin(&sched_lock);
if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
mtx_unlock_spin(&sched_lock);
sleepq_remove_thread(sq, td);
- } else
+ } else {
+ if (!TD_ON_SLEEPQ(td) && sig == 0)
+ td->td_flags &= ~TDF_SINTR;
mtx_unlock_spin(&sched_lock);
+ }
return (sig);
}
@@ -465,6 +470,13 @@ sleepq_check_signals(void)
mtx_assert(&sched_lock, MA_OWNED);
td = curthread;
+ /*
+ * If TDF_SINTR is clear, then we were awakened while executing
+ * sleepq_catch_signals().
+ */
+ if (!(td->td_flags & TDF_SINTR))
+ return (0);
+
/* We are no longer in an interruptible sleep. */
td->td_flags &= ~TDF_SINTR;
@@ -513,6 +525,7 @@ void
sleepq_wait(void *wchan)
{
+ MPASS(!(curthread->td_flags & TDF_SINTR));
sleepq_switch(wchan);
mtx_unlock_spin(&sched_lock);
}
@@ -541,6 +554,7 @@ sleepq_timedwait(void *wchan)
{
int rval;
+ MPASS(!(curthread->td_flags & TDF_SINTR));
sleepq_switch(wchan);
rval = sleepq_check_timeout();
mtx_unlock_spin(&sched_lock);
@@ -649,7 +663,7 @@ sleepq_signal(void *wchan, int flags, int pri)
sleepq_release(wchan);
return;
}
- KASSERT(sq->sq_flags == flags,
+ KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
/* XXX: Do for all sleep queues eventually. */
if (flags & SLEEPQ_CONDVAR)
@@ -679,7 +693,7 @@ sleepq_broadcast(void *wchan, int flags, int pri)
sleepq_release(wchan);
return;
}
- KASSERT(sq->sq_flags == flags,
+ KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
/* XXX: Do for all sleep queues eventually. */
if (flags & SLEEPQ_CONDVAR)
OpenPOWER on IntegriCloud