diff options
author | jhb <jhb@FreeBSD.org> | 2008-08-05 20:02:31 +0000 |
---|---|---|
committer | jhb <jhb@FreeBSD.org> | 2008-08-05 20:02:31 +0000 |
commit | 8af56fb6875120edaae014a4cc547f5e14609a12 (patch) | |
tree | 80c45ebfc07976bf62d8de55267fa69fa52fdc1d /sys/kern/subr_sleepqueue.c | |
parent | be95b0fe3c10d07ed8c0887e7695a358ee6a2e17 (diff) | |
download | FreeBSD-src-8af56fb6875120edaae014a4cc547f5e14609a12.zip FreeBSD-src-8af56fb6875120edaae014a4cc547f5e14609a12.tar.gz |
If a thread that is swapped out is made runnable, then the setrunnable()
routine wakes up proc0 so that proc0 can swap the thread back in.
Historically, this has been done by waking up proc0 directly from
setrunnable() itself via a wakeup(). When waking up a sleeping thread
that was swapped out (the usual case when waking proc0 since only sleeping
threads are eligible to be swapped out), this resulted in a bit of
recursion (e.g. wakeup() -> setrunnable() -> wakeup()).
With sleep queues having separate locks in 6.x and later, this caused a
spin lock LOR (sleepq lock -> sched_lock/thread lock -> sleepq lock).
An attempt was made to fix this in 7.0 by making the proc0 wakeup use
the ithread mechanism for doing the wakeup. However, this required
grabbing proc0's thread lock to perform the wakeup. If proc0 was asleep
elsewhere in the kernel (e.g. waiting for disk I/O), then this degenerated
into the same LOR since the thread lock would be some other sleepq lock.
Fix this by deferring the wakeup of the swapper until after the sleepq
lock held by the upper layer has been locked. The setrunnable() routine
now returns a boolean value to indicate whether or not proc0 needs to be
woken up. The end result is that consumers of the sleepq API such as
*sleep/wakeup, condition variables, sx locks, and lockmgr, have to wake up
proc0 if they get a non-zero return value from sleepq_abort(),
sleepq_broadcast(), or sleepq_signal().
Discussed with: jeff
Glanced at by: sam
Tested by: Jurgen Weber jurgen - ish com au
MFC after: 2 weeks
Diffstat (limited to 'sys/kern/subr_sleepqueue.c')
-rw-r--r-- | sys/kern/subr_sleepqueue.c | 67 |
1 files changed, 49 insertions, 18 deletions
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c index 3878d81..313786a 100644 --- a/sys/kern/subr_sleepqueue.c +++ b/sys/kern/subr_sleepqueue.c @@ -160,7 +160,7 @@ static int sleepq_check_timeout(void); static void sleepq_dtor(void *mem, int size, void *arg); #endif static int sleepq_init(void *mem, int size, int flags); -static void sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, +static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri); static void sleepq_switch(void *wchan, int pri); static void sleepq_timeout(void *arg); @@ -434,7 +434,15 @@ sleepq_catch_signals(void *wchan, int pri) */ if (TD_ON_SLEEPQ(td)) { sq = sleepq_lookup(wchan); - sleepq_resume_thread(sq, td, 0); + if (sleepq_resume_thread(sq, td, 0)) { +#ifdef INVARIANTS + /* + * This thread hasn't gone to sleep yet, so it + * should not be swapped out. + */ + panic("not waking up swapper"); +#endif + } } mtx_unlock_spin(&sc->sc_lock); MPASS(td->td_lock != &sc->sc_lock); @@ -474,7 +482,15 @@ sleepq_switch(void *wchan, int pri) if (td->td_flags & TDF_TIMEOUT) { MPASS(TD_ON_SLEEPQ(td)); sq = sleepq_lookup(wchan); - sleepq_resume_thread(sq, td, 0); + if (sleepq_resume_thread(sq, td, 0)) { +#ifdef INVARIANTS + /* + * This thread hasn't gone to sleep yet, so it + * should not be swapped out. + */ + panic("not waking up swapper"); +#endif + } mtx_unlock_spin(&sc->sc_lock); return; } @@ -631,7 +647,7 @@ sleepq_timedwait_sig(void *wchan, int pri) * Removes a thread from a sleep queue and makes it * runnable. 
*/ -static void +static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri) { struct sleepqueue_chain *sc; @@ -683,7 +699,7 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri) MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX)); if (pri != 0 && td->td_priority > pri) sched_prio(td, pri); - setrunnable(td); + return (setrunnable(td)); } #ifdef INVARIANTS @@ -722,18 +738,19 @@ sleepq_init(void *mem, int size, int flags) /* * Find the highest priority thread sleeping on a wait channel and resume it. */ -void +int sleepq_signal(void *wchan, int flags, int pri, int queue) { struct sleepqueue *sq; struct thread *td, *besttd; + int wakeup_swapper; CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags); KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); MPASS((queue >= 0) && (queue < NR_SLEEPQS)); sq = sleepq_lookup(wchan); if (sq == NULL) - return; + return (0); KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), ("%s: mismatch between sleep/wakeup and cv_*", __func__)); @@ -750,35 +767,40 @@ sleepq_signal(void *wchan, int flags, int pri, int queue) } MPASS(besttd != NULL); thread_lock(besttd); - sleepq_resume_thread(sq, besttd, pri); + wakeup_swapper = sleepq_resume_thread(sq, besttd, pri); thread_unlock(besttd); + return (wakeup_swapper); } /* * Resume all threads sleeping on a specified wait channel. */ -void +int sleepq_broadcast(void *wchan, int flags, int pri, int queue) { struct sleepqueue *sq; struct thread *td; + int wakeup_swapper; CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags); KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); MPASS((queue >= 0) && (queue < NR_SLEEPQS)); sq = sleepq_lookup(wchan); if (sq == NULL) - return; + return (0); KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), ("%s: mismatch between sleep/wakeup and cv_*", __func__)); /* Resume all blocked threads on the sleep queue. 
*/ + wakeup_swapper = 0; while (!TAILQ_EMPTY(&sq->sq_blocked[queue])) { td = TAILQ_FIRST(&sq->sq_blocked[queue]); thread_lock(td); - sleepq_resume_thread(sq, td, pri); + if (sleepq_resume_thread(sq, td, pri)) + wakeup_swapper = 1; thread_unlock(td); } + return (wakeup_swapper); } /* @@ -792,8 +814,10 @@ sleepq_timeout(void *arg) struct sleepqueue *sq; struct thread *td; void *wchan; + int wakeup_swapper; td = arg; + wakeup_swapper = 0; CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)", (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); @@ -809,8 +833,10 @@ sleepq_timeout(void *arg) sq = sleepq_lookup(wchan); MPASS(sq != NULL); td->td_flags |= TDF_TIMEOUT; - sleepq_resume_thread(sq, td, 0); + wakeup_swapper = sleepq_resume_thread(sq, td, 0); thread_unlock(td); + if (wakeup_swapper) + kick_proc0(); return; } @@ -839,10 +865,12 @@ sleepq_timeout(void *arg) MPASS(TD_IS_SLEEPING(td)); td->td_flags &= ~TDF_TIMEOUT; TD_CLR_SLEEPING(td); - setrunnable(td); + wakeup_swapper = setrunnable(td); } else td->td_flags |= TDF_TIMOFAIL; thread_unlock(td); + if (wakeup_swapper) + kick_proc0(); } /* @@ -853,6 +881,7 @@ void sleepq_remove(struct thread *td, void *wchan) { struct sleepqueue *sq; + int wakeup_swapper; /* * Look up the sleep queue for this wait channel, then re-check @@ -876,16 +905,18 @@ sleepq_remove(struct thread *td, void *wchan) thread_lock(td); MPASS(sq != NULL); MPASS(td->td_wchan == wchan); - sleepq_resume_thread(sq, td, 0); + wakeup_swapper = sleepq_resume_thread(sq, td, 0); thread_unlock(td); sleepq_release(wchan); + if (wakeup_swapper) + kick_proc0(); } /* * Abort a thread as if an interrupt had occurred. Only abort * interruptible waits (unfortunately it isn't safe to abort others). */ -void +int sleepq_abort(struct thread *td, int intrval) { struct sleepqueue *sq; @@ -901,7 +932,7 @@ sleepq_abort(struct thread *td, int intrval) * timeout is scheduled anyhow. 
*/ if (td->td_flags & TDF_TIMEOUT) - return; + return (0); CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)", (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); @@ -913,14 +944,14 @@ sleepq_abort(struct thread *td, int intrval) * we have to do it here. */ if (!TD_IS_SLEEPING(td)) - return; + return (0); wchan = td->td_wchan; MPASS(wchan != NULL); sq = sleepq_lookup(wchan); MPASS(sq != NULL); /* Thread is asleep on sleep queue sq, so wake it up. */ - sleepq_resume_thread(sq, td, 0); + return (sleepq_resume_thread(sq, td, 0)); } #ifdef SLEEPQUEUE_PROFILING |