path: root/sys/kern/kern_synch.c
author     jeff <jeff@FreeBSD.org>   2007-06-04 23:50:56 +0000
committer  jeff <jeff@FreeBSD.org>   2007-06-04 23:50:56 +0000
commit     ea7c909871b08e0c77c4e21ea7490715d243d562 (patch)
tree       a5bf0e5812452f39f88440ebc8ff94193caf026c   /sys/kern/kern_synch.c
parent     186ae07cb61840670b6b7bc387b690bef2c2e262 (diff)
Commit 2/14 of sched_lock decomposition.
 - Adapt sleepqueues to the new thread_lock() mechanism.
 - Delay assigning the sleep queue spinlock as the thread lock until after
   we've checked for signals.  It is illegal for a thread to return in
   mi_switch() with any lock assigned to td_lock other than the scheduler
   locks.
 - Change sleepq_catch_signals() to do the switch if necessary to simplify
   the callers.
 - Simplify timeout handling now that locking a sleeping thread has the
   side-effect of locking the sleepqueue.  Some previous races are no
   longer possible.

Tested by:      kris, current@
Tested on:      i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
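For context, a minimal illustrative sketch of the locking pattern this commit
moves kern_synch.c toward: instead of taking the global sched_lock spin mutex,
callers lock the individual thread with thread_lock(), which acquires whatever
spinlock td_lock currently points at (a run queue, sleep queue, or turnstile
lock).  The boost_priority() helper below is hypothetical and shown only for
illustration; it is not part of this commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>

/*
 * Hypothetical helper (illustration only): adjust a thread's priority
 * under the per-thread lock introduced by the sched_lock decomposition.
 */
static void
boost_priority(struct thread *td, u_char pri)
{

	if (pri != 0 && pri != td->td_priority) {
		/* Before this series: mtx_lock_spin(&sched_lock); */
		thread_lock(td);	/* locks whatever td_lock points at */
		sched_prio(td, pri);
		thread_unlock(td);
	}
}

This mirrors the _sleep() hunk in the diff below: the priority update is
serialized against the thread's current container lock rather than against
every other scheduling operation in the system.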
Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r--  sys/kern/kern_synch.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index c45e846..96b4eda 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -213,9 +213,9 @@ _sleep(ident, lock, priority, wmesg, timo)
*/
pri = priority & PRIMASK;
if (pri != 0 && pri != td->td_priority) {
- mtx_lock_spin(&sched_lock);
+ thread_lock(td);
sched_prio(td, pri);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
}
if (timo && catch)
@@ -362,6 +362,7 @@ wakeup_one(ident)
sleepq_lock(ident);
sleepq_signal(ident, SLEEPQ_SLEEP, -1, 0);
+ sleepq_release(ident);
}
/*
@@ -374,8 +375,8 @@ mi_switch(int flags, struct thread *newtd)
struct thread *td;
struct proc *p;
- mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
td = curthread; /* XXX */
+ THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
p = td->td_proc; /* XXX */
KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
@@ -394,12 +395,11 @@ mi_switch(int flags, struct thread *newtd)
* Don't perform context switches from the debugger.
*/
if (kdb_active) {
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
kdb_backtrace();
kdb_reenter();
panic("%s: did not reenter debugger", __func__);
}
-
if (flags & SW_VOL)
td->td_ru.ru_nvcsw++;
else
@@ -466,7 +466,7 @@ setrunnable(struct thread *td)
struct proc *p;
p = td->td_proc;
- mtx_assert(&sched_lock, MA_OWNED);
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
switch (p->p_state) {
case PRS_ZOMBIE:
panic("setrunnable(1)");
@@ -495,7 +495,7 @@ setrunnable(struct thread *td)
if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
p->p_sflag |= PS_SWAPINREQ;
/*
- * due to a LOR between sched_lock and
+ * due to a LOR between the thread lock and
* the sleepqueue chain locks, use
* lower level scheduling functions.
*/