author:    attilio <attilio@FreeBSD.org>  2010-01-23 15:54:21 +0000
committer: attilio <attilio@FreeBSD.org>  2010-01-23 15:54:21 +0000
commit:    9a7f4738f4b1cfdb4d140e79f2f45b83f0c37aa5
tree:      8948b981ee7ffc54e90f7295617953bd509fcc3f  /sys/kern/sched_4bsd.c
parent:    22188bf2d03496348cf79fe3478e52c6b7655095
- Fix a race in sched_switch() of sched_4bsd.
  In the case of the thread being on a sleepqueue or a turnstile, the
  sched_lock was acquired (without the aid of the td_lock interface) and
  the td_lock was dropped.  This broke the locking rules for other
  threads wanting to access the thread (via the td_lock interface) and
  modify its flags (something allowed as long as the container lock
  differs from the one used in sched_switch).
  In order to prevent this situation, while sched_lock is acquired there,
  the td_lock gets blocked. [0]
- Merge ULE's internal function thread_block_switch() into the global
  thread_lock_block() and make the former's semantics the default for
  thread_lock_block().  This means that thread_lock_block() no longer
  disables interrupts when called (and, consequently, thread_unlock_block()
  no longer re-enables them); this must now be done manually where
  necessary.
  Note, however, that ULE's thread_unblock_switch() is not reaped,
  because it reflects a semantic difference specific to ULE (the td_lock
  may not necessarily still be blocked_lock when it is called).  While
  asymmetric, it does describe a remarkable difference in semantics that
  is good to keep in mind.

[0] Reported by:  Kohji Okuno <okuno dot kohji at jp dot panasonic dot com>
Tested by:        Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
MFC:              2 weeks
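For readers unfamiliar with the td_lock interface, the sketch below models
the indirection the fix relies on, in userland C (C11 atomics plus POSIX
spinlocks).  All names here (xthread_lock() and friends) are hypothetical
stand-ins, and the sketch mirrors the kernel's thread_lock() and
thread_lock_block() (sys/kern/kern_mutex.c) in spirit only: a thread is
guarded by whichever lock its lock pointer currently designates, and
"blocking" the lock parks that pointer on a permanently held sentinel so
that concurrent lockers spin until the real lock is published again.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct xthread {
        _Atomic(pthread_spinlock_t *) lockp;    /* models td->td_lock */
        int flags;                              /* models td->td_flags */
};

static pthread_spinlock_t blocked_lock;         /* sentinel, held forever */

/* Models thread_lock(): acquire whichever lock currently guards td. */
static pthread_spinlock_t *
xthread_lock(struct xthread *td)
{
        pthread_spinlock_t *m;

        for (;;) {
                m = atomic_load(&td->lockp);
                if (pthread_spin_trylock(m) != 0)
                        continue;               /* busy or sentinel: spin */
                if (m == atomic_load(&td->lockp))
                        return (m);             /* still current: td is ours */
                pthread_spin_unlock(m);         /* lock moved under us: retry */
        }
}

/*
 * Models thread_lock_block(): the caller owns *td->lockp.  Park td on the
 * sentinel, release the old lock, and return it so the switch path can
 * publish it again later (in the kernel, cpu_switch() performs that store).
 */
static pthread_spinlock_t *
xthread_lock_block(struct xthread *td)
{
        pthread_spinlock_t *old = atomic_load(&td->lockp);

        atomic_store(&td->lockp, &blocked_lock);
        pthread_spin_unlock(old);
        return (old);
}

/* Models the hand-off: publish the real lock again, so spinners in
 * xthread_lock() acquire it and see a consistent thread. */
static void
xthread_lock_unblock(struct xthread *td, pthread_spinlock_t *lock)
{
        atomic_store(&td->lockp, lock);
}

int
main(void)
{
        pthread_spinlock_t container;
        pthread_spinlock_t *old;
        struct xthread td = { .flags = 0 };

        pthread_spin_init(&blocked_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_lock(&blocked_lock);       /* never released */
        pthread_spin_init(&container, PTHREAD_PROCESS_PRIVATE);
        atomic_store(&td.lockp, &container);

        xthread_lock(&td);                      /* own the container lock */
        old = xthread_lock_block(&td);          /* td now parked on sentinel */
        /* ... the context switch would happen here; other CPUs spin ... */
        xthread_lock_unblock(&td, old);         /* hand-off completes */
        printf("flags=%d\n", td.flags);
        return (0);
}

The pre-patch 4BSD code released the container lock outright (via
thread_unlock()) instead of parking it on the sentinel, so the window
between that release and cpu_switch() let another CPU take the lock and
modify the thread mid-switch; that is the race reported in [0].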
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--  sys/kern/sched_4bsd.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 99ea7b8..bcec5b9 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -920,9 +920,11 @@ sched_sleep(struct thread *td, int pri)
 void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
+        struct mtx *tmtx;
         struct td_sched *ts;
         struct proc *p;
 
+        tmtx = NULL;
         ts = td->td_sched;
         p = td->td_proc;
 
@@ -931,10 +933,11 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
         /*
          * Switch to the sched lock to fix things up and pick
          * a new thread.
+         * Block the td_lock in order to avoid breaking the critical path.
          */
         if (td->td_lock != &sched_lock) {
                 mtx_lock_spin(&sched_lock);
-                thread_unlock(td);
+                tmtx = thread_lock_block(td);
         }
 
         if ((td->td_flags & TDF_NOLOAD) == 0)
@@ -1004,7 +1007,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
                 (*dtrace_vtime_switch_func)(newtd);
 #endif
 
-                cpu_switch(td, newtd, td->td_lock);
+                cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
                 lock_profile_obtain_lock_success(&sched_lock.lock_object,
                     0, 0, __FILE__, __LINE__);
                 /*
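The ternary in the cpu_switch() call is the other half of the hand-off.
On architectures such as amd64, cpu_switch() stores its third argument
into the departing thread's td_lock once the old context is saved, and
that store is what lets threads spinning in thread_lock() proceed.  After
thread_lock_block(), td->td_lock points at blocked_lock, so passing it
here would leave those spinners stuck; tmtx restores the original
container lock (the sleepqueue or turnstile lock) instead.  The real
cpu_switch() is per-architecture assembly (e.g. sys/amd64/amd64/cpu_switch.S);
the C rendering below is a sketch only, with save_context() and
restore_context() as hypothetical stand-ins:

#include <stdatomic.h>

struct mtx;
struct thread {
        _Atomic(struct mtx *) td_lock;
};

void save_context(struct thread *);     /* hypothetical stand-in */
void restore_context(struct thread *);  /* hypothetical stand-in */

void
cpu_switch_sketch(struct thread *old, struct thread *new, struct mtx *mtx)
{
        save_context(old);
        /*
         * Publish the lock that guards 'old' from now on.  When
         * sched_switch() blocked the lock, 'mtx' is tmtx (the original
         * container lock) rather than &blocked_lock, so waiters end up
         * acquiring the right lock.
         */
        atomic_store_explicit(&old->td_lock, mtx, memory_order_release);
        restore_context(new);           /* does not return for 'old' */
}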