path: root/sys/kern/sched_ule.c
author	attilio <attilio@FreeBSD.org>	2010-01-23 15:54:21 +0000
committer	attilio <attilio@FreeBSD.org>	2010-01-23 15:54:21 +0000
commit	9a7f4738f4b1cfdb4d140e79f2f45b83f0c37aa5 (patch)
tree	8948b981ee7ffc54e90f7295617953bd509fcc3f /sys/kern/sched_ule.c
parent	22188bf2d03496348cf79fe3478e52c6b7655095 (diff)
- Fix a race in sched_switch() of sched_4bsd.
  When the thread sits on a sleepqueue or a turnstile, sched_lock was
  acquired (without the aid of the td_lock interface) and td_lock was
  dropped. This broke the locking rules for other threads wanting to
  access the thread (via the td_lock interface) and modify its flags
  (allowed as long as the container lock differs from the one used in
  sched_switch). In order to prevent this situation, td_lock now gets
  blocked while sched_lock is acquired there. [0]
- Merge ULE's internal function thread_block_switch() into the global
  thread_lock_block() and make the former's semantics the default for
  thread_lock_block(). This means that thread_lock_block() no longer
  disables interrupts when called (and consequently thread_lock_unblock()
  no longer re-enables them when called); this should be done manually
  when necessary.
  Note, however, that ULE's thread_unblock_switch() is not removed,
  because it reflects a real semantic difference in ULE (td_lock is not
  necessarily still blocked_lock when it is called). While asymmetric,
  it describes a notable difference in semantics that is worth keeping
  in mind.

[0] Reported by:	Kohji Okuno <okuno dot kohji at jp dot panasonic dot com>
Tested by:	Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
MFC:	2 weeks
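A minimal sketch of the resulting calling convention (illustrative only: move_td_lock() and its arguments are hypothetical and not part of this commit; the real callers are sched_setcpu() and sched_switch_migrate() in the diff below). Since thread_lock_block() no longer disables interrupts, a caller that needs them off across the hand-over now brackets the sequence itself:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mutex.h>

/*
 * Hypothetical helper: block td_lock, take the new container lock and
 * publish it, with the caller (not thread_lock_block()) managing the
 * interrupt state around the hand-over.
 */
static void
move_td_lock(struct thread *td, struct mtx *new)
{
	spinlock_enter();		/* interrupts are the caller's job now */
	thread_lock_block(td);		/* td_lock -> &blocked_lock, old lock dropped */
	mtx_lock_spin(new);		/* acquire the destination container lock */
	thread_lock_unblock(td, new);	/* hand the thread over to the new lock */
	spinlock_exit();
}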
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--	sys/kern/sched_ule.c	27
1 file changed, 6 insertions(+), 21 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 43c9a86..f29dd95 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -301,7 +301,6 @@ static int sched_pickcpu(struct thread *, int);
static void sched_balance(void);
static int sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct thread *, int, int);
-static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
@@ -1106,9 +1105,11 @@ sched_setcpu(struct thread *td, int cpu, int flags)
* The hard case, migration, we need to block the thread first to
* prevent order reversals with other cpus locks.
*/
+ spinlock_enter();
thread_lock_block(td);
TDQ_LOCK(tdq);
thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
+ spinlock_exit();
return (tdq);
}
@@ -1715,23 +1716,6 @@ sched_unlend_user_prio(struct thread *td, u_char prio)
}
/*
- * Block a thread for switching. Similar to thread_block() but does not
- * bump the spin count.
- */
-static inline struct mtx *
-thread_block_switch(struct thread *td)
-{
- struct mtx *lock;
-
- THREAD_LOCK_ASSERT(td, MA_OWNED);
- lock = td->td_lock;
- td->td_lock = &blocked_lock;
- mtx_unlock_spin(lock);
-
- return (lock);
-}
-
-/*
* Handle migration from sched_switch(). This happens only for
* cpu binding.
*/
@@ -1749,7 +1733,7 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
* not holding either run-queue lock.
*/
spinlock_enter();
- thread_block_switch(td); /* This releases the lock on tdq. */
+ thread_lock_block(td); /* This releases the lock on tdq. */
/*
* Acquire both run-queue locks before placing the thread on the new
@@ -1769,7 +1753,8 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
}
/*
- * Release a thread that was blocked with thread_block_switch().
+ * Variadic version of thread_lock_unblock() that does not assume td_lock
+ * is blocked.
*/
static inline void
thread_unblock_switch(struct thread *td, struct mtx *mtx)
@@ -1825,7 +1810,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
} else {
/* This thread must be going to sleep. */
TDQ_LOCK(tdq);
- mtx = thread_block_switch(td);
+ mtx = thread_lock_block(td);
tdq_load_rem(tdq, td);
}
/*
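For the asymmetry mentioned in the log message, the two unblock helpers differ roughly as below (a sketch reconstructed from the surrounding kernel sources of that era, not part of this diff; the exact assertions may vary): the MI thread_lock_unblock() insists that td_lock is still blocked_lock, while ULE's thread_unblock_switch() only publishes the new pointer, since sched_switch() can reach it with td_lock already changed.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <machine/atomic.h>

/* MI helper: assumes the thread is still parked on blocked_lock. */
void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, (uintptr_t)new);
}

/* ULE helper: no blocked_lock assertion, td_lock may already point elsewhere. */
static inline void
thread_unblock_switch(struct thread *td, struct mtx *mtx)
{
	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, (uintptr_t)mtx);
}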