From 9a7f4738f4b1cfdb4d140e79f2f45b83f0c37aa5 Mon Sep 17 00:00:00 2001
From: attilio
Date: Sat, 23 Jan 2010 15:54:21 +0000
Subject: - Fix a race in sched_switch() of sched_4bsd. In the case of the
 thread being on a sleepqueue or a turnstile, the sched_lock was acquired
 (without the aid of the td_lock interface) and the td_lock was dropped. This
 could break the locking rules for other threads willing to access the thread
 (via the td_lock interface) and modify its flags (allowed as long as the
 container lock is different from the one used in sched_switch). In order to
 prevent this situation, while sched_lock is acquired there, the td_lock gets
 blocked. [0]
 - Merge ULE's internal function thread_block_switch() into the global
 thread_lock_block() and make the former's semantics the default for
 thread_lock_block(). This means that thread_lock_block() will not disable
 interrupts when called (and consequently thread_lock_unblock() will not
 re-enable them when called). This should be done manually when necessary.
 Note, however, that ULE's thread_unblock_switch() is not reaped because it
 reflects a difference in semantics in ULE (the td_lock may not necessarily
 still be blocked_lock when calling it). While asymmetric, it does describe a
 remarkable difference in semantics that is good to keep in mind.

 [0] Reported by: Kohji Okuno
 Tested by: Giovanni Trematerra
 MFC: 2 weeks
---
 sys/kern/sched_ule.c | 27 ++++++---------------------
 1 file changed, 6 insertions(+), 21 deletions(-)

(limited to 'sys/kern/sched_ule.c')

diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 43c9a86..f29dd95 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -301,7 +301,6 @@ static int sched_pickcpu(struct thread *, int);
 static void sched_balance(void);
 static int sched_balance_pair(struct tdq *, struct tdq *);
 static inline struct tdq *sched_setcpu(struct thread *, int, int);
-static inline struct mtx *thread_block_switch(struct thread *);
 static inline void thread_unblock_switch(struct thread *, struct mtx *);
 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
 static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
@@ -1106,9 +1105,11 @@ sched_setcpu(struct thread *td, int cpu, int flags)
 	 * The hard case, migration, we need to block the thread first to
 	 * prevent order reversals with other cpus locks.
 	 */
+	spinlock_enter();
 	thread_lock_block(td);
 	TDQ_LOCK(tdq);
 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
+	spinlock_exit();
 	return (tdq);
 }
 
@@ -1715,23 +1716,6 @@ sched_unlend_user_prio(struct thread *td, u_char prio)
 }
 
 /*
- * Block a thread for switching. Similar to thread_block() but does not
- * bump the spin count.
- */
-static inline struct mtx *
-thread_block_switch(struct thread *td)
-{
-	struct mtx *lock;
-
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	lock = td->td_lock;
-	td->td_lock = &blocked_lock;
-	mtx_unlock_spin(lock);
-
-	return (lock);
-}
-
-/*
  * Handle migration from sched_switch(). This happens only for
  * cpu binding.
  */
@@ -1749,7 +1733,7 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
 	 * not holding either run-queue lock.
 	 */
 	spinlock_enter();
-	thread_block_switch(td);	/* This releases the lock on tdq. */
+	thread_lock_block(td);	/* This releases the lock on tdq. */
 
 	/*
 	 * Acquire both run-queue locks before placing the thread on the new
@@ -1769,7 +1753,8 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
 }
 
 /*
- * Release a thread that was blocked with thread_block_switch().
+ * Variadic version of thread_lock_unblock() that does not assume td_lock
+ * is blocked.
  */
 static inline void
 thread_unblock_switch(struct thread *td, struct mtx *mtx)
@@ -1825,7 +1810,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	} else {
 		/* This thread must be going to sleep. */
 		TDQ_LOCK(tdq);
-		mtx = thread_block_switch(td);
+		mtx = thread_lock_block(td);
 		tdq_load_rem(tdq, td);
 	}
 	/*
--
cgit v1.1
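For reference, the sched_setcpu() hunk above also illustrates the caller-side discipline that the new thread_lock_block() semantics impose: the primitive no longer touches the interrupt state, so the caller brackets the whole td_lock handoff itself. A commented restatement of that sequence, a sketch using only the KPIs that already appear in this patch:

	spinlock_enter();			/* keep interrupts off across the handoff */
	thread_lock_block(td);			/* td->td_lock = &blocked_lock; old lock dropped */
	TDQ_LOCK(tdq);				/* take the destination run-queue lock */
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));	/* publish the new td_lock */
	spinlock_exit();			/* restore the previous interrupt state */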
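Blocking td_lock closes the race in the first item because FreeBSD's thread_lock() retries until the lock it acquired is still the thread's td_lock, and blocked_lock itself can never be taken, so other threads simply wait until the new container lock is published. Below is a minimal userland model of that handoff, not kernel code: it assumes only C11 atomics and pthreads, and every identifier in it (kthread, kthread_lock(), kthread_lock_block(), kthread_lock_unblock(), sleepq_lock, runq_lock, flag_setter) is a local stand-in for the kernel primitive of similar name. The kernel additionally wraps the handoff in spinlock_enter()/spinlock_exit(), which has no userland equivalent here.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct kthread {
	pthread_mutex_t *_Atomic lock;	/* which container lock protects us (td_lock) */
	int		 flags;		/* protected by whatever *lock currently is */
};

/* Stand-in for blocked_lock: only ever used as a marker, never acquired. */
static pthread_mutex_t blocked_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sleepq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;

static struct kthread td0;

/* Model of thread_lock(): loop until the lock we took is still current. */
static pthread_mutex_t *
kthread_lock(struct kthread *td)
{
	pthread_mutex_t *m;

	for (;;) {
		m = atomic_load(&td->lock);
		if (m == &blocked_lock)		/* handoff in progress, wait */
			continue;
		pthread_mutex_lock(m);
		if (m == atomic_load(&td->lock))
			return (m);		/* stable: this lock protects td */
		pthread_mutex_unlock(m);	/* the lock moved under us, retry */
	}
}

/* Model of thread_lock_block(): mark the handoff, drop the old lock. */
static pthread_mutex_t *
kthread_lock_block(struct kthread *td)
{
	pthread_mutex_t *old;

	old = atomic_load(&td->lock);
	atomic_store(&td->lock, &blocked_lock);
	pthread_mutex_unlock(old);
	return (old);
}

/* Model of thread_lock_unblock(): publish the new container lock. */
static void
kthread_lock_unblock(struct kthread *td, pthread_mutex_t *new)
{
	atomic_store(&td->lock, new);
}

/* Another CPU modifying td0's flags through the td_lock interface. */
static void *
flag_setter(void *arg)
{
	pthread_mutex_t *m;

	(void)arg;
	m = kthread_lock(&td0);
	td0.flags |= 1;			/* serialized against the migration */
	pthread_mutex_unlock(m);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	atomic_store(&td0.lock, &sleepq_lock);
	pthread_create(&t, NULL, flag_setter, NULL);

	/* The "scheduler" migrating td0 from sleepq_lock to runq_lock. */
	(void)kthread_lock(&td0);		/* own the current container lock */
	kthread_lock_block(&td0);		/* td_lock -> blocked_lock, old lock dropped */
	pthread_mutex_lock(&runq_lock);
	kthread_lock_unblock(&td0, &runq_lock);	/* new container lock published */
	pthread_mutex_unlock(&runq_lock);

	pthread_join(t, NULL);
	printf("td0.flags = %d\n", td0.flags);
	return (0);
}

Built with cc -pthread, the program prints td0.flags = 1 whether flag_setter wins the race for sleepq_lock before the migration starts or has to chase the pointer to runq_lock afterwards.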