path: root/sys/kern/kern_thread.c
author      jeff <jeff@FreeBSD.org>  2007-06-12 07:24:46 +0000
committer   jeff <jeff@FreeBSD.org>  2007-06-12 07:24:46 +0000
commit      49712c9a601c8ab93eac4b8f8cc221c287a68719 (patch)
tree        871d4bb36f57e3d092d9b82e1e69c5c1633eccea /sys/kern/kern_thread.c
parent      aa6c27d81763f98c4700e9a4f929f862980b366e (diff)
download    FreeBSD-src-49712c9a601c8ab93eac4b8f8cc221c287a68719.zip
            FreeBSD-src-49712c9a601c8ab93eac4b8f8cc221c287a68719.tar.gz
Solve a complex exit race introduced with thread_lock:

- Add a count of exiting threads, p_exitthreads, to struct proc.
- Increment p_exitthreads when we set the deadthread in thread_exit().
- When we thread_stash() a deadthread, use an atomic to drop the count.
- Spin until the p_exitthreads count reaches 0 in thread_wait().
- Lock the last exiting thread momentarily to be certain that it has
  exited cpu_throw().
- Restructure thread_wait(). It does not need a loop as there will only
  ever be one thread.

Tested by:    moose@opera.com
Reported by:  kris, moose@opera.com
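For readers who want to see the shape of the fix outside the kernel, here is a
minimal userspace sketch of the same accounting protocol, using C11 atomics
and pthreads in place of the kernel's atomic_add_int()/atomic_subtract_rel_int(),
the per-thread lock, and sched_relinquish(). The proc layout and the exiter()/
waiter() names are assumptions made for the example, not kernel API.

/*
 * Sketch of the exit-accounting pattern: the exiting side bumps
 * p_exitthreads, does its final work under the lock, then drops the
 * count with release semantics; the waiting side takes and releases
 * the lock once, then yields until the count drains to zero.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

struct proc {
        atomic_int p_exitthreads;       /* threads exiting, not yet reaped */
        pthread_mutex_t td_lock;        /* stands in for the thread lock */
};

static void *
exiter(void *arg)
{
        struct proc *p = arg;

        /* thread_exit(): announce that this thread is on its way out. */
        atomic_fetch_add(&p->p_exitthreads, 1);
        pthread_mutex_lock(&p->td_lock);
        /* ... the last code before cpu_throw() would run here ... */
        pthread_mutex_unlock(&p->td_lock);
        /* thread_stash(): release-decrement once truly off the CPU. */
        atomic_fetch_sub_explicit(&p->p_exitthreads, 1,
            memory_order_release);
        return (NULL);
}

static void
waiter(struct proc *p)
{
        /* thread_wait(): lock/unlock to serialize with the exiting thread, */
        pthread_mutex_lock(&p->td_lock);
        pthread_mutex_unlock(&p->td_lock);
        /* then yield until every exiting thread has dropped the count. */
        while (atomic_load(&p->p_exitthreads) != 0)
                sched_yield();  /* plays the role of sched_relinquish() */
}

int
main(void)
{
        struct proc p = { .td_lock = PTHREAD_MUTEX_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, exiter, &p);
        waiter(&p);
        pthread_join(&t, NULL);
        printf("all exiting threads drained\n");
        return (0);
}

The release ordering on the decrement mirrors atomic_subtract_rel_int(): it
guarantees that everything the exiting thread did beforehand is visible to the
waiter by the time the count is observed to reach zero.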
Diffstat (limited to 'sys/kern/kern_thread.c')
-rw-r--r--  sys/kern/kern_thread.c  48
1 file changed, 33 insertions, 15 deletions
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 9c1c02d..abbac4a 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -73,6 +73,8 @@ TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
+static void thread_zombie(struct thread *);
+
#ifdef KSE
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
@@ -248,11 +250,11 @@ threadinit(void)
}
/*
- * Stash an embarasingly extra thread into the zombie thread queue.
+ * Place an unused thread on the zombie list.
* Use the slpq as that must be unused by now.
*/
void
-thread_stash(struct thread *td)
+thread_zombie(struct thread *td)
{
mtx_lock_spin(&zombie_lock);
TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
@@ -260,6 +262,16 @@ thread_stash(struct thread *td)
}
/*
+ * Release a thread that has exited after cpu_throw().
+ */
+void
+thread_stash(struct thread *td)
+{
+ atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
+ thread_zombie(td);
+}
+
+/*
* Reap zombie kse resource.
*/
void
@@ -371,7 +383,7 @@ thread_exit(void)
* Note that we don't need to free the cred here as it
* is done in thread_reap().
*/
- thread_stash(td->td_standin);
+ thread_zombie(td->td_standin);
td->td_standin = NULL;
}
#endif
@@ -440,6 +452,7 @@ thread_exit(void)
*/
upcall_remove(td);
#endif
+ atomic_add_int(&td->td_proc->p_exitthreads, 1);
PCPU_SET(deadthread, td);
} else {
/*
@@ -481,20 +494,25 @@ thread_wait(struct proc *p)
mtx_assert(&Giant, MA_NOTOWNED);
KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
- FOREACH_THREAD_IN_PROC(p, td) {
+ td = FIRST_THREAD_IN_PROC(p);
#ifdef KSE
- if (td->td_standin != NULL) {
- if (td->td_standin->td_ucred != NULL) {
- crfree(td->td_standin->td_ucred);
- td->td_standin->td_ucred = NULL;
- }
- thread_free(td->td_standin);
- td->td_standin = NULL;
+ if (td->td_standin != NULL) {
+ if (td->td_standin->td_ucred != NULL) {
+ crfree(td->td_standin->td_ucred);
+ td->td_standin->td_ucred = NULL;
}
-#endif
- cpu_thread_clean(td);
- crfree(td->td_ucred);
+ thread_free(td->td_standin);
+ td->td_standin = NULL;
}
+#endif
+ /* Lock the last thread so we spin until it exits cpu_throw(). */
+ thread_lock(td);
+ thread_unlock(td);
+ /* Wait for any remaining threads to exit cpu_throw(). */
+ while (p->p_exitthreads)
+ sched_relinquish(curthread);
+ cpu_thread_clean(td);
+ crfree(td->td_ucred);
thread_reap(); /* check for zombie threads etc. */
}
@@ -548,7 +566,7 @@ thread_unthread(struct thread *td)
td->td_mailbox = NULL;
td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
if (td->td_standin != NULL) {
- thread_stash(td->td_standin);
+ thread_zombie(td->td_standin);
td->td_standin = NULL;
}
sched_set_concurrency(p, 1);
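As a companion sketch, the split the patch makes between thread_zombie()
(enqueue only) and thread_stash() (account for the exit, then enqueue) can be
modeled in userspace with the TAILQ macros from sys/queue.h and a pthread
mutex standing in for the zombie spin mutex. The structures and the simplified
thread_reap() below are assumptions for illustration, not the kernel versions.

/*
 * Userspace model of the zombie-list split: thread_zombie() only
 * queues a dead thread, thread_stash() also drops the owning
 * process's exit count before queueing.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <sys/queue.h>

struct proc {
        atomic_int p_exitthreads;
};

struct thread {
        TAILQ_ENTRY(thread) td_slpq;    /* reuse the sleep-queue linkage */
        struct proc *td_proc;
};

static TAILQ_HEAD(, thread) zombie_threads =
    TAILQ_HEAD_INITIALIZER(zombie_threads);
static pthread_mutex_t zombie_lock = PTHREAD_MUTEX_INITIALIZER;

/* thread_zombie(): park a thread that will never run again. */
static void
thread_zombie(struct thread *td)
{
        pthread_mutex_lock(&zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
        pthread_mutex_unlock(&zombie_lock);
}

/* thread_stash(): account for the exit, then park the thread. */
static void
thread_stash(struct thread *td)
{
        atomic_fetch_sub_explicit(&td->td_proc->p_exitthreads, 1,
            memory_order_release);
        thread_zombie(td);
}

/* thread_reap(): drain and free everything on the zombie list. */
static void
thread_reap(void)
{
        struct thread *td;

        pthread_mutex_lock(&zombie_lock);
        while ((td = TAILQ_FIRST(&zombie_threads)) != NULL) {
                TAILQ_REMOVE(&zombie_threads, td, td_slpq);
                free(td);
        }
        pthread_mutex_unlock(&zombie_lock);
}

int
main(void)
{
        struct proc p = { 0 };
        struct thread *td = calloc(1, sizeof(*td));

        td->td_proc = &p;
        atomic_fetch_add(&p.p_exitthreads, 1);  /* as in thread_exit() */
        thread_stash(td);                       /* decrement + enqueue */
        thread_reap();
        return (atomic_load(&p.p_exitthreads)); /* 0 on success */
}

Freeing in thread_reap() is only the userspace analogue of recycling; the
kernel instead returns reaped threads to the thread UMA zone via thread_free().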