path: root/sys/kern/kern_thread.c
author		kib <kib@FreeBSD.org>	2014-09-03 08:18:07 +0000
committer	kib <kib@FreeBSD.org>	2014-09-03 08:18:07 +0000
commit		401c0a1c8ff088805d19a448e538e6d6608fedf1 (patch)
tree		9484e9ecd7f4377d18ac5c5b4bbd8350c398ad50 /sys/kern/kern_thread.c
parent		e58a48ab6d3934b371fcb4e8f9a3161b64f2b8dc (diff)
Right now, thread_single(SINGLE_EXIT) returns after p_numthreads
reaches 1.  The p_numthreads counter is decremented in thread_exit()
by a call to thread_unlink().  This means that the exiting threads may
still be executing on other CPUs when thread_single(SINGLE_EXIT)
returns.  As a result, the vmspace could be destroyed while its paging
structures are still in use on other CPUs by the exiting threads.

Delay the return from thread_single(SINGLE_EXIT) until all threads
have really been destroyed by thread_stash() after their last switch
out.  The p_exitthreads counter already provides the required
mechanism; move the wait from thread_wait() (which is called from the
wait(2) code) into thread_single().

Reported by:	many (as "panic: pmap active <addr>")
Reviewed by:	alc, jhb
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
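
For readers outside the kernel, the fix boils down to a drain pattern:
count the threads that have begun exiting and refuse to tear down
shared state until that count reaches zero.  The following is a
minimal userspace sketch under that reading, not FreeBSD code;
live_threads, exiting_threads, shared_state, worker() and teardown()
are hypothetical stand-ins for p_numthreads, p_exitthreads, the
vmspace and thread_single(SINGLE_EXIT).

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define	NWORKERS	4

static atomic_int live_threads = NWORKERS;	/* ~ p_numthreads */
static atomic_int exiting_threads;		/* ~ p_exitthreads */
static int *shared_state;			/* ~ the vmspace */

static void *
worker(void *arg)
{
	(void)arg;

	/* Announce that this thread has started exiting. */
	atomic_fetch_add(&exiting_threads, 1);
	atomic_fetch_sub(&live_threads, 1);	/* roughly thread_unlink() */

	/* An exiting thread may still touch shared state for a while. */
	(void)shared_state[0];

	/*
	 * Only now is the thread really gone, roughly thread_stash()
	 * after its last switch out.
	 */
	atomic_fetch_sub(&exiting_threads, 1);
	return (NULL);
}

static void
teardown(void)
{
	/*
	 * Waiting only for live_threads mirrors the old behaviour:
	 * workers that already decremented it may still be reading
	 * shared_state.
	 */
	while (atomic_load(&live_threads) != 0)
		sched_yield();

	/* The fix: also drain the exiting-threads count before freeing. */
	while (atomic_load(&exiting_threads) != 0)
		sched_yield();

	free(shared_state);
	shared_state = NULL;
}

int
main(void)
{
	pthread_t tds[NWORKERS];
	int i;

	shared_state = calloc(1, sizeof(*shared_state));
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tds[i], NULL, worker, NULL);
	teardown();
	for (i = 0; i < NWORKERS; i++)
		pthread_join(tds[i], NULL);
	printf("shared state freed only after all exiters drained\n");
	return (0);
}

The first loop in teardown() corresponds to the pre-commit behaviour of
waiting only for the thread count; the second loop is the commit's
addition, with the caveat that the kernel decrements p_exitthreads from
thread_stash() only after the exiting thread's final context switch,
which a userspace analogue can only approximate.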
Diffstat (limited to 'sys/kern/kern_thread.c')
-rw-r--r--	sys/kern/kern_thread.c	19
1 file changed, 14 insertions, 5 deletions
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 05b07ff..b132510 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -432,6 +432,7 @@ thread_exit(void)
 	 */
 	if (p->p_flag & P_HADTHREADS) {
 		if (p->p_numthreads > 1) {
+			atomic_add_int(&td->td_proc->p_exitthreads, 1);
 			thread_unlink(td);
 			td2 = FIRST_THREAD_IN_PROC(p);
 			sched_exit_thread(td2, td);
@@ -452,7 +453,6 @@ thread_exit(void)
 				}
 			}
 
-			atomic_add_int(&td->td_proc->p_exitthreads, 1);
 			PCPU_SET(deadthread, td);
 		} else {
 			/*
@@ -507,14 +507,12 @@ thread_wait(struct proc *p)
 	struct thread *td;
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
+	KASSERT((p->p_numthreads == 1), ("multiple threads in thread_wait()"));
+	KASSERT((p->p_exitthreads == 0), ("p_exitthreads leaking"));
 	td = FIRST_THREAD_IN_PROC(p);
 	/* Lock the last thread so we spin until it exits cpu_throw(). */
 	thread_lock(td);
 	thread_unlock(td);
-	/* Wait for any remaining threads to exit cpu_throw(). */
-	while (p->p_exitthreads)
-		sched_relinquish(curthread);
 	lock_profile_thread_exit(td);
 	cpuset_rel(td->td_cpuset);
 	td->td_cpuset = NULL;
@@ -722,6 +720,17 @@ stopme:
 		p->p_singlethread = NULL;
 		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
 		thread_unthread(td);
+
+		/*
+		 * Wait for any remaining threads to exit cpu_throw().
+		 */
+		while (p->p_exitthreads != 0) {
+			PROC_SUNLOCK(p);
+			PROC_UNLOCK(p);
+			sched_relinquish(td);
+			PROC_LOCK(p);
+			PROC_SLOCK(p);
+		}
 	}
 	PROC_SUNLOCK(p);
 	return (0);
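
In the added loop, the waiter holds both the proc mutex (PROC_LOCK)
and the proc spin lock (PROC_SLOCK), and it may not yield the CPU with
the spin lock held, so each iteration releases both locks, yields via
sched_relinquish(), and reacquires them in the original order before
rechecking p_exitthreads.  Below is a hedged userspace sketch of that
unlock/yield/relock shape; proc_mtx, proc_slock, exiting and
drain_exiting_threads() are hypothetical pthread stand-ins, not the
kernel primitives.

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t proc_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t proc_slock = PTHREAD_MUTEX_INITIALIZER;
static int exiting;			/* read under proc_slock */

static void
drain_exiting_threads(void)
{
	/* Both locks are held on entry, as in thread_single(). */
	while (exiting != 0) {
		/* Drop both locks before yielding the CPU... */
		pthread_mutex_unlock(&proc_slock);
		pthread_mutex_unlock(&proc_mtx);
		sched_yield();			/* ~ sched_relinquish(td) */
		/* ...then retake them in the original order. */
		pthread_mutex_lock(&proc_mtx);
		pthread_mutex_lock(&proc_slock);
	}
	/* Both locks are held again and exiting == 0 on return. */
}

int
main(void)
{
	pthread_mutex_lock(&proc_mtx);
	pthread_mutex_lock(&proc_slock);
	drain_exiting_threads();	/* returns at once: exiting == 0 */
	pthread_mutex_unlock(&proc_slock);
	pthread_mutex_unlock(&proc_mtx);
	return (0);
}

A brief yield loop rather than a sleep seems sufficient here because,
per the commit message, the exiting threads are destroyed by
thread_stash() shortly after their last switch out, so the counter
drains without the waiter holding anything they need.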