diff options
author | peter <peter@FreeBSD.org> | 2004-03-13 22:31:39 +0000 |
---|---|---|
committer | peter <peter@FreeBSD.org> | 2004-03-13 22:31:39 +0000 |
commit | 1cb95fd2b70283ee01393976b8c4f6582fe4c61f (patch) | |
tree | 889a20a7869b0ea5ed48e021f905fe415332a764 | |
parent | 43cfb3a55ff1241827aed24e87f2cd5f80566c2b (diff) | |
download | FreeBSD-src-1cb95fd2b70283ee01393976b8c4f6582fe4c61f.zip FreeBSD-src-1cb95fd2b70283ee01393976b8c4f6582fe4c61f.tar.gz |
Push Giant down a little further:
- no longer serialize on Giant for thread_single*() and family in fork,
exit and exec
- thread_wait() is mpsafe, assert no Giant
- reduce scope of Giant in exit to not cover thread_wait and just do
vm_waitproc().
- assert that thread_single() family are not called with Giant
- remove the DROP/PICKUP_GIANT macros from thread_single() family
- assert that thread_suspend_check() is not called with Giant
- remove manual drop_giant hack in thread_suspend_check since we know it
isn't held.
- remove the DROP/PICKUP_GIANT macros from thread_suspend_check() family
- mark kse_create() mpsafe
-rw-r--r-- | sys/kern/kern_exec.c | 2 | ||||
-rw-r--r-- | sys/kern/kern_exit.c | 7 | ||||
-rw-r--r-- | sys/kern/kern_fork.c | 3 | ||||
-rw-r--r-- | sys/kern/kern_kse.c | 13 | ||||
-rw-r--r-- | sys/kern/kern_thread.c | 13 | ||||
-rw-r--r-- | sys/kern/subr_trap.c | 3 | ||||
-rw-r--r-- | sys/kern/syscalls.master | 2 |
7 files changed, 14 insertions, 29 deletions
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index d2f60d7..a09dba2 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -253,7 +253,6 @@ kern_execve(td, fname, argv, envv, mac_p) * that might allow a local user to illicitly obtain elevated * privileges. */ - mtx_lock(&Giant); PROC_LOCK(p); KASSERT((p->p_flag & P_INEXEC) == 0, ("%s(): process already has P_INEXEC flag", __func__)); @@ -271,7 +270,6 @@ kern_execve(td, fname, argv, envv, mac_p) td->td_mailbox = NULL; thread_single_end(); } - mtx_unlock(&Giant); p->p_flag |= P_INEXEC; PROC_UNLOCK(p); diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index 885fb9a..885404e 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -137,7 +137,6 @@ exit1(struct thread *td, int rv) /* * MUST abort all other threads before proceeding past here. */ - mtx_lock(&Giant); PROC_LOCK(p); if (p->p_flag & P_SA || p->p_numthreads > 1) { /* @@ -160,9 +159,8 @@ exit1(struct thread *td, int rv) * from userret(). thread_exit() will unsuspend us * when the last other thread exits. */ - if (thread_single(SINGLE_EXIT)) { + if (thread_single(SINGLE_EXIT)) panic ("Exit: Single threading fouled up"); - } /* * All other activity in this process is now stopped. * Remove excess KSEs and KSEGRPS. XXXKSE (when we have them) @@ -172,7 +170,6 @@ exit1(struct thread *td, int rv) p->p_flag &= ~P_SA; thread_single_end(); /* Don't need this any more. */ } - mtx_unlock(&Giant); /* * With this state set: * Any thread entering the kernel from userspace will thread_exit() @@ -716,7 +713,6 @@ loop: /* * do any thread-system specific cleanups */ - mtx_lock(&Giant); thread_wait(p); /* @@ -724,6 +720,7 @@ loop: * to free anything that cpu_exit couldn't * release while still running in process context. 
*/ + mtx_lock(&Giant); vm_waitproc(p); mtx_unlock(&Giant); #ifdef MAC diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index fd229c9..03d12bb 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -272,16 +272,13 @@ fork1(td, flags, pages, procp) * where they will try restart in the parent and will * be aborted in the child. */ - mtx_lock(&Giant); PROC_LOCK(p1); if (thread_single(SINGLE_NO_EXIT)) { /* Abort. Someone else is single threading before us. */ PROC_UNLOCK(p1); - mtx_unlock(&Giant); return (ERESTART); } PROC_UNLOCK(p1); - mtx_unlock(&Giant); /* * All other activity in this process * is now suspended at the user boundary, diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c index 037266a..1995204 100644 --- a/sys/kern/kern_kse.c +++ b/sys/kern/kern_kse.c @@ -1331,13 +1331,14 @@ thread_exit(void) /* * Do any thread specific cleanups that may be needed in wait() - * called with Giant held, proc and schedlock not held. + * called with Giant, proc and schedlock not held. */ void thread_wait(struct proc *p) { struct thread *td; + mtx_assert(&Giant, MA_NOTOWNED); KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()")); KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()")); FOREACH_THREAD_IN_PROC(p, td) { @@ -1468,6 +1469,7 @@ kse_purge(struct proc *p, struct thread *td) void thread_alloc_spare(struct thread *td, struct thread *spare) { + if (td->td_standin) return; if (spare == NULL) @@ -1876,7 +1878,7 @@ thread_single(int force_exit) td = curthread; p = td->td_proc; - mtx_assert(&Giant, MA_OWNED); + mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT((td != NULL), ("curthread is NULL")); @@ -1933,11 +1935,9 @@ thread_single(int force_exit) * In the mean time we suspend as well. 
*/ thread_suspend_one(td); - DROP_GIANT(); PROC_UNLOCK(p); mi_switch(SW_VOL); mtx_unlock_spin(&sched_lock); - PICKUP_GIANT(); PROC_LOCK(p); mtx_lock_spin(&sched_lock); } @@ -1991,6 +1991,7 @@ thread_suspend_check(int return_instead) td = curthread; p = td->td_proc; + mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); while (P_SHOULDSTOP(p)) { if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { @@ -2016,8 +2017,6 @@ thread_suspend_check(int return_instead) * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. */ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { - while (mtx_owned(&Giant)) - mtx_unlock(&Giant); if (p->p_flag & P_SA) thread_exit(); else @@ -2035,11 +2034,9 @@ thread_suspend_check(int return_instead) thread_unsuspend_one(p->p_singlethread); } } - DROP_GIANT(); PROC_UNLOCK(p); mi_switch(SW_INVOL); mtx_unlock_spin(&sched_lock); - PICKUP_GIANT(); PROC_LOCK(p); } return (0); diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 037266a..1995204 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -1331,13 +1331,14 @@ thread_exit(void) /* * Do any thread specific cleanups that may be needed in wait() - * called with Giant held, proc and schedlock not held. + * called with Giant, proc and schedlock not held. 
*/ void thread_wait(struct proc *p) { struct thread *td; + mtx_assert(&Giant, MA_NOTOWNED); KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()")); KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()")); FOREACH_THREAD_IN_PROC(p, td) { @@ -1468,6 +1469,7 @@ kse_purge(struct proc *p, struct thread *td) void thread_alloc_spare(struct thread *td, struct thread *spare) { + if (td->td_standin) return; if (spare == NULL) @@ -1876,7 +1878,7 @@ thread_single(int force_exit) td = curthread; p = td->td_proc; - mtx_assert(&Giant, MA_OWNED); + mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT((td != NULL), ("curthread is NULL")); @@ -1933,11 +1935,9 @@ thread_single(int force_exit) * In the mean time we suspend as well. */ thread_suspend_one(td); - DROP_GIANT(); PROC_UNLOCK(p); mi_switch(SW_VOL); mtx_unlock_spin(&sched_lock); - PICKUP_GIANT(); PROC_LOCK(p); mtx_lock_spin(&sched_lock); } @@ -1991,6 +1991,7 @@ thread_suspend_check(int return_instead) td = curthread; p = td->td_proc; + mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); while (P_SHOULDSTOP(p)) { if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { @@ -2016,8 +2017,6 @@ thread_suspend_check(int return_instead) * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. */ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { - while (mtx_owned(&Giant)) - mtx_unlock(&Giant); if (p->p_flag & P_SA) thread_exit(); else @@ -2035,11 +2034,9 @@ thread_suspend_check(int return_instead) thread_unsuspend_one(p->p_singlethread); } } - DROP_GIANT(); PROC_UNLOCK(p); mi_switch(SW_INVOL); mtx_unlock_spin(&sched_lock); - PICKUP_GIANT(); PROC_LOCK(p); } return (0); diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c index 3e40672..9d17208 100644 --- a/sys/kern/subr_trap.c +++ b/sys/kern/subr_trap.c @@ -114,9 +114,8 @@ userret(td, frame, oticks) /* * Do special thread processing, e.g. upcall tweaking and such. 
*/ - if (p->p_flag & P_SA) { + if (p->p_flag & P_SA) thread_userret(td, frame); - } /* * Charge system time if profiling. diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master index 171f2d4..f23f056 100644 --- a/sys/kern/syscalls.master +++ b/sys/kern/syscalls.master @@ -540,7 +540,7 @@ int flags); } 379 MSTD { int kse_exit(void); } 380 MSTD { int kse_wakeup(struct kse_mailbox *mbx); } -381 STD { int kse_create(struct kse_mailbox *mbx, \ +381 MSTD { int kse_create(struct kse_mailbox *mbx, \ int newgroup); } 382 MSTD { int kse_thr_interrupt(struct kse_thr_mailbox *tmbx, int cmd, \ long data); } |