path: root/sys/kern
author		davidxu <davidxu@FreeBSD.org>	2004-10-06 00:40:41 +0000
committer	davidxu <davidxu@FreeBSD.org>	2004-10-06 00:40:41 +0000
commit		793ea9317eb55a19242d9f6648f3a2f128b8934c (patch)
tree		432efd811bd18d95f48da3533a80d28fea1512ec /sys/kern
parent		c2f56cb723c50c2fdc1fdc81aa886ca10d05c837 (diff)
In the original kern_execve() code, the function starts by forcing all other
threads to suicide. The problem is that execve() can fail, and a failed
execve() would then have turned a threaded process into an unthreaded one;
that side effect is unexpected.

The new code introduces a new single-threading mode, SINGLE_BOUNDARY. In this
mode, all threads except the single-threading thread suspend themselves at the
user boundary. We cannot use SINGLE_NO_EXIT because we want to start from a
clean state if execve() succeeds: suspending the other threads at an unknown
point, later resuming them from there, and then forcing them to exit at the
user boundary could leave the process starting from a dirty state. If execve()
succeeds, the current thread upgrades to SINGLE_EXIT mode and forces the other
threads to suicide at the user boundary; otherwise the other threads are
resumed and their interrupted syscalls are restarted.

Reviewed by: julian
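For illustration only (not part of this commit): a minimal userland sketch of
the behavior the change preserves. One thread of a multi-threaded process
calls execve() with a path that is assumed not to exist, the call fails, and
the worker thread is left running with its interrupted sleep restarted,
instead of the process silently becoming unthreaded. The path "/nonexistent"
and the tick counter are hypothetical.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile int ticks;		/* incremented by the worker thread */

/*
 * Worker thread: loops in an interruptible sleep.  With this change, a
 * failed execve() in the main thread leaves this thread alive and its
 * interrupted syscall is restarted.
 */
static void *
worker(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(1);
		ticks++;
	}
	return (NULL);
}

int
main(void)
{
	pthread_t tid;
	char *args[] = { "nonexistent", NULL };
	char *envp[] = { NULL };

	pthread_create(&tid, NULL, worker, NULL);

	/* Deliberately fail: the path is assumed not to exist. */
	if (execve("/nonexistent", args, envp) == -1)
		printf("execve failed: %s\n", strerror(errno));

	/*
	 * Previously a failed execve() could leave the process
	 * unthreaded; now the worker keeps making progress.
	 */
	sleep(3);
	printf("worker still running, ticks = %d\n", ticks);
	return (0);
}

Build with -lpthread. In the kernel this corresponds to the SINGLE_BOUNDARY /
thread_single_end() path in the kern_exec.c hunk below; only a successful
execve() upgrades to SINGLE_EXIT.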
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_exec.c	58
-rw-r--r--	sys/kern/kern_thread.c	83
2 files changed, 106 insertions, 35 deletions
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 77e7bc5..47b3a33 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -80,6 +80,8 @@ static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int kern_execve(struct thread *td, char *fname, char **argv,
char **envv, struct mac *mac_p);
+static int do_execve(struct thread *td, char *fname, char **argv,
+ char **envv, struct mac *mac_p);
/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
@@ -205,6 +207,44 @@ __mac_execve(td, uap)
#endif
}
+static int
+kern_execve(td, fname, argv, envv, mac_p)
+ struct thread *td;
+ char *fname;
+ char **argv;
+ char **envv;
+ struct mac *mac_p;
+{
+ struct proc *p = td->td_proc;
+ int error;
+
+ if (p->p_flag & P_HADTHREADS) {
+ PROC_LOCK(p);
+ if (thread_single(SINGLE_BOUNDARY)) {
+ PROC_UNLOCK(p);
+ return (ERESTART); /* Try again later. */
+ }
+ PROC_UNLOCK(p);
+ }
+
+ error = do_execve(td, fname, argv, envv, mac_p);
+
+ if (p->p_flag & P_HADTHREADS) {
+ PROC_LOCK(p);
+ /*
+ * On success, we upgrade to SINGLE_EXIT state to
+ * force other threads to suicide.
+ */
+ if (error == 0)
+ thread_single(SINGLE_EXIT);
+ else
+ thread_single_end();
+ PROC_UNLOCK(p);
+ }
+
+ return (error);
+}
+
/*
* In-kernel implementation of execve(). All arguments are assumed to be
* userspace pointers from the passed thread.
@@ -212,7 +252,7 @@ __mac_execve(td, uap)
* MPSAFE
*/
static int
-kern_execve(td, fname, argv, envv, mac_p)
+do_execve(td, fname, argv, envv, mac_p)
struct thread *td;
char *fname;
char **argv;
@@ -254,16 +294,6 @@ kern_execve(td, fname, argv, envv, mac_p)
PROC_LOCK(p);
KASSERT((p->p_flag & P_INEXEC) == 0,
("%s(): process already has P_INEXEC flag", __func__));
- if (p->p_flag & P_HADTHREADS) {
- if (thread_single(SINGLE_EXIT)) {
- PROC_UNLOCK(p);
- return (ERESTART); /* Try again later. */
- }
- /*
- * If we get here all other threads are dead,
- * and threading mode has been turned off
- */
- }
p->p_flag |= P_INEXEC;
PROC_UNLOCK(p);
@@ -624,9 +654,13 @@ interpret:
/*
* If tracing the process, trap to debugger so breakpoints
* can be set before the program executes.
+ * Use tdsignal to deliver the signal to the current thread.
+ * Using psignal could deliver the signal to the wrong thread,
+ * because that thread will exit; remember we are about to
+ * enter single-thread mode.
*/
if (p->p_flag & P_TRACED)
- psignal(p, SIGTRAP);
+ tdsignal(td, SIGTRAP, SIGTARGET_TD);
/* clear "fork but no exec" flag, as we _are_ execing */
p->p_acflag &= ~AFORK;
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index e9e3900..835ca80 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -764,7 +764,7 @@ thread_unlink(struct thread *td)
* any sleeping threads that are interruptable. (PCATCH).
*/
int
-thread_single(int force_exit)
+thread_single(int mode)
{
struct thread *td;
struct thread *td2;
@@ -781,36 +781,53 @@ thread_single(int force_exit)
return (0);
/* Is someone already single threading? */
- if (p->p_singlethread)
+ if (p->p_singlethread != NULL && p->p_singlethread != td)
return (1);
+ if (mode == SINGLE_EXIT) {
+ p->p_flag |= P_SINGLE_EXIT;
+ p->p_flag &= ~P_SINGLE_BOUNDARY;
+ } else {
+ p->p_flag &= ~P_SINGLE_EXIT;
+ if (mode == SINGLE_BOUNDARY)
+ p->p_flag |= P_SINGLE_BOUNDARY;
+ else
+ p->p_flag &= ~P_SINGLE_BOUNDARY;
+ }
p->p_flag |= P_STOPPED_SINGLE;
mtx_lock_spin(&sched_lock);
p->p_singlethread = td;
- if (force_exit == SINGLE_EXIT) {
+ if (mode == SINGLE_EXIT)
remaining = p->p_numthreads;
- p->p_flag |= P_SINGLE_EXIT;
- } else {
+ else if (mode == SINGLE_BOUNDARY)
+ remaining = p->p_numthreads - p->p_boundary_count;
+ else
remaining = p->p_numthreads - p->p_suspcount;
- p->p_flag &= ~P_SINGLE_EXIT;
- }
while (remaining != 1) {
FOREACH_THREAD_IN_PROC(p, td2) {
if (td2 == td)
continue;
td2->td_flags |= TDF_ASTPENDING;
if (TD_IS_INHIBITED(td2)) {
- if (force_exit == SINGLE_EXIT) {
+ switch (mode) {
+ case SINGLE_EXIT:
if (td->td_flags & TDF_DBSUSPEND)
td->td_flags &= ~TDF_DBSUSPEND;
- if (TD_IS_SUSPENDED(td2)) {
+ if (TD_IS_SUSPENDED(td2))
thread_unsuspend_one(td2);
- }
if (TD_ON_SLEEPQ(td2) &&
- (td2->td_flags & TDF_SINTR)) {
+ (td2->td_flags & TDF_SINTR))
sleepq_abort(td2);
- }
- } else {
+ break;
+ case SINGLE_BOUNDARY:
+ if (TD_IS_SUSPENDED(td2) &&
+ !(td2->td_flags & TDF_BOUNDARY))
+ thread_unsuspend_one(td2);
+ if (TD_ON_SLEEPQ(td2) &&
+ (td2->td_flags & TDF_SINTR))
+ sleepq_abort(td2);
+ break;
+ default:
if (TD_IS_SUSPENDED(td2))
continue;
/*
@@ -821,11 +838,14 @@ thread_single(int force_exit)
if (td2->td_inhibitors &
(TDI_SLEEPING | TDI_SWAPPED))
thread_suspend_one(td2);
+ break;
}
}
}
- if (force_exit == SINGLE_EXIT)
+ if (mode == SINGLE_EXIT)
remaining = p->p_numthreads;
+ else if (mode == SINGLE_BOUNDARY)
+ remaining = p->p_numthreads - p->p_boundary_count;
else
remaining = p->p_numthreads - p->p_suspcount;
@@ -845,12 +865,14 @@ thread_single(int force_exit)
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
- if (force_exit == SINGLE_EXIT)
+ if (mode == SINGLE_EXIT)
remaining = p->p_numthreads;
+ else if (mode == SINGLE_BOUNDARY)
+ remaining = p->p_numthreads - p->p_boundary_count;
else
remaining = p->p_numthreads - p->p_suspcount;
}
- if (force_exit == SINGLE_EXIT) {
+ if (mode == SINGLE_EXIT) {
/*
* We have gotten rid of all the other threads and we
* are about to either exit or exec. In either case,
@@ -925,6 +947,11 @@ thread_suspend_check(int return_instead)
if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
return (1);
+ /* Should we go to the user boundary if we didn't come from there? */
+ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
+ (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
+ return (1);
+
mtx_lock_spin(&sched_lock);
thread_stopped(p);
/*
@@ -932,9 +959,8 @@ thread_suspend_check(int return_instead)
* this thread should just suicide.
* Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
*/
- if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
+ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
thread_exit();
- }
/*
* When a thread suspends, it just
@@ -942,13 +968,20 @@ thread_suspend_check(int return_instead)
* and stays there.
*/
thread_suspend_one(td);
+ if (return_instead == 0) {
+ p->p_boundary_count++;
+ td->td_flags |= TDF_BOUNDARY;
+ }
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
- if (p->p_numthreads == p->p_suspcount) {
+ if (p->p_numthreads == p->p_suspcount)
thread_unsuspend_one(p->p_singlethread);
- }
}
PROC_UNLOCK(p);
mi_switch(SW_INVOL, NULL);
+ if (return_instead == 0) {
+ p->p_boundary_count--;
+ td->td_flags &= ~TDF_BOUNDARY;
+ }
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
}
@@ -1026,7 +1059,7 @@ thread_single_end(void)
td = curthread;
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
- p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
+ p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
mtx_lock_spin(&sched_lock);
p->p_singlethread = NULL;
/*
@@ -1055,8 +1088,12 @@ thread_sleep_check(struct thread *td)
p = td->td_proc;
mtx_assert(&sched_lock, MA_OWNED);
if (p->p_flag & P_HADTHREADS) {
- if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td)
- return (EINTR);
+ if (p->p_singlethread != td) {
+ if (p->p_flag & P_SINGLE_EXIT)
+ return (EINTR);
+ if (p->p_flag & P_SINGLE_BOUNDARY)
+ return (ERESTART);
+ }
if (td->td_flags & TDF_INTERRUPT)
return (td->td_intrval);
}