summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authordavidxu <davidxu@FreeBSD.org>2004-07-13 07:20:10 +0000
committerdavidxu <davidxu@FreeBSD.org>2004-07-13 07:20:10 +0000
commit1920ad199e7dd23d7b308c9caa9ed999e2947329 (patch)
tree7b594e9c78fb4c5b7c6d232be655c25bb67cc072 /sys/kern
parent51b37935456677085e7e877224559238690d6a50 (diff)
downloadFreeBSD-src-1920ad199e7dd23d7b308c9caa9ed999e2947329.zip
FreeBSD-src-1920ad199e7dd23d7b308c9caa9ed999e2947329.tar.gz
Add code to support debugging threaded processes.
1. Add tm_lwpid to kse_thr_mailbox to indicate which kernel thread the current user thread is running on. Add tm_dflags to kse_thr_mailbox; these flags are written by the debugger and tell the UTS and the kernel what should be done when the process is being debugged. Currently there are two flags: TMDF_SSTEP and TMDF_DONOTRUNUSER. TMDF_SSTEP tells the kernel to turn single stepping on (or off when it is not set). TMDF_DONOTRUNUSER tells the kernel to schedule an upcall whenever possible; to the UTS it means do not run the user thread until the debugger clears the flag. This behaviour is necessary because gdb wants to resume only one thread when that thread's pc is at a breakpoint and the thread needs to go forward; in order to keep other threads from sneaking past the breakpoint, gdb removes the breakpoint and wants only the one thread to run. Also, add km_lwp to kse_mailbox; the lwp id is copied to kse_thr_mailbox at context-switch time when the process is not being debugged, so that when the process is attached, the debugger can map a kernel thread to a user thread. 2. Add p_xthread to the proc structure and td_xsig to the thread structure. p_xthread is used by a thread when it wants to report an event to the debugger; every thread can set the pointer, and in particular, when it is used in ptracestop, the last thread to report an event wins the race. Every thread has a td_xsig to exchange a signal with the debugger; a thread uses the TDF_XSIG flag to indicate that it is reporting a signal to the debugger, and if the flag is not cleared, the thread keeps retrying until the debugger clears it. p_xthread may be used by the debugger to indicate the CURRENT thread. p_xstat is still kept in the proc structure so that wait() keeps working; in the future, we may just use td_xsig. 3. Add the TDF_DBSUSPEND flag, used by the debugger to suspend a thread. When the process stops, the debugger can set the flag for a thread; the thread checks the flag in thread_suspend_check and enters a loop until the flag is cleared by the debugger, the process is detached, or the process is exiting. 
The flag is also checked in ptracestop, so the debugger can temporarily suspend a thread even while the thread wants to exchange a signal. 4. Currently, in ptrace, we always resume all threads, but if a thread already has the TDF_DBSUSPEND flag set by the debugger, it won't run. Encouraged by: marcel, julian, deischen
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_exit.c2
-rw-r--r--sys/kern/kern_sig.c125
-rw-r--r--sys/kern/kern_thread.c5
3 files changed, 86 insertions, 46 deletions
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 1db90ab..8efb312 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -427,7 +427,7 @@ retry:
mtx_lock(&Giant);
PROC_LOCK(p);
p->p_xstat = rv;
- p->p_xlwpid = td->td_tid;
+ p->p_xthread = td;
*p->p_ru = p->p_stats->p_ru;
mtx_lock_spin(&sched_lock);
calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index bbc9e28..371e62d 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1489,23 +1489,16 @@ trapsignal(struct thread *td, int sig, u_long code)
if (td->td_mailbox == NULL)
thread_user_enter(p, td);
PROC_LOCK(p);
- if (td->td_mailbox) {
- SIGDELSET(td->td_sigmask, sig);
- mtx_lock_spin(&sched_lock);
- /*
- * Force scheduling an upcall, so UTS has chance to
- * process the signal before thread runs again in
- * userland.
- */
- if (td->td_upcall)
- td->td_upcall->ku_flags |= KUF_DOUPCALL;
- mtx_unlock_spin(&sched_lock);
- } else {
- /* UTS caused a sync signal */
- p->p_code = code; /* XXX for core dump/debugger */
- p->p_sig = sig; /* XXX to verify code */
- sigexit(td, sig);
- }
+ SIGDELSET(td->td_sigmask, sig);
+ mtx_lock_spin(&sched_lock);
+ /*
+ * Force scheduling an upcall, so UTS has chance to
+ * process the signal before thread runs again in
+ * userland.
+ */
+ if (td->td_upcall)
+ td->td_upcall->ku_flags |= KUF_DOUPCALL;
+ mtx_unlock_spin(&sched_lock);
} else {
PROC_LOCK(p);
}
@@ -1523,17 +1516,23 @@ trapsignal(struct thread *td, int sig, u_long code)
(*p->p_sysent->sv_sendsig)(
ps->ps_sigact[_SIG_IDX(sig)], sig,
&td->td_sigmask, code);
- else {
+ else if (td->td_mailbox == NULL) {
+ mtx_unlock(&ps->ps_mtx);
+ /* UTS caused a sync signal */
+ p->p_code = code; /* XXX for core dump/debugger */
+ p->p_sig = sig; /* XXX to verify code */
+ sigexit(td, sig);
+ } else {
cpu_thread_siginfo(sig, code, &siginfo);
mtx_unlock(&ps->ps_mtx);
+ SIGADDSET(td->td_sigmask, sig);
PROC_UNLOCK(p);
error = copyout(&siginfo, &td->td_mailbox->tm_syncsig,
sizeof(siginfo));
PROC_LOCK(p);
/* UTS memory corrupted */
if (error)
- sigexit(td, SIGILL);
- SIGADDSET(td->td_sigmask, sig);
+ sigexit(td, SIGSEGV);
mtx_lock(&ps->ps_mtx);
}
SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
@@ -1882,7 +1881,7 @@ do_tdsignal(struct thread *td, int sig, sigtarget_t target)
goto out;
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
- p->p_xlwpid = td->td_tid;
+ p->p_xthread = td;
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td0) {
if (TD_IS_SLEEPING(td0) &&
@@ -2002,28 +2001,63 @@ tdsigwakeup(struct thread *td, int sig, sig_t action)
}
}
-void
+int
ptracestop(struct thread *td, int sig)
{
struct proc *p = td->td_proc;
+ struct thread *td0;
PROC_LOCK_ASSERT(p, MA_OWNED);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
&p->p_mtx.mtx_object, "Stopping for traced signal");
- p->p_xstat = sig;
- p->p_xlwpid = td->td_tid;
- PROC_LOCK(p->p_pptr);
- psignal(p->p_pptr, SIGCHLD);
- PROC_UNLOCK(p->p_pptr);
- stop(p);
mtx_lock_spin(&sched_lock);
- thread_suspend_one(td);
- PROC_UNLOCK(p);
- DROP_GIANT();
- mi_switch(SW_INVOL, NULL);
+ td->td_flags |= TDF_XSIG;
mtx_unlock_spin(&sched_lock);
- PICKUP_GIANT();
+ td->td_xsig = sig;
+ while ((p->p_flag & P_TRACED) && (td->td_flags & TDF_XSIG)) {
+ if (p->p_flag & P_SINGLE_EXIT) {
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_XSIG;
+ mtx_unlock_spin(&sched_lock);
+ return (sig);
+ }
+ /*
+ * Just make wait() to work, the last stopped thread
+ * will win.
+ */
+ p->p_xstat = sig;
+ p->p_xthread = td;
+ p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE);
+ mtx_lock_spin(&sched_lock);
+ FOREACH_THREAD_IN_PROC(p, td0) {
+ if (TD_IS_SLEEPING(td0) &&
+ (td0->td_flags & TDF_SINTR) &&
+ !TD_IS_SUSPENDED(td0)) {
+ thread_suspend_one(td0);
+ } else if (td != td0) {
+ td0->td_flags |= TDF_ASTPENDING;
+ }
+ }
+stopme:
+ thread_stopped(p);
+ thread_suspend_one(td);
+ PROC_UNLOCK(p);
+ DROP_GIANT();
+ mi_switch(SW_VOL, NULL);
+ mtx_unlock_spin(&sched_lock);
+ PICKUP_GIANT();
+ PROC_LOCK(p);
+ if (!(p->p_flag & P_TRACED))
+ break;
+ if (td->td_flags & TDF_DBSUSPEND) {
+ if (p->p_flag & P_SINGLE_EXIT)
+ break;
+ mtx_lock_spin(&sched_lock);
+ goto stopme;
+ }
+ }
+ return (td->td_xsig);
}
/*
@@ -2045,7 +2079,7 @@ issignal(td)
struct proc *p;
struct sigacts *ps;
sigset_t sigpending;
- int sig, prop;
+ int sig, prop, newsig;
struct thread *td0;
p = td->td_proc;
@@ -2076,6 +2110,8 @@ issignal(td)
*/
if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
SIGDELSET(td->td_siglist, sig);
+ if (td->td_pflags & TDP_SA)
+ SIGADDSET(td->td_sigmask, sig);
continue;
}
if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
@@ -2083,8 +2119,7 @@ issignal(td)
* If traced, always stop.
*/
mtx_unlock(&ps->ps_mtx);
- ptracestop(td, sig);
- PROC_LOCK(p);
+ newsig = ptracestop(td, sig);
mtx_lock(&ps->ps_mtx);
/*
@@ -2093,10 +2128,11 @@ issignal(td)
* otherwise we just look for signals again.
*/
SIGDELSET(td->td_siglist, sig); /* clear old signal */
- sig = p->p_xstat;
- if (sig == 0)
+ if (td->td_pflags & TDP_SA)
+ SIGADDSET(td->td_sigmask, sig);
+ if (newsig == 0)
continue;
-
+ sig = newsig;
/*
* If the traced bit got turned off, go back up
* to the top to rescan signals. This ensures
@@ -2110,6 +2146,8 @@ issignal(td)
* signal is being masked, look for other signals.
*/
SIGADDSET(td->td_siglist, sig);
+ if (td->td_pflags & TDP_SA)
+ SIGDELSET(td->td_sigmask, sig);
if (SIGISMEMBER(td->td_sigmask, sig))
continue;
signotify(td);
@@ -2156,7 +2194,7 @@ issignal(td)
&p->p_mtx.mtx_object, "Catching SIGSTOP");
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
- p->p_xlwpid = td->td_tid;
+ p->p_xthread = td;
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td0) {
if (TD_IS_SLEEPING(td0) &&
@@ -2289,8 +2327,7 @@ postsig(sig)
mtx_lock(&ps->ps_mtx);
}
- if (!(td->td_pflags & TDP_SA && td->td_mailbox) &&
- action == SIG_DFL) {
+ if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) {
/*
* Default action, where the default is to kill
* the process. (Other cases were ignored above.)
@@ -2299,7 +2336,7 @@ postsig(sig)
sigexit(td, sig);
/* NOTREACHED */
} else {
- if (td->td_pflags & TDP_SA && td->td_mailbox) {
+ if (td->td_pflags & TDP_SA) {
if (sig == SIGKILL) {
mtx_unlock(&ps->ps_mtx);
sigexit(td, sig);
@@ -2348,7 +2385,7 @@ postsig(sig)
p->p_code = 0;
p->p_sig = 0;
}
- if (td->td_pflags & TDP_SA && td->td_mailbox)
+ if (td->td_pflags & TDP_SA)
thread_signal_add(curthread, sig);
else
(*p->p_sysent->sv_sendsig)(action, sig,
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 4b7bd47..def5412 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -870,6 +870,8 @@ thread_single(int force_exit)
td2->td_flags |= TDF_ASTPENDING;
if (TD_IS_INHIBITED(td2)) {
if (force_exit == SINGLE_EXIT) {
+ if (td->td_flags & TDF_DBSUSPEND)
+ td->td_flags &= ~TDF_DBSUSPEND;
if (TD_IS_SUSPENDED(td2)) {
thread_unsuspend_one(td2);
}
@@ -969,7 +971,8 @@ thread_suspend_check(int return_instead)
p = td->td_proc;
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
- while (P_SHOULDSTOP(p)) {
+ while (P_SHOULDSTOP(p) ||
+ ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
KASSERT(p->p_singlethread != NULL,
("singlethread not set"));
OpenPOWER on IntegriCloud