author    davidxu <davidxu@FreeBSD.org>    2004-07-13 07:20:10 +0000
committer davidxu <davidxu@FreeBSD.org>    2004-07-13 07:20:10 +0000
commit    1920ad199e7dd23d7b308c9caa9ed999e2947329 (patch)
tree      7b594e9c78fb4c5b7c6d232be655c25bb67cc072
parent    51b37935456677085e7e877224559238690d6a50 (diff)
Add code to support debugging threaded processes.
1. Add tm_lwp to kse_thr_mailbox to indicate which kernel thread the current user thread is running on. Add tm_dflags to kse_thr_mailbox; these flags are written by the debugger and tell the UTS and the kernel what to do while the process is being debugged. There are currently two flags, TMDF_SSTEP and TMDF_DONOTRUNUSER. TMDF_SSTEP tells the kernel to turn single stepping on (and to turn it off when the flag is not set). TMDF_DONOTRUNUSER tells the kernel to schedule an upcall whenever possible; to the UTS it means the user thread must not run until the debugger clears the flag. This behaviour is needed because gdb wants to resume only one thread when that thread's pc is at a breakpoint: to let the thread go forward it has to remove the breakpoint, and to keep other threads from sneaking past the removed breakpoint it wants only that one thread to run. Also add km_lwp to kse_mailbox; the lwp id is copied into kse_thr_mailbox at context switch time when the process is not being debugged, so that when the process is attached the debugger can map kernel threads to user threads.
2. Add p_xthread to the proc structure and td_xsig to the thread structure. p_xthread is set by a thread when it wants to report an event to the debugger; every thread can set the pointer, and when it is used in ptracestop the last thread to report an event wins the race. Every thread has a td_xsig to exchange a signal with the debugger; a thread uses the TDF_XSIG flag to indicate that it is reporting a signal to the debugger and keeps retrying until the flag is cleared by the debugger. p_xthread may also be used by the debugger to identify the current thread. p_xstat stays in the proc structure so that wait() keeps working; in the future we may use only td_xsig.
3. Add the TDF_DBSUSPEND flag, used by the debugger to suspend a thread. When the process stops, the debugger can set the flag on a thread; the thread checks the flag in thread_suspend_check and loops there until the flag is cleared by the debugger, the process is detached, or the process is exiting. The flag is also checked in ptracestop, so the debugger can temporarily suspend a thread even while the thread wants to exchange a signal.
4. Currently ptrace always resumes all threads, but a thread that still has TDF_DBSUSPEND set by the debugger will not run.
Encouraged by: marcel, julian, deischen
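To make the new mailbox fields concrete, here is a minimal, illustrative sketch (not part of this commit) of how a UTS scheduling loop might honor TMDF_DONOTRUNUSER before switching a user thread back in. The helpers uts_pick_thread() and uts_requeue() are hypothetical stand-ins for the threading library's run-queue code; only the kse.h fields and flags come from this change. A debugger-side counterpart follows the file list below.

#include <sys/types.h>
#include <sys/kse.h>

/* Hypothetical run-queue helpers; a real UTS has its own equivalents. */
struct kse_thr_mailbox *uts_pick_thread(struct kse_mailbox *km);
void uts_requeue(struct kse_thr_mailbox *tm);

void
uts_schedule(struct kse_mailbox *km)
{
        struct kse_thr_mailbox *tm;

        for (;;) {
                tm = uts_pick_thread(km);
                if (tm == NULL) {
                        /* Nothing runnable: sleep in the kernel until the
                           next upcall is needed. */
                        kse_release(NULL);
                        continue;
                }
                /*
                 * tm_dflags is written by the debugger.  While
                 * TMDF_DONOTRUNUSER is set, the thread must not run in
                 * userland, so it cannot sneak past a breakpoint that
                 * gdb has temporarily removed.
                 */
                if (tm->tm_dflags & TMDF_DONOTRUNUSER) {
                        uts_requeue(tm);
                        continue;
                }
                /*
                 * The kernel copies km_lwp into tm_lwp at context-switch
                 * time, so an attaching debugger can map this kernel
                 * thread (LWP) to the user thread it is running.
                 */
                kse_switchin(tm, KSE_SWITCHIN_SETTMBX);
        }
}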
-rw-r--r--sys/kern/kern_exit.c2
-rw-r--r--sys/kern/kern_sig.c125
-rw-r--r--sys/kern/kern_thread.c5
-rw-r--r--sys/sys/kse.h52
-rw-r--r--sys/sys/proc.h7
-rw-r--r--sys/sys/signalvar.h2
6 files changed, 123 insertions, 70 deletions
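For the debugger side, the following sketch shows how a front end might park all but one LWP of a traced process before stepping it over a breakpoint, the scenario described in point 1 of the log message. It assumes the companion ptrace(2) requests PT_GETNUMLWPS, PT_GETLWPLIST, PT_SUSPEND (which sets TDF_DBSUSPEND) and PT_RESUME; those requests are not part of this diff, and error handling is omitted.

#include <sys/types.h>
#include <sys/ptrace.h>

#include <stdlib.h>

/*
 * Park every LWP of the traced process except `chosen', then single-step
 * the process.  Because the other threads keep TDF_DBSUSPEND set, only
 * the chosen thread can move past the spot where the breakpoint was
 * temporarily removed.
 */
void
step_one_lwp(pid_t pid, lwpid_t chosen)
{
        lwpid_t *lwps;
        int i, n;

        n = ptrace(PT_GETNUMLWPS, pid, NULL, 0);
        lwps = calloc(n, sizeof(*lwps));
        ptrace(PT_GETLWPLIST, pid, (caddr_t)lwps, n);

        for (i = 0; i < n; i++) {
                if (lwps[i] == chosen)
                        ptrace(PT_RESUME, lwps[i], NULL, 0);
                else
                        ptrace(PT_SUSPEND, lwps[i], NULL, 0);
        }

        /* Step from the current pc without delivering a signal. */
        ptrace(PT_STEP, pid, (caddr_t)1, 0);
        free(lwps);
}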
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 1db90ab..8efb312 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -427,7 +427,7 @@ retry:
mtx_lock(&Giant);
PROC_LOCK(p);
p->p_xstat = rv;
- p->p_xlwpid = td->td_tid;
+ p->p_xthread = td;
*p->p_ru = p->p_stats->p_ru;
mtx_lock_spin(&sched_lock);
calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index bbc9e28..371e62d 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1489,23 +1489,16 @@ trapsignal(struct thread *td, int sig, u_long code)
if (td->td_mailbox == NULL)
thread_user_enter(p, td);
PROC_LOCK(p);
- if (td->td_mailbox) {
- SIGDELSET(td->td_sigmask, sig);
- mtx_lock_spin(&sched_lock);
- /*
- * Force scheduling an upcall, so UTS has chance to
- * process the signal before thread runs again in
- * userland.
- */
- if (td->td_upcall)
- td->td_upcall->ku_flags |= KUF_DOUPCALL;
- mtx_unlock_spin(&sched_lock);
- } else {
- /* UTS caused a sync signal */
- p->p_code = code; /* XXX for core dump/debugger */
- p->p_sig = sig; /* XXX to verify code */
- sigexit(td, sig);
- }
+ SIGDELSET(td->td_sigmask, sig);
+ mtx_lock_spin(&sched_lock);
+ /*
+ * Force scheduling an upcall, so UTS has chance to
+ * process the signal before thread runs again in
+ * userland.
+ */
+ if (td->td_upcall)
+ td->td_upcall->ku_flags |= KUF_DOUPCALL;
+ mtx_unlock_spin(&sched_lock);
} else {
PROC_LOCK(p);
}
@@ -1523,17 +1516,23 @@ trapsignal(struct thread *td, int sig, u_long code)
(*p->p_sysent->sv_sendsig)(
ps->ps_sigact[_SIG_IDX(sig)], sig,
&td->td_sigmask, code);
- else {
+ else if (td->td_mailbox == NULL) {
+ mtx_unlock(&ps->ps_mtx);
+ /* UTS caused a sync signal */
+ p->p_code = code; /* XXX for core dump/debugger */
+ p->p_sig = sig; /* XXX to verify code */
+ sigexit(td, sig);
+ } else {
cpu_thread_siginfo(sig, code, &siginfo);
mtx_unlock(&ps->ps_mtx);
+ SIGADDSET(td->td_sigmask, sig);
PROC_UNLOCK(p);
error = copyout(&siginfo, &td->td_mailbox->tm_syncsig,
sizeof(siginfo));
PROC_LOCK(p);
/* UTS memory corrupted */
if (error)
- sigexit(td, SIGILL);
- SIGADDSET(td->td_sigmask, sig);
+ sigexit(td, SIGSEGV);
mtx_lock(&ps->ps_mtx);
}
SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
@@ -1882,7 +1881,7 @@ do_tdsignal(struct thread *td, int sig, sigtarget_t target)
goto out;
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
- p->p_xlwpid = td->td_tid;
+ p->p_xthread = td;
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td0) {
if (TD_IS_SLEEPING(td0) &&
@@ -2002,28 +2001,63 @@ tdsigwakeup(struct thread *td, int sig, sig_t action)
}
}
-void
+int
ptracestop(struct thread *td, int sig)
{
struct proc *p = td->td_proc;
+ struct thread *td0;
PROC_LOCK_ASSERT(p, MA_OWNED);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
&p->p_mtx.mtx_object, "Stopping for traced signal");
- p->p_xstat = sig;
- p->p_xlwpid = td->td_tid;
- PROC_LOCK(p->p_pptr);
- psignal(p->p_pptr, SIGCHLD);
- PROC_UNLOCK(p->p_pptr);
- stop(p);
mtx_lock_spin(&sched_lock);
- thread_suspend_one(td);
- PROC_UNLOCK(p);
- DROP_GIANT();
- mi_switch(SW_INVOL, NULL);
+ td->td_flags |= TDF_XSIG;
mtx_unlock_spin(&sched_lock);
- PICKUP_GIANT();
+ td->td_xsig = sig;
+ while ((p->p_flag & P_TRACED) && (td->td_flags & TDF_XSIG)) {
+ if (p->p_flag & P_SINGLE_EXIT) {
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_XSIG;
+ mtx_unlock_spin(&sched_lock);
+ return (sig);
+ }
+ /*
+ * Just make wait() work; the last stopped thread
+ * will win.
+ */
+ p->p_xstat = sig;
+ p->p_xthread = td;
+ p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE);
+ mtx_lock_spin(&sched_lock);
+ FOREACH_THREAD_IN_PROC(p, td0) {
+ if (TD_IS_SLEEPING(td0) &&
+ (td0->td_flags & TDF_SINTR) &&
+ !TD_IS_SUSPENDED(td0)) {
+ thread_suspend_one(td0);
+ } else if (td != td0) {
+ td0->td_flags |= TDF_ASTPENDING;
+ }
+ }
+stopme:
+ thread_stopped(p);
+ thread_suspend_one(td);
+ PROC_UNLOCK(p);
+ DROP_GIANT();
+ mi_switch(SW_VOL, NULL);
+ mtx_unlock_spin(&sched_lock);
+ PICKUP_GIANT();
+ PROC_LOCK(p);
+ if (!(p->p_flag & P_TRACED))
+ break;
+ if (td->td_flags & TDF_DBSUSPEND) {
+ if (p->p_flag & P_SINGLE_EXIT)
+ break;
+ mtx_lock_spin(&sched_lock);
+ goto stopme;
+ }
+ }
+ return (td->td_xsig);
}
/*
@@ -2045,7 +2079,7 @@ issignal(td)
struct proc *p;
struct sigacts *ps;
sigset_t sigpending;
- int sig, prop;
+ int sig, prop, newsig;
struct thread *td0;
p = td->td_proc;
@@ -2076,6 +2110,8 @@ issignal(td)
*/
if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
SIGDELSET(td->td_siglist, sig);
+ if (td->td_pflags & TDP_SA)
+ SIGADDSET(td->td_sigmask, sig);
continue;
}
if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
@@ -2083,8 +2119,7 @@ issignal(td)
* If traced, always stop.
*/
mtx_unlock(&ps->ps_mtx);
- ptracestop(td, sig);
- PROC_LOCK(p);
+ newsig = ptracestop(td, sig);
mtx_lock(&ps->ps_mtx);
/*
@@ -2093,10 +2128,11 @@ issignal(td)
* otherwise we just look for signals again.
*/
SIGDELSET(td->td_siglist, sig); /* clear old signal */
- sig = p->p_xstat;
- if (sig == 0)
+ if (td->td_pflags & TDP_SA)
+ SIGADDSET(td->td_sigmask, sig);
+ if (newsig == 0)
continue;
-
+ sig = newsig;
/*
* If the traced bit got turned off, go back up
* to the top to rescan signals. This ensures
@@ -2110,6 +2146,8 @@ issignal(td)
* signal is being masked, look for other signals.
*/
SIGADDSET(td->td_siglist, sig);
+ if (td->td_pflags & TDP_SA)
+ SIGDELSET(td->td_sigmask, sig);
if (SIGISMEMBER(td->td_sigmask, sig))
continue;
signotify(td);
@@ -2156,7 +2194,7 @@ issignal(td)
&p->p_mtx.mtx_object, "Catching SIGSTOP");
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
- p->p_xlwpid = td->td_tid;
+ p->p_xthread = td;
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td0) {
if (TD_IS_SLEEPING(td0) &&
@@ -2289,8 +2327,7 @@ postsig(sig)
mtx_lock(&ps->ps_mtx);
}
- if (!(td->td_pflags & TDP_SA && td->td_mailbox) &&
- action == SIG_DFL) {
+ if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) {
/*
* Default action, where the default is to kill
* the process. (Other cases were ignored above.)
@@ -2299,7 +2336,7 @@ postsig(sig)
sigexit(td, sig);
/* NOTREACHED */
} else {
- if (td->td_pflags & TDP_SA && td->td_mailbox) {
+ if (td->td_pflags & TDP_SA) {
if (sig == SIGKILL) {
mtx_unlock(&ps->ps_mtx);
sigexit(td, sig);
@@ -2348,7 +2385,7 @@ postsig(sig)
p->p_code = 0;
p->p_sig = 0;
}
- if (td->td_pflags & TDP_SA && td->td_mailbox)
+ if (td->td_pflags & TDP_SA)
thread_signal_add(curthread, sig);
else
(*p->p_sysent->sv_sendsig)(action, sig,
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 4b7bd47..def5412 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -870,6 +870,8 @@ thread_single(int force_exit)
td2->td_flags |= TDF_ASTPENDING;
if (TD_IS_INHIBITED(td2)) {
if (force_exit == SINGLE_EXIT) {
+ if (td->td_flags & TDF_DBSUSPEND)
+ td->td_flags &= ~TDF_DBSUSPEND;
if (TD_IS_SUSPENDED(td2)) {
thread_unsuspend_one(td2);
}
@@ -969,7 +971,8 @@ thread_suspend_check(int return_instead)
p = td->td_proc;
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
- while (P_SHOULDSTOP(p)) {
+ while (P_SHOULDSTOP(p) ||
+ ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
KASSERT(p->p_singlethread != NULL,
("singlethread not set"));
diff --git a/sys/sys/kse.h b/sys/sys/kse.h
index 2d02eb7..a0a2752 100644
--- a/sys/sys/kse.h
+++ b/sys/sys/kse.h
@@ -54,13 +54,15 @@ typedef void kse_func_t(struct kse_mailbox *);
*/
struct kse_thr_mailbox {
ucontext_t tm_context; /* User and machine context */
- unsigned int tm_flags; /* Thread flags */
+ uint32_t tm_flags; /* Thread flags */
struct kse_thr_mailbox *tm_next; /* Next thread in list */
void *tm_udata; /* For use by the UTS */
- uint32_t tm_uticks;
- uint32_t tm_sticks;
+ uint32_t tm_uticks; /* Time in userland */
+ uint32_t tm_sticks; /* Time in kernel */
siginfo_t tm_syncsig;
- int tm_spare[8];
+ uint32_t tm_dflags; /* Debug flags */
+ lwpid_t tm_lwp; /* kernel thread UTS runs on */
+ uint32_t __spare__[6];
};
/*
@@ -70,40 +72,48 @@ struct kse_thr_mailbox {
* a single KSE.
*/
struct kse_mailbox {
- int km_version; /* Mailbox version */
+ uint32_t km_version; /* Mailbox version */
struct kse_thr_mailbox *km_curthread; /* Currently running thread */
struct kse_thr_mailbox *km_completed; /* Threads back from kernel */
sigset_t km_sigscaught; /* Caught signals */
- uint32_t km_flags; /* KSE flags */
+ uint32_t km_flags; /* Mailbox flags */
kse_func_t *km_func; /* UTS function */
- stack_t km_stack; /* UTS context */
+ stack_t km_stack; /* UTS stack */
void *km_udata; /* For use by the UTS */
struct timespec km_timeofday; /* Time of day */
- int km_quantum; /* Upcall quantum in msecs */
- int km_spare[8];
+ uint32_t km_quantum; /* Upcall quantum in msecs */
+ lwpid_t km_lwp; /* kernel thread UTS runs on */
+ uint32_t __spare2__[7];
};
-#define KSE_VER_0 0
-#define KSE_VERSION KSE_VER_0
+#define KSE_VER_0 0
+#define KSE_VERSION KSE_VER_0
/* These flags are kept in km_flags */
-#define KMF_NOUPCALL 0x01
-#define KMF_NOCOMPLETED 0x02
-#define KMF_DONE 0x04
-#define KMF_BOUND 0x08
-#define KMF_WAITSIGEVENT 0x10
+#define KMF_NOUPCALL 0x01
+#define KMF_NOCOMPLETED 0x02
+#define KMF_DONE 0x04
+#define KMF_BOUND 0x08
+#define KMF_WAITSIGEVENT 0x10
/* These flags are kept in tm_flags */
-#define TMF_NOUPCALL 0x01
+#define TMF_NOUPCALL 0x01
+
+/* These flags are kept in tm_dflags */
+#define TMDF_SSTEP 0x01
+#define TMDF_DONOTRUNUSER 0x02
+
+/* Flags for kse_switchin */
+#define KSE_SWITCHIN_SETTMBX 0x01
/* Flags for kse_switchin */
#define KSE_SWITCHIN_SETTMBX 0x01
/* Commands for kse_thr_interrupt */
-#define KSE_INTR_INTERRUPT 0x01
-#define KSE_INTR_RESTART 0x02
-#define KSE_INTR_SENDSIG 0x03
-#define KSE_INTR_SIGEXIT 0x04
+#define KSE_INTR_INTERRUPT 0x01
+#define KSE_INTR_RESTART 0x02
+#define KSE_INTR_SENDSIG 0x03
+#define KSE_INTR_SIGEXIT 0x04
#ifndef _KERNEL
int kse_create(struct kse_mailbox *, int);
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index bf3c788..0a15daf 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -303,7 +303,7 @@ struct thread {
volatile u_int td_generation; /* (k) Enable detection of preemption */
stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */
int td_kflags; /* (c) Flags for KSE threading. */
-
+ int td_xsig; /* (c) Signal for ptrace */
#define td_endzero td_base_pri
/* Copied during fork1() or thread_sched_upcall(). */
@@ -354,8 +354,10 @@ struct thread {
#define TDF_OWEUPC 0x008000 /* Owe thread an addupc() call at next AST. */
#define TDF_NEEDRESCHED 0x010000 /* Thread needs to yield. */
#define TDF_NEEDSIGCHK 0x020000 /* Thread may need signal delivery. */
+#define TDF_XSIG 0x040000 /* Thread is exchanging signal under trace */
#define TDF_UMTXWAKEUP 0x080000 /* Libthr thread must not sleep on a umtx. */
#define TDF_THRWAKEUP 0x100000 /* Libthr thread must not suspend itself. */
+#define TDF_DBSUSPEND 0x200000 /* Thread is suspended by debugger */
/* "Private" flags kept in td_pflags: */
#define TDP_OLDMASK 0x0001 /* Need to restore mask after suspend. */
@@ -586,6 +588,7 @@ struct proc {
void *p_aioinfo; /* (?) ASYNC I/O info. */
struct thread *p_singlethread;/* (c + j) If single threading this is it */
int p_suspcount; /* (c) # threads in suspended mode */
+ struct thread *p_xthread; /* (c) Trap thread */
/* End area that is zeroed on creation. */
#define p_endzero p_magic
@@ -602,7 +605,6 @@ struct proc {
#define p_endcopy p_xstat
u_short p_xstat; /* (c) Exit status; also stop sig. */
- lwpid_t p_xlwpid; /* (c) Thread corresponding p_xstat. */
int p_numthreads; /* (j) Number of threads. */
int p_numksegrps; /* (?) number of ksegrps */
struct mdproc p_md; /* Any machine-dependent fields. */
@@ -937,6 +939,7 @@ void upcall_stash(struct kse_upcall *ke);
void thread_sanity_check(struct thread *td, char *);
void thread_stopped(struct proc *p);
void thread_switchout(struct thread *td);
+void thread_continued(struct proc *p);
void thr_exit1(void);
#endif /* _KERNEL */
diff --git a/sys/sys/signalvar.h b/sys/sys/signalvar.h
index 1fa4b86..83301c8 100644
--- a/sys/sys/signalvar.h
+++ b/sys/sys/signalvar.h
@@ -271,7 +271,7 @@ void siginit(struct proc *p);
void signotify(struct thread *td);
void tdsignal(struct thread *td, int sig, sigtarget_t target);
void trapsignal(struct thread *td, int sig, u_long code);
-void ptracestop(struct thread *td, int sig);
+int ptracestop(struct thread *td, int sig);
/*
* Machine-dependent functions: