summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authorbadger <badger@FreeBSD.org>2017-03-25 13:33:23 +0000
committerbadger <badger@FreeBSD.org>2017-03-25 13:33:23 +0000
commite581b9ce028a5e799dfa8661362d8fb0b15d969a (patch)
tree015233187db6183e069a76ecd85aafa0ad99ed4e /sys/kern
parentf41cd7e2a015bf78169ed297bd77746f72d04302 (diff)
downloadFreeBSD-src-e581b9ce028a5e799dfa8661362d8fb0b15d969a.zip
FreeBSD-src-e581b9ce028a5e799dfa8661362d8fb0b15d969a.tar.gz
MFC r313992, r314075, r314118, r315484:
r315484: ptrace_test: eliminate assumption about thread scheduling

A couple of the ptrace tests make assumptions about which thread in a
multithreaded process will run after a halt. This makes the tests less
portable across branches, and susceptible to future breakage. Instead,
twiddle thread scheduling and priorities to match the tests' expectation.

r314118: Actually fix buildworlds other than i386/amd64/sparc64 after r313992

Disable offending test for platforms without a userspace visible breakpoint().

r314075: Fix world build for archs where __builtin_debugtrap() does not work.

The offending code was introduced in r313992.

r313992: Defer ptracestop() signals that cannot be delivered immediately

When a thread is stopped in ptracestop(), the ptrace(2) user may request
a signal be delivered upon resumption of the thread. Heretofore, those
signals were discarded unless ptracestop()'s caller was issignal(). Fix
this by modifying ptracestop() to queue up signals requested by the
ptrace user that will be delivered when possible. Take special care when
the signal is SIGKILL (usually generated from a PT_KILL request); no new
stop events should be triggered after a PT_KILL. Add a number of tests
for the new functionality. Several tests were authored by jhb.

PR: 212607
Sponsored by: Dell EMC
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_fork.c4
-rw-r--r--sys/kern/kern_sig.c224
-rw-r--r--sys/kern/kern_thr.c2
-rw-r--r--sys/kern/subr_syscall.c6
-rw-r--r--sys/kern/sys_process.c10
5 files changed, 148 insertions, 98 deletions
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index ff1c0a0..bb92861 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -1083,7 +1083,7 @@ fork_return(struct thread *td, struct trapframe *frame)
proc_reparent(p, dbg);
sx_xunlock(&proctree_lock);
td->td_dbgflags |= TDB_CHILD | TDB_SCX | TDB_FSTP;
- ptracestop(td, SIGSTOP);
+ ptracestop(td, SIGSTOP, NULL);
td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX);
} else {
/*
@@ -1104,7 +1104,7 @@ fork_return(struct thread *td, struct trapframe *frame)
_STOPEVENT(p, S_SCX, td->td_dbg_sc_code);
if ((p->p_ptevents & PTRACE_SCX) != 0 ||
(td->td_dbgflags & TDB_BORN) != 0)
- ptracestop(td, SIGTRAP);
+ ptracestop(td, SIGTRAP, NULL);
td->td_dbgflags &= ~(TDB_SCX | TDB_BORN);
PROC_UNLOCK(p);
}
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 852fb4b..6ae6de0 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -278,6 +278,7 @@ sigqueue_init(sigqueue_t *list, struct proc *p)
{
SIGEMPTYSET(list->sq_signals);
SIGEMPTYSET(list->sq_kill);
+ SIGEMPTYSET(list->sq_ptrace);
TAILQ_INIT(&list->sq_list);
list->sq_proc = p;
list->sq_flags = SQ_INIT;
@@ -301,9 +302,15 @@ sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
if (!SIGISMEMBER(sq->sq_signals, signo))
return (0);
+ if (SIGISMEMBER(sq->sq_ptrace, signo)) {
+ count++;
+ SIGDELSET(sq->sq_ptrace, signo);
+ si->ksi_flags |= KSI_PTRACE;
+ }
if (SIGISMEMBER(sq->sq_kill, signo)) {
count++;
- SIGDELSET(sq->sq_kill, signo);
+ if (count == 1)
+ SIGDELSET(sq->sq_kill, signo);
}
TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
@@ -347,7 +354,8 @@ sigqueue_take(ksiginfo_t *ksi)
if (kp->ksi_signo == ksi->ksi_signo)
break;
}
- if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo))
+ if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
+ !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
SIGDELSET(sq->sq_signals, ksi->ksi_signo);
}
@@ -360,6 +368,10 @@ sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
+ /*
+ * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
+ * for these signals.
+ */
if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
SIGADDSET(sq->sq_kill, signo);
goto out_set_bit;
@@ -398,16 +410,19 @@ sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
ksi->ksi_sigq = sq;
}
- if ((si->ksi_flags & KSI_TRAP) != 0 ||
- (si->ksi_flags & KSI_SIGQ) == 0) {
- if (ret != 0)
+ if (ret != 0) {
+ if ((si->ksi_flags & KSI_PTRACE) != 0) {
+ SIGADDSET(sq->sq_ptrace, signo);
+ ret = 0;
+ goto out_set_bit;
+ } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
+ (si->ksi_flags & KSI_SIGQ) == 0) {
SIGADDSET(sq->sq_kill, signo);
- ret = 0;
- goto out_set_bit;
- }
-
- if (ret != 0)
+ ret = 0;
+ goto out_set_bit;
+ }
return (ret);
+ }
out_set_bit:
SIGADDSET(sq->sq_signals, signo);
@@ -434,6 +449,7 @@ sigqueue_flush(sigqueue_t *sq)
SIGEMPTYSET(sq->sq_signals);
SIGEMPTYSET(sq->sq_kill);
+ SIGEMPTYSET(sq->sq_ptrace);
}
static void
@@ -466,6 +482,11 @@ sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
SIGSETOR(dst->sq_kill, tmp);
SIGSETNAND(src->sq_kill, tmp);
+ tmp = src->sq_ptrace;
+ SIGSETAND(tmp, *set);
+ SIGSETOR(dst->sq_ptrace, tmp);
+ SIGSETNAND(src->sq_ptrace, tmp);
+
tmp = src->sq_signals;
SIGSETAND(tmp, *set);
SIGSETOR(dst->sq_signals, tmp);
@@ -502,6 +523,7 @@ sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
}
}
SIGSETNAND(sq->sq_kill, *set);
+ SIGSETNAND(sq->sq_ptrace, *set);
SIGSETNAND(sq->sq_signals, *set);
}
@@ -2501,69 +2523,116 @@ sig_suspend_threads(struct thread *td, struct proc *p, int sending)
return (wakeup_swapper);
}
+/*
+ * Stop the process for an event deemed interesting to the debugger. If si is
+ * non-NULL, this is a signal exchange; the new signal requested by the
+ * debugger will be returned for handling. If si is NULL, this is some other
+ * type of interesting event. The debugger may request a signal be delivered in
+ * that case as well, however it will be deferred until it can be handled.
+ */
int
-ptracestop(struct thread *td, int sig)
+ptracestop(struct thread *td, int sig, ksiginfo_t *si)
{
struct proc *p = td->td_proc;
+ struct thread *td2;
+ ksiginfo_t ksi;
+ int prop;
PROC_LOCK_ASSERT(p, MA_OWNED);
KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
&p->p_mtx.lock_object, "Stopping for traced signal");
- td->td_dbgflags |= TDB_XSIG;
td->td_xsig = sig;
- CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
- td->td_tid, p->p_pid, td->td_dbgflags, sig);
- PROC_SLOCK(p);
- while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
- if (p->p_flag & P_SINGLE_EXIT &&
- !(td->td_dbgflags & TDB_EXIT)) {
+
+ if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
+ td->td_dbgflags |= TDB_XSIG;
+ CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
+ td->td_tid, p->p_pid, td->td_dbgflags, sig);
+ PROC_SLOCK(p);
+ while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
+ if (P_KILLED(p)) {
+ /*
+ * Ensure that, if we've been PT_KILLed, the
+ * exit status reflects that. Another thread
+ * may also be in ptracestop(), having just
+ * received the SIGKILL, but this thread was
+ * unsuspended first.
+ */
+ td->td_dbgflags &= ~TDB_XSIG;
+ td->td_xsig = SIGKILL;
+ p->p_ptevents = 0;
+ break;
+ }
+ if (p->p_flag & P_SINGLE_EXIT &&
+ !(td->td_dbgflags & TDB_EXIT)) {
+ /*
+ * Ignore ptrace stops except for thread exit
+ * events when the process exits.
+ */
+ td->td_dbgflags &= ~TDB_XSIG;
+ PROC_SUNLOCK(p);
+ return (0);
+ }
+
/*
- * Ignore ptrace stops except for thread exit
- * events when the process exits.
+ * Make wait(2) work. Ensure that right after the
+ * attach, the thread which was decided to become the
+ * leader of attach gets reported to the waiter.
+ * Otherwise, just avoid overwriting another thread's
+ * assignment to p_xthread. If another thread has
+ * already set p_xthread, the current thread will get
+ * a chance to report itself upon the next iteration.
*/
- td->td_dbgflags &= ~TDB_XSIG;
- PROC_SUNLOCK(p);
- return (sig);
+ if ((td->td_dbgflags & TDB_FSTP) != 0 ||
+ ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
+ p->p_xthread == NULL)) {
+ p->p_xsig = sig;
+ p->p_xthread = td;
+ td->td_dbgflags &= ~TDB_FSTP;
+ p->p_flag2 &= ~P2_PTRACE_FSTP;
+ p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
+ sig_suspend_threads(td, p, 0);
+ }
+ if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
+ td->td_dbgflags &= ~TDB_STOPATFORK;
+ cv_broadcast(&p->p_dbgwait);
+ }
+stopme:
+ thread_suspend_switch(td, p);
+ if (p->p_xthread == td)
+ p->p_xthread = NULL;
+ if (!(p->p_flag & P_TRACED))
+ break;
+ if (td->td_dbgflags & TDB_SUSPEND) {
+ if (p->p_flag & P_SINGLE_EXIT)
+ break;
+ goto stopme;
+ }
}
+ PROC_SUNLOCK(p);
+ }
+ if (si != NULL && sig == td->td_xsig) {
+ /* Parent wants us to take the original signal unchanged. */
+ si->ksi_flags |= KSI_HEAD;
+ if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
+ si->ksi_signo = 0;
+ } else if (td->td_xsig != 0) {
/*
- * Make wait(2) work. Ensure that right after the
- * attach, the thread which was decided to become the
- * leader of attach gets reported to the waiter.
- * Otherwise, just avoid overwriting another thread's
- * assignment to p_xthread. If another thread has
- * already set p_xthread, the current thread will get
- * a chance to report itself upon the next iteration.
+ * If parent wants us to take a new signal, then it will leave
+ * it in td->td_xsig; otherwise we just look for signals again.
*/
- if ((td->td_dbgflags & TDB_FSTP) != 0 ||
- ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
- p->p_xthread == NULL)) {
- p->p_xsig = sig;
- p->p_xthread = td;
- td->td_dbgflags &= ~TDB_FSTP;
- p->p_flag2 &= ~P2_PTRACE_FSTP;
- p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
- sig_suspend_threads(td, p, 0);
- }
- if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
- td->td_dbgflags &= ~TDB_STOPATFORK;
- cv_broadcast(&p->p_dbgwait);
- }
-stopme:
- thread_suspend_switch(td, p);
- if (p->p_xthread == td)
- p->p_xthread = NULL;
- if (!(p->p_flag & P_TRACED))
- break;
- if (td->td_dbgflags & TDB_SUSPEND) {
- if (p->p_flag & P_SINGLE_EXIT)
- break;
- goto stopme;
- }
+ ksiginfo_init(&ksi);
+ ksi.ksi_signo = td->td_xsig;
+ ksi.ksi_flags |= KSI_PTRACE;
+ prop = sigprop(td->td_xsig);
+ td2 = sigtd(p, td->td_xsig, prop);
+ tdsendsignal(p, td2, td->td_xsig, &ksi);
+ if (td != td2)
+ return (0);
}
- PROC_SUNLOCK(p);
+
return (td->td_xsig);
}
@@ -2721,7 +2790,7 @@ issignal(struct thread *td)
struct sigacts *ps;
struct sigqueue *queue;
sigset_t sigpending;
- int sig, prop, newsig;
+ int sig, prop;
p = td->td_proc;
ps = p->p_sigacts;
@@ -2784,47 +2853,18 @@ issignal(struct thread *td)
}
mtx_unlock(&ps->ps_mtx);
- newsig = ptracestop(td, sig);
+ sig = ptracestop(td, sig, &td->td_dbgksi);
mtx_lock(&ps->ps_mtx);
- if (sig != newsig) {
-
- /*
- * If parent wants us to take the signal,
- * then it will leave it in p->p_xsig;
- * otherwise we just look for signals again.
- */
- if (newsig == 0)
- continue;
- sig = newsig;
-
- /*
- * Put the new signal into td_sigqueue. If the
- * signal is being masked, look for other
- * signals.
- */
- sigqueue_add(queue, sig, NULL);
- if (SIGISMEMBER(td->td_sigmask, sig))
- continue;
- signotify(td);
- } else {
- if (td->td_dbgksi.ksi_signo != 0) {
- td->td_dbgksi.ksi_flags |= KSI_HEAD;
- if (sigqueue_add(&td->td_sigqueue, sig,
- &td->td_dbgksi) != 0)
- td->td_dbgksi.ksi_signo = 0;
- }
- if (td->td_dbgksi.ksi_signo == 0)
- sigqueue_add(&td->td_sigqueue, sig,
- NULL);
- }
-
- /*
+ /*
+ * Keep looking if the debugger discarded the signal
+ * or replaced it with a masked signal.
+ *
* If the traced bit got turned off, go back up
* to the top to rescan signals. This ensures
* that p_sig* and p_sigact are consistent.
*/
- if ((p->p_flag & P_TRACED) == 0)
+ if (sig == 0 || (p->p_flag & P_TRACED) == 0)
continue;
}
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 9458b70..c39986d 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -357,7 +357,7 @@ kern_thr_exit(struct thread *td)
p->p_pendingexits++;
td->td_dbgflags |= TDB_EXIT;
if (p->p_ptevents & PTRACE_LWP)
- ptracestop(td, SIGTRAP);
+ ptracestop(td, SIGTRAP, NULL);
PROC_UNLOCK(p);
tidhash_remove(td);
PROC_LOCK(p);
diff --git a/sys/kern/subr_syscall.c b/sys/kern/subr_syscall.c
index 822976e..2bcad34 100644
--- a/sys/kern/subr_syscall.c
+++ b/sys/kern/subr_syscall.c
@@ -88,7 +88,7 @@ syscallenter(struct thread *td, struct syscall_args *sa)
td->td_dbg_sc_code = sa->code;
td->td_dbg_sc_narg = sa->narg;
if (p->p_ptevents & PTRACE_SCE)
- ptracestop((td), SIGTRAP);
+ ptracestop((td), SIGTRAP, NULL);
PROC_UNLOCK(p);
}
if (td->td_dbgflags & TDB_USERWR) {
@@ -222,7 +222,7 @@ syscallret(struct thread *td, int error, struct syscall_args *sa)
if (traced &&
((td->td_dbgflags & (TDB_FORK | TDB_EXEC)) != 0 ||
(p->p_ptevents & PTRACE_SCX) != 0))
- ptracestop(td, SIGTRAP);
+ ptracestop(td, SIGTRAP, NULL);
td->td_dbgflags &= ~(TDB_SCX | TDB_EXEC | TDB_FORK);
PROC_UNLOCK(p);
}
@@ -259,7 +259,7 @@ again:
if (td->td_dbgflags & TDB_VFORK) {
PROC_LOCK(p);
if (p->p_ptevents & PTRACE_VFORK)
- ptracestop(td, SIGTRAP);
+ ptracestop(td, SIGTRAP, NULL);
td->td_dbgflags &= ~TDB_VFORK;
PROC_UNLOCK(p);
}
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 69a3e4b..ded874a 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -1125,6 +1125,16 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
td2->td_dbgflags &= ~TDB_XSIG;
td2->td_xsig = data;
+ /*
+ * P_WKILLED is insurance that a PT_KILL/SIGKILL always
+ * works immediately, even if another thread is
+ * unsuspended first and attempts to handle a different
+ * signal or if the POSIX.1b style signal queue cannot
+ * accommodate any new signals.
+ */
+ if (data == SIGKILL)
+ p->p_flag |= P_WKILLED;
+
if (req == PT_DETACH) {
FOREACH_THREAD_IN_PROC(p, td3)
td3->td_dbgflags &= ~TDB_SUSPEND;
OpenPOWER on IntegriCloud