-rw-r--r--   include/linux/sched.h |  2
-rw-r--r--   kernel/ptrace.c       | 49
-rw-r--r--   kernel/signal.c       | 79
3 files changed, 112 insertions, 18 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b2a17df..456d80e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1775,8 +1775,10 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
/*
* task->group_stop flags
*/
+#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */
#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */
#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */
+#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */
extern void task_clear_group_stop_pending(struct task_struct *task);
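(Aside, not part of the patch: the new GROUP_STOP_SIGMASK layout keeps the stop signal number in the low 16 bits of task->group_stop, with the flag bits above it. A minimal, hypothetical standalone C program reusing the same defines shows the packing that do_signal_stop() performs further down.)

#include <stdio.h>

#define GROUP_STOP_SIGMASK	0xffff		/* signr of the last group stop */
#define GROUP_STOP_PENDING	(1 << 16)	/* task should stop for group stop */
#define GROUP_STOP_CONSUME	(1 << 17)	/* consume group stop count */
#define GROUP_STOP_TRAPPING	(1 << 18)	/* switching from STOPPED to TRACED */

int main(void)
{
	unsigned int group_stop = GROUP_STOP_TRAPPING;	/* pretend a trap is pending */
	int signr = 20;					/* e.g. SIGTSTP */

	/* same packing as do_signal_stop(): drop any old signr, keep the flags */
	group_stop &= ~GROUP_STOP_SIGMASK;
	group_stop |= signr | GROUP_STOP_PENDING | GROUP_STOP_CONSUME;

	printf("signr=%d pending=%d consume=%d trapping=%d\n",
	       group_stop & GROUP_STOP_SIGMASK,
	       !!(group_stop & GROUP_STOP_PENDING),
	       !!(group_stop & GROUP_STOP_CONSUME),
	       !!(group_stop & GROUP_STOP_TRAPPING));
	return 0;
}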
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 6acf895..745fc2d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -49,14 +49,22 @@ static void ptrace_untrace(struct task_struct *child)
spin_lock(&child->sighand->siglock);
if (task_is_traced(child)) {
/*
- * If the group stop is completed or in progress,
- * this thread was already counted as stopped.
+ * If group stop is completed or in progress, it should
+ * participate in the group stop. Set GROUP_STOP_PENDING
+ * before kicking it.
+ *
+ * This involves TRACED -> RUNNING -> STOPPED transition
+ * which is similar to but in the opposite direction of
+ * what happens while attaching to a stopped task.
+ * However, in this direction, the intermediate RUNNING
+ * state is not hidden even from the current ptracer and if
+ * it immediately re-attaches and performs a WNOHANG
+ * wait(2), it may fail.
*/
if (child->signal->flags & SIGNAL_STOP_STOPPED ||
child->signal->group_stop_count)
- __set_task_state(child, TASK_STOPPED);
- else
- signal_wake_up(child, 1);
+ child->group_stop |= GROUP_STOP_PENDING;
+ signal_wake_up(child, 1);
}
spin_unlock(&child->sighand->siglock);
}
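(Aside, not part of the patch: a hypothetical userspace sketch of the detach-side race described in the comment above, assuming 'pid' names a tracee that was part of a completed group stop; error handling omitted.)

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* After PTRACE_DETACH the tracee passes through a transient RUNNING
 * state on its way back to STOPPED, and that window is visible even to
 * the ptracer that just detached. */
static void detach_reattach_race(pid_t pid)
{
	int status;

	ptrace(PTRACE_DETACH, pid, NULL, NULL);	/* TRACED -> RUNNING -> STOPPED */
	ptrace(PTRACE_ATTACH, pid, NULL, NULL);	/* immediate re-attach */

	/* If the tracee is still in the transient RUNNING state, it is not
	 * yet stopped/traced and a WNOHANG wait(2) reports nothing. */
	if (waitpid(pid, &status, WNOHANG) == 0)
		printf("race hit: nothing to report yet\n");
}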
@@ -165,6 +173,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
static int ptrace_attach(struct task_struct *task)
{
+ bool wait_trap = false;
int retval;
audit_ptrace(task);
@@ -204,12 +213,42 @@ static int ptrace_attach(struct task_struct *task)
__ptrace_link(task, current);
send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
+ spin_lock(&task->sighand->siglock);
+
+ /*
+ * If the task is already STOPPED, set GROUP_STOP_PENDING and
+ * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
+ * will be cleared if the child completes the transition or any
+ * event which clears the group stop states happens. We'll wait
+ * for the transition to complete before returning from this
+ * function.
+ *
+ * This hides STOPPED -> RUNNING -> TRACED transition from the
+ * attaching thread but a different thread in the same group can
+ * still observe the transient RUNNING state. IOW, if another
+ * thread's WNOHANG wait(2) on the stopped tracee races against
+ * ATTACH, the wait(2) may fail due to the transient RUNNING.
+ *
+ * The following task_is_stopped() test is safe as both transitions
+ * in and out of STOPPED are protected by siglock.
+ */
+ if (task_is_stopped(task)) {
+ task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
+ signal_wake_up(task, 1);
+ wait_trap = true;
+ }
+
+ spin_unlock(&task->sighand->siglock);
+
retval = 0;
unlock_tasklist:
write_unlock_irq(&tasklist_lock);
unlock_creds:
mutex_unlock(&task->signal->cred_guard_mutex);
out:
+ if (wait_trap)
+ wait_event(current->signal->wait_chldexit,
+ !(task->group_stop & GROUP_STOP_TRAPPING));
return retval;
}
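(Aside, not part of the patch: for contrast with the detach-side race above, a hypothetical sketch of what the wait_trap logic buys the attaching thread, assuming 'pid' is already group-stopped and no SIGKILL races in; error handling omitted.)

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* With the change above, ptrace_attach() waits for GROUP_STOP_TRAPPING
 * to clear, so by the time PTRACE_ATTACH returns the tracee has already
 * finished the STOPPED -> RUNNING -> TRACED transition. */
static int attach_to_stopped(pid_t pid)
{
	int status;

	ptrace(PTRACE_ATTACH, pid, NULL, NULL);

	/* Safe even with WNOHANG for this thread; a different thread in the
	 * attacher's group may still observe the transient RUNNING state. */
	return waitpid(pid, &status, WNOHANG);
}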
diff --git a/kernel/signal.c b/kernel/signal.c
index 418776c..1e19991 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -224,6 +224,26 @@ static inline void print_dropped_signal(int sig)
}
/**
+ * task_clear_group_stop_trapping - clear group stop trapping bit
+ * @task: target task
+ *
+ * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it
+ * and wake up the ptracer. Note that we don't need any further locking.
+ * @task->siglock guarantees that @task->parent points to the ptracer.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+static void task_clear_group_stop_trapping(struct task_struct *task)
+{
+ if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
+ task->group_stop &= ~GROUP_STOP_TRAPPING;
+ __wake_up_sync(&task->parent->signal->wait_chldexit,
+ TASK_UNINTERRUPTIBLE, 1);
+ }
+}
+
+/**
* task_clear_group_stop_pending - clear pending group stop
* @task: target task
*
@@ -1706,8 +1726,20 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
current->last_siginfo = info;
current->exit_code = exit_code;
- /* Let the debugger run. */
- __set_current_state(TASK_TRACED);
+ /*
+ * TRACED should be visible before TRAPPING is cleared; otherwise,
+ * the tracer's do_wait() might fail.
+ */
+ set_current_state(TASK_TRACED);
+
+ /*
+ * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and
+ * the transition to TASK_TRACED should be atomic with respect to
+ * siglock. This should be done after the arch hook as siglock is
+ * released and regrabbed across it.
+ */
+ task_clear_group_stop_trapping(current);
+
spin_unlock_irq(&current->sighand->siglock);
read_lock(&tasklist_lock);
if (may_ptrace_stop()) {
@@ -1788,6 +1820,9 @@ static int do_signal_stop(int signr)
unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
struct task_struct *t;
+ /* signr will be recorded in task->group_stop for retries */
+ WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
+
if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
unlikely(signal_group_exit(sig)))
return 0;
@@ -1797,25 +1832,27 @@ static int do_signal_stop(int signr)
*/
sig->group_exit_code = signr;
- current->group_stop = gstop;
+ current->group_stop &= ~GROUP_STOP_SIGMASK;
+ current->group_stop |= signr | gstop;
sig->group_stop_count = 1;
- for (t = next_thread(current); t != current; t = next_thread(t))
+ for (t = next_thread(current); t != current;
+ t = next_thread(t)) {
+ t->group_stop &= ~GROUP_STOP_SIGMASK;
/*
* Setting state to TASK_STOPPED for a group
* stop is always done with the siglock held,
* so this check has no races.
*/
if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
- t->group_stop = gstop;
+ t->group_stop |= signr | gstop;
sig->group_stop_count++;
signal_wake_up(t, 0);
- } else
+ } else {
task_clear_group_stop_pending(t);
+ }
+ }
}
-
- current->exit_code = sig->group_exit_code;
- __set_current_state(TASK_STOPPED);
-
+retry:
if (likely(!task_ptrace(current))) {
int notify = 0;
@@ -1827,6 +1864,7 @@ static int do_signal_stop(int signr)
if (task_participate_group_stop(current))
notify = CLD_STOPPED;
+ __set_current_state(TASK_STOPPED);
spin_unlock_irq(&current->sighand->siglock);
if (notify) {
@@ -1839,13 +1877,28 @@ static int do_signal_stop(int signr)
schedule();
spin_lock_irq(&current->sighand->siglock);
- } else
- ptrace_stop(current->exit_code, CLD_STOPPED, 0, NULL);
+ } else {
+ ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
+ CLD_STOPPED, 0, NULL);
+ current->exit_code = 0;
+ }
+
+ /*
+ * GROUP_STOP_PENDING could be set if another group stop has
+ * started since being woken up or ptrace wants us to transit
+ * between TASK_STOPPED and TRACED. Retry group stop.
+ */
+ if (current->group_stop & GROUP_STOP_PENDING) {
+ WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
+ goto retry;
+ }
+
+ /* PTRACE_ATTACH might have raced with task killing, clear trapping */
+ task_clear_group_stop_trapping(current);
spin_unlock_irq(&current->sighand->siglock);
tracehook_finish_jctl();
- current->exit_code = 0;
return 1;
}
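(Aside, not part of the patch: a hypothetical end-to-end userspace sketch of the ptraced group-stop path changed above. After the tracer injects the stop signal, the tracee enters do_signal_stop() and traps via ptrace_stop() with the signal number taken from the low bits of ->group_stop, which is what WSTOPSIG() then reports.)

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {					/* tracee */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGTSTP);				/* ask for a group stop */
		_exit(0);
	}

	waitpid(pid, &status, 0);			/* signal-delivery stop */
	printf("delivery stop, sig=%d\n", WSTOPSIG(status));

	/* inject SIGTSTP; the tracee enters do_signal_stop() and, being
	 * ptraced, traps through ptrace_stop() for the group stop */
	ptrace(PTRACE_CONT, pid, NULL, (void *)(long)SIGTSTP);

	waitpid(pid, &status, 0);			/* the group stop itself */
	if (WIFSTOPPED(status))
		printf("group stop, sig=%d\n", WSTOPSIG(status));

	kill(pid, SIGKILL);				/* clean up the stopped tracee */
	waitpid(pid, &status, 0);
	return 0;
}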