Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  2592
1 file changed, 2592 insertions, 0 deletions
diff --git a/kernel/signal.c b/kernel/signal.c new file mode 100644 index 0000000..28859a9 --- /dev/null +++ b/kernel/signal.c @@ -0,0 +1,2592 @@ +/* + * linux/kernel/signal.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson + * + * 2003-06-02 Jim Houston - Concurrent Computer Corp. + * Changes to use preallocated sigqueue structures + * to allow signals to be sent reliably. + */ + +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/tty.h> +#include <linux/binfmts.h> +#include <linux/security.h> +#include <linux/syscalls.h> +#include <linux/ptrace.h> +#include <linux/signal.h> +#include <linux/signalfd.h> +#include <linux/tracehook.h> +#include <linux/capability.h> +#include <linux/freezer.h> +#include <linux/pid_namespace.h> +#include <linux/nsproxy.h> +#include <trace/sched.h> + +#include <asm/param.h> +#include <asm/uaccess.h> +#include <asm/unistd.h> +#include <asm/siginfo.h> +#include "audit.h" /* audit_signal_info() */ + +/* + * SLAB caches for signal bits. + */ + +static struct kmem_cache *sigqueue_cachep; + +static void __user *sig_handler(struct task_struct *t, int sig) +{ + return t->sighand->action[sig - 1].sa.sa_handler; +} + +static int sig_handler_ignored(void __user *handler, int sig) +{ + /* Is it explicitly or implicitly ignored? */ + return handler == SIG_IGN || + (handler == SIG_DFL && sig_kernel_ignore(sig)); +} + +static int sig_ignored(struct task_struct *t, int sig) +{ + void __user *handler; + + /* + * Blocked signals are never ignored, since the + * signal handler may change by the time it is + * unblocked. + */ + if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) + return 0; + + handler = sig_handler(t, sig); + if (!sig_handler_ignored(handler, sig)) + return 0; + + /* + * Tracers may want to know about even ignored signals. + */ + return !tracehook_consider_ignored_signal(t, sig, handler); +} + +/* + * Re-calculate pending state from the set of locally pending + * signals, globally pending signals, and blocked signals. + */ +static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) +{ + unsigned long ready; + long i; + + switch (_NSIG_WORDS) { + default: + for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) + ready |= signal->sig[i] &~ blocked->sig[i]; + break; + + case 4: ready = signal->sig[3] &~ blocked->sig[3]; + ready |= signal->sig[2] &~ blocked->sig[2]; + ready |= signal->sig[1] &~ blocked->sig[1]; + ready |= signal->sig[0] &~ blocked->sig[0]; + break; + + case 2: ready = signal->sig[1] &~ blocked->sig[1]; + ready |= signal->sig[0] &~ blocked->sig[0]; + break; + + case 1: ready = signal->sig[0] &~ blocked->sig[0]; + } + return ready != 0; +} + +#define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) + +static int recalc_sigpending_tsk(struct task_struct *t) +{ + if (t->signal->group_stop_count > 0 || + PENDING(&t->pending, &t->blocked) || + PENDING(&t->signal->shared_pending, &t->blocked)) { + set_tsk_thread_flag(t, TIF_SIGPENDING); + return 1; + } + /* + * We must never clear the flag in another thread, or in current + * when it's possible the current syscall is returning -ERESTART*. + * So we don't clear it here, and only callers who know they should do. + */ + return 0; +} + +/* + * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. + * This is superfluous when called on current, the wakeup is a harmless no-op. 
+ */ +void recalc_sigpending_and_wake(struct task_struct *t) +{ + if (recalc_sigpending_tsk(t)) + signal_wake_up(t, 0); +} + +void recalc_sigpending(void) +{ + if (unlikely(tracehook_force_sigpending())) + set_thread_flag(TIF_SIGPENDING); + else if (!recalc_sigpending_tsk(current) && !freezing(current)) + clear_thread_flag(TIF_SIGPENDING); + +} + +/* Given the mask, find the first available signal that should be serviced. */ + +int next_signal(struct sigpending *pending, sigset_t *mask) +{ + unsigned long i, *s, *m, x; + int sig = 0; + + s = pending->signal.sig; + m = mask->sig; + switch (_NSIG_WORDS) { + default: + for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m) + if ((x = *s &~ *m) != 0) { + sig = ffz(~x) + i*_NSIG_BPW + 1; + break; + } + break; + + case 2: if ((x = s[0] &~ m[0]) != 0) + sig = 1; + else if ((x = s[1] &~ m[1]) != 0) + sig = _NSIG_BPW + 1; + else + break; + sig += ffz(~x); + break; + + case 1: if ((x = *s &~ *m) != 0) + sig = ffz(~x) + 1; + break; + } + + return sig; +} + +static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, + int override_rlimit) +{ + struct sigqueue *q = NULL; + struct user_struct *user; + + /* + * In order to avoid problems with "switch_user()", we want to make + * sure that the compiler doesn't re-load "t->user" + */ + user = t->user; + barrier(); + atomic_inc(&user->sigpending); + if (override_rlimit || + atomic_read(&user->sigpending) <= + t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) + q = kmem_cache_alloc(sigqueue_cachep, flags); + if (unlikely(q == NULL)) { + atomic_dec(&user->sigpending); + } else { + INIT_LIST_HEAD(&q->list); + q->flags = 0; + q->user = get_uid(user); + } + return(q); +} + +static void __sigqueue_free(struct sigqueue *q) +{ + if (q->flags & SIGQUEUE_PREALLOC) + return; + atomic_dec(&q->user->sigpending); + free_uid(q->user); + kmem_cache_free(sigqueue_cachep, q); +} + +void flush_sigqueue(struct sigpending *queue) +{ + struct sigqueue *q; + + sigemptyset(&queue->signal); + while (!list_empty(&queue->list)) { + q = list_entry(queue->list.next, struct sigqueue , list); + list_del_init(&q->list); + __sigqueue_free(q); + } +} + +/* + * Flush all pending signals for a task. + */ +void flush_signals(struct task_struct *t) +{ + unsigned long flags; + + spin_lock_irqsave(&t->sighand->siglock, flags); + clear_tsk_thread_flag(t, TIF_SIGPENDING); + flush_sigqueue(&t->pending); + flush_sigqueue(&t->signal->shared_pending); + spin_unlock_irqrestore(&t->sighand->siglock, flags); +} + +static void __flush_itimer_signals(struct sigpending *pending) +{ + sigset_t signal, retain; + struct sigqueue *q, *n; + + signal = pending->signal; + sigemptyset(&retain); + + list_for_each_entry_safe(q, n, &pending->list, list) { + int sig = q->info.si_signo; + + if (likely(q->info.si_code != SI_TIMER)) { + sigaddset(&retain, sig); + } else { + sigdelset(&signal, sig); + list_del_init(&q->list); + __sigqueue_free(q); + } + } + + sigorsets(&pending->signal, &signal, &retain); +} + +void flush_itimer_signals(void) +{ + struct task_struct *tsk = current; + unsigned long flags; + + spin_lock_irqsave(&tsk->sighand->siglock, flags); + __flush_itimer_signals(&tsk->pending); + __flush_itimer_signals(&tsk->signal->shared_pending); + spin_unlock_irqrestore(&tsk->sighand->siglock, flags); +} + +void ignore_signals(struct task_struct *t) +{ + int i; + + for (i = 0; i < _NSIG; ++i) + t->sighand->action[i].sa.sa_handler = SIG_IGN; + + flush_signals(t); +} + +/* + * Flush all handlers for a task. 
+ */ + +void +flush_signal_handlers(struct task_struct *t, int force_default) +{ + int i; + struct k_sigaction *ka = &t->sighand->action[0]; + for (i = _NSIG ; i != 0 ; i--) { + if (force_default || ka->sa.sa_handler != SIG_IGN) + ka->sa.sa_handler = SIG_DFL; + ka->sa.sa_flags = 0; + sigemptyset(&ka->sa.sa_mask); + ka++; + } +} + +int unhandled_signal(struct task_struct *tsk, int sig) +{ + void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; + if (is_global_init(tsk)) + return 1; + if (handler != SIG_IGN && handler != SIG_DFL) + return 0; + return !tracehook_consider_fatal_signal(tsk, sig, handler); +} + + +/* Notify the system that a driver wants to block all signals for this + * process, and wants to be notified if any signals at all were to be + * sent/acted upon. If the notifier routine returns non-zero, then the + * signal will be acted upon after all. If the notifier routine returns 0, + * then the signal will be blocked. Only one block per process is + * allowed. priv is a pointer to private data that the notifier routine + * can use to determine if the signal should be blocked or not. */ + +void +block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) +{ + unsigned long flags; + + spin_lock_irqsave(&current->sighand->siglock, flags); + current->notifier_mask = mask; + current->notifier_data = priv; + current->notifier = notifier; + spin_unlock_irqrestore(&current->sighand->siglock, flags); +} + +/* Notify the system that blocking has ended. */ + +void +unblock_all_signals(void) +{ + unsigned long flags; + + spin_lock_irqsave(&current->sighand->siglock, flags); + current->notifier = NULL; + current->notifier_data = NULL; + recalc_sigpending(); + spin_unlock_irqrestore(&current->sighand->siglock, flags); +} + +static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) +{ + struct sigqueue *q, *first = NULL; + + /* + * Collect the siginfo appropriate to this signal. Check if + * there is another siginfo for the same signal. + */ + list_for_each_entry(q, &list->list, list) { + if (q->info.si_signo == sig) { + if (first) + goto still_pending; + first = q; + } + } + + sigdelset(&list->signal, sig); + + if (first) { +still_pending: + list_del_init(&first->list); + copy_siginfo(info, &first->info); + __sigqueue_free(first); + } else { + /* Ok, it wasn't in the queue. This must be + a fast-pathed signal or we must have been + out of queue space. So zero out the info. + */ + info->si_signo = sig; + info->si_errno = 0; + info->si_code = 0; + info->si_pid = 0; + info->si_uid = 0; + } +} + +static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, + siginfo_t *info) +{ + int sig = next_signal(pending, mask); + + if (sig) { + if (current->notifier) { + if (sigismember(current->notifier_mask, sig)) { + if (!(current->notifier)(current->notifier_data)) { + clear_thread_flag(TIF_SIGPENDING); + return 0; + } + } + } + + collect_signal(sig, pending, info); + } + + return sig; +} + +/* + * Dequeue a signal and return the element to the caller, which is + * expected to free it. + * + * All callers have to hold the siglock. + */ +int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) +{ + int signr; + + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ + signr = __dequeue_signal(&tsk->pending, mask, info); + if (!signr) { + signr = __dequeue_signal(&tsk->signal->shared_pending, + mask, info); + /* + * itimer signal ?
+ * + * itimers are process shared and we restart periodic + * itimers in the signal delivery path to prevent DoS + * attacks in the high resolution timer case. This is + * compliant with the old way of self restarting + * itimers, as the SIGALRM is a legacy signal and only + * queued once. Changing the restart behaviour to + * restart the timer in the signal dequeue path is + * reducing the timer noise on heavy loaded !highres + * systems too. + */ + if (unlikely(signr == SIGALRM)) { + struct hrtimer *tmr = &tsk->signal->real_timer; + + if (!hrtimer_is_queued(tmr) && + tsk->signal->it_real_incr.tv64 != 0) { + hrtimer_forward(tmr, tmr->base->get_time(), + tsk->signal->it_real_incr); + hrtimer_restart(tmr); + } + } + } + + recalc_sigpending(); + if (!signr) + return 0; + + if (unlikely(sig_kernel_stop(signr))) { + /* + * Set a marker that we have dequeued a stop signal. Our + * caller might release the siglock and then the pending + * stop signal it is about to process is no longer in the + * pending bitmasks, but must still be cleared by a SIGCONT + * (and overruled by a SIGKILL). So those cases clear this + * shared flag after we've set it. Note that this flag may + * remain set after the signal we return is ignored or + * handled. That doesn't matter because its only purpose + * is to alert stop-signal processing code when another + * processor has come along and cleared the flag. + */ + tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; + } + if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { + /* + * Release the siglock to ensure proper locking order + * of timer locks outside of siglocks. Note, we leave + * irqs disabled here, since the posix-timers code is + * about to disable them again anyway. + */ + spin_unlock(&tsk->sighand->siglock); + do_schedule_next_timer(info); + spin_lock(&tsk->sighand->siglock); + } + return signr; +} + +/* + * Tell a process that it has a new active signal.. + * + * NOTE! we rely on the previous spin_lock to + * lock interrupts for us! We can only be called with + * "siglock" held, and the local interrupt must + * have been disabled when that got acquired! + * + * No need to set need_resched since signal event passing + * goes through ->blocked + */ +void signal_wake_up(struct task_struct *t, int resume) +{ + unsigned int mask; + + set_tsk_thread_flag(t, TIF_SIGPENDING); + + /* + * For SIGKILL, we want to wake it up in the stopped/traced/killable + * case. We don't check t->state here because there is a race with it + * executing another processor and just now entering stopped state. + * By using wake_up_state, we ensure the process will wake up and + * handle its death signal. + */ + mask = TASK_INTERRUPTIBLE; + if (resume) + mask |= TASK_WAKEKILL; + if (!wake_up_state(t, mask)) + kick_process(t); +} + +/* + * Remove signals in mask from the pending set and queue. + * Returns 1 if any signals were found. + * + * All callers must be holding the siglock. + * + * This version takes a sigset mask and looks at all signals, + * not just those in the first mask word. + */ +static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) +{ + struct sigqueue *q, *n; + sigset_t m; + + sigandsets(&m, mask, &s->signal); + if (sigisemptyset(&m)) + return 0; + + signandsets(&s->signal, &s->signal, mask); + list_for_each_entry_safe(q, n, &s->list, list) { + if (sigismember(mask, q->info.si_signo)) { + list_del_init(&q->list); + __sigqueue_free(q); + } + } + return 1; +} +/* + * Remove signals in mask from the pending set and queue. 
+ * Returns 1 if any signals were found. + * + * All callers must be holding the siglock. + */ +static int rm_from_queue(unsigned long mask, struct sigpending *s) +{ + struct sigqueue *q, *n; + + if (!sigtestsetmask(&s->signal, mask)) + return 0; + + sigdelsetmask(&s->signal, mask); + list_for_each_entry_safe(q, n, &s->list, list) { + if (q->info.si_signo < SIGRTMIN && + (mask & sigmask(q->info.si_signo))) { + list_del_init(&q->list); + __sigqueue_free(q); + } + } + return 1; +} + +/* + * Bad permissions for sending the signal + */ +static int check_kill_permission(int sig, struct siginfo *info, + struct task_struct *t) +{ + struct pid *sid; + int error; + + if (!valid_signal(sig)) + return -EINVAL; + + if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info))) + return 0; + + error = audit_signal_info(sig, t); /* Let audit system see the signal */ + if (error) + return error; + + if ((current->euid ^ t->suid) && (current->euid ^ t->uid) && + (current->uid ^ t->suid) && (current->uid ^ t->uid) && + !capable(CAP_KILL)) { + switch (sig) { + case SIGCONT: + sid = task_session(t); + /* + * We don't return the error if sid == NULL. The + * task was unhashed, the caller must notice this. + */ + if (!sid || sid == task_session(current)) + break; + default: + return -EPERM; + } + } + + return security_task_kill(t, info, sig, 0); +} + +/* + * Handle magic process-wide effects of stop/continue signals. Unlike + * the signal actions, these happen immediately at signal-generation + * time regardless of blocking, ignoring, or handling. This does the + * actual continuing for SIGCONT, but not the actual stopping for stop + * signals. The process stop is done as a signal action for SIG_DFL. + * + * Returns true if the signal should be actually delivered, otherwise + * it should be dropped. + */ +static int prepare_signal(int sig, struct task_struct *p) +{ + struct signal_struct *signal = p->signal; + struct task_struct *t; + + if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { + /* + * The process is in the middle of dying, nothing to do. + */ + } else if (sig_kernel_stop(sig)) { + /* + * This is a stop signal. Remove SIGCONT from all queues. + */ + rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); + t = p; + do { + rm_from_queue(sigmask(SIGCONT), &t->pending); + } while_each_thread(p, t); + } else if (sig == SIGCONT) { + unsigned int why; + /* + * Remove all stop signals from all queues, + * and wake all threads. + */ + rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); + t = p; + do { + unsigned int state; + rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); + /* + * If there is a handler for SIGCONT, we must make + * sure that no thread returns to user mode before + * we post the signal, in case it was the only + * thread eligible to run the signal handler--then + * it must not do anything between resuming and + * running the handler. With the TIF_SIGPENDING + * flag set, the thread will pause and acquire the + * siglock that we hold now and until we've queued + * the pending signal. + * + * Wake up the stopped thread _after_ setting + * TIF_SIGPENDING + */ + state = __TASK_STOPPED; + if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { + set_tsk_thread_flag(t, TIF_SIGPENDING); + state |= TASK_INTERRUPTIBLE; + } + wake_up_state(t, state); + } while_each_thread(p, t); + + /* + * Notify the parent with CLD_CONTINUED if we were stopped. + * + * If we were in the middle of a group stop, we pretend it + * was already finished, and then continued. 
Since SIGCHLD + * doesn't queue we report only CLD_STOPPED, as if the next + * CLD_CONTINUED was dropped. + */ + why = 0; + if (signal->flags & SIGNAL_STOP_STOPPED) + why |= SIGNAL_CLD_CONTINUED; + else if (signal->group_stop_count) + why |= SIGNAL_CLD_STOPPED; + + if (why) { + /* + * The first thread which returns from finish_stop() + * will take ->siglock, notice SIGNAL_CLD_MASK, and + * notify its parent. See get_signal_to_deliver(). + */ + signal->flags = why | SIGNAL_STOP_CONTINUED; + signal->group_stop_count = 0; + signal->group_exit_code = 0; + } else { + /* + * We are not stopped, but there could be a stop + * signal in the middle of being processed after + * being removed from the queue. Clear that too. + */ + signal->flags &= ~SIGNAL_STOP_DEQUEUED; + } + } + + return !sig_ignored(p, sig); +} + +/* + * Test if P wants to take SIG. After we've checked all threads with this, + * it's equivalent to finding no threads not blocking SIG. Any threads not + * blocking SIG were ruled out because they are not running and already + * have pending signals. Such threads will dequeue from the shared queue + * as soon as they're available, so putting the signal on the shared queue + * will be equivalent to sending it to one such thread. + */ +static inline int wants_signal(int sig, struct task_struct *p) +{ + if (sigismember(&p->blocked, sig)) + return 0; + if (p->flags & PF_EXITING) + return 0; + if (sig == SIGKILL) + return 1; + if (task_is_stopped_or_traced(p)) + return 0; + return task_curr(p) || !signal_pending(p); +} + +static void complete_signal(int sig, struct task_struct *p, int group) +{ + struct signal_struct *signal = p->signal; + struct task_struct *t; + + /* + * Now find a thread we can wake up to take the signal off the queue. + * + * If the main thread wants the signal, it gets first crack. + * Probably the least surprising to the average bear. + */ + if (wants_signal(sig, p)) + t = p; + else if (!group || thread_group_empty(p)) + /* + * There is just one thread and it does not need to be woken. + * It will dequeue unblocked signals before it runs again. + */ + return; + else { + /* + * Otherwise try to find a suitable thread. + */ + t = signal->curr_target; + while (!wants_signal(sig, t)) { + t = next_thread(t); + if (t == signal->curr_target) + /* + * No thread needs to be woken. + * Any eligible threads will see + * the signal in the queue soon. + */ + return; + } + signal->curr_target = t; + } + + /* + * Found a killable thread. If the signal will be fatal, + * then start taking the whole group down immediately. + */ + if (sig_fatal(p, sig) && + !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && + !sigismember(&t->real_blocked, sig) && + (sig == SIGKILL || + !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) { + /* + * This signal will be fatal to the whole group. + */ + if (!sig_kernel_coredump(sig)) { + /* + * Start a group exit and wake everybody up. + * This way we don't have other threads + * running and doing things after a slower + * thread has the fatal signal pending. + */ + signal->flags = SIGNAL_GROUP_EXIT; + signal->group_exit_code = sig; + signal->group_stop_count = 0; + t = p; + do { + sigaddset(&t->pending.signal, SIGKILL); + signal_wake_up(t, 1); + } while_each_thread(p, t); + return; + } + } + + /* + * The signal is already in the shared-pending queue. + * Tell the chosen thread to wake up and dequeue it. 
+ */ + signal_wake_up(t, sig == SIGKILL); + return; +} + +static inline int legacy_queue(struct sigpending *signals, int sig) +{ + return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); +} + +static int send_signal(int sig, struct siginfo *info, struct task_struct *t, + int group) +{ + struct sigpending *pending; + struct sigqueue *q; + + trace_sched_signal_send(sig, t); + + assert_spin_locked(&t->sighand->siglock); + if (!prepare_signal(sig, t)) + return 0; + + pending = group ? &t->signal->shared_pending : &t->pending; + /* + * Short-circuit ignored signals and support queuing + * exactly one non-rt signal, so that we can get more + * detailed information about the cause of the signal. + */ + if (legacy_queue(pending, sig)) + return 0; + /* + * fast-pathed signals for kernel-internal things like SIGSTOP + * or SIGKILL. + */ + if (info == SEND_SIG_FORCED) + goto out_set; + + /* Real-time signals must be queued if sent by sigqueue, or + some other real-time mechanism. It is implementation + defined whether kill() does so. We attempt to do so, on + the principle of least surprise, but since kill is not + allowed to fail with EAGAIN when low on memory we just + make sure at least one signal gets delivered and don't + pass on the info struct. */ + + q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && + (is_si_special(info) || + info->si_code >= 0))); + if (q) { + list_add_tail(&q->list, &pending->list); + switch ((unsigned long) info) { + case (unsigned long) SEND_SIG_NOINFO: + q->info.si_signo = sig; + q->info.si_errno = 0; + q->info.si_code = SI_USER; + q->info.si_pid = task_pid_vnr(current); + q->info.si_uid = current->uid; + break; + case (unsigned long) SEND_SIG_PRIV: + q->info.si_signo = sig; + q->info.si_errno = 0; + q->info.si_code = SI_KERNEL; + q->info.si_pid = 0; + q->info.si_uid = 0; + break; + default: + copy_siginfo(&q->info, info); + break; + } + } else if (!is_si_special(info)) { + if (sig >= SIGRTMIN && info->si_code != SI_USER) + /* + * Queue overflow, abort. We may abort if the signal was rt + * and sent by user using something other than kill(). + */ + return -EAGAIN; + } + +out_set: + signalfd_notify(t, sig); + sigaddset(&pending->signal, sig); + complete_signal(sig, t, group); + return 0; +} + +int print_fatal_signals; + +static void print_fatal_signal(struct pt_regs *regs, int signr) +{ + printk("%s/%d: potentially unexpected fatal signal %d.\n", + current->comm, task_pid_nr(current), signr); + +#if defined(__i386__) && !defined(__arch_um__) + printk("code at %08lx: ", regs->ip); + { + int i; + for (i = 0; i < 16; i++) { + unsigned char insn; + + __get_user(insn, (unsigned char *)(regs->ip + i)); + printk("%02x ", insn); + } + } +#endif + printk("\n"); + show_regs(regs); +} + +static int __init setup_print_fatal_signals(char *str) +{ + get_option (&str, &print_fatal_signals); + + return 1; +} + +__setup("print-fatal-signals=", setup_print_fatal_signals); + +int +__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +{ + return send_signal(sig, info, p, 1); +} + +static int +specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) +{ + return send_signal(sig, info, t, 0); +} + +/* + * Force a signal that the process can't ignore: if necessary + * we unblock the signal and change any SIG_IGN to SIG_DFL. + * + * Note: If we unblock the signal, we always reset it to SIG_DFL, + * since we do not want to have a signal handler that was blocked + * be invoked when user space had explicitly blocked it. 
+ * + * We don't want to have recursive SIGSEGV's etc, for example, + * that is why we also clear SIGNAL_UNKILLABLE. + */ +int +force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +{ + unsigned long int flags; + int ret, blocked, ignored; + struct k_sigaction *action; + + spin_lock_irqsave(&t->sighand->siglock, flags); + action = &t->sighand->action[sig-1]; + ignored = action->sa.sa_handler == SIG_IGN; + blocked = sigismember(&t->blocked, sig); + if (blocked || ignored) { + action->sa.sa_handler = SIG_DFL; + if (blocked) { + sigdelset(&t->blocked, sig); + recalc_sigpending_and_wake(t); + } + } + if (action->sa.sa_handler == SIG_DFL) + t->signal->flags &= ~SIGNAL_UNKILLABLE; + ret = specific_send_sig_info(sig, info, t); + spin_unlock_irqrestore(&t->sighand->siglock, flags); + + return ret; +} + +void +force_sig_specific(int sig, struct task_struct *t) +{ + force_sig_info(sig, SEND_SIG_FORCED, t); +} + +/* + * Nuke all other threads in the group. + */ +void zap_other_threads(struct task_struct *p) +{ + struct task_struct *t; + + p->signal->group_stop_count = 0; + + for (t = next_thread(p); t != p; t = next_thread(t)) { + /* + * Don't bother with already dead threads + */ + if (t->exit_state) + continue; + + /* SIGKILL will be handled before any pending SIGSTOP */ + sigaddset(&t->pending.signal, SIGKILL); + signal_wake_up(t, 1); + } +} + +int __fatal_signal_pending(struct task_struct *tsk) +{ + return sigismember(&tsk->pending.signal, SIGKILL); +} +EXPORT_SYMBOL(__fatal_signal_pending); + +struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) +{ + struct sighand_struct *sighand; + + rcu_read_lock(); + for (;;) { + sighand = rcu_dereference(tsk->sighand); + if (unlikely(sighand == NULL)) + break; + + spin_lock_irqsave(&sighand->siglock, *flags); + if (likely(sighand == tsk->sighand)) + break; + spin_unlock_irqrestore(&sighand->siglock, *flags); + } + rcu_read_unlock(); + + return sighand; +} + +int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +{ + unsigned long flags; + int ret; + + ret = check_kill_permission(sig, info, p); + + if (!ret && sig) { + ret = -ESRCH; + if (lock_task_sighand(p, &flags)) { + ret = __group_send_sig_info(sig, info, p); + unlock_task_sighand(p, &flags); + } + } + + return ret; +} + +/* + * __kill_pgrp_info() sends a signal to a process group: this is what the tty + * control characters do (^C, ^Z etc) + */ + +int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) +{ + struct task_struct *p = NULL; + int retval, success; + + success = 0; + retval = -ESRCH; + do_each_pid_task(pgrp, PIDTYPE_PGID, p) { + int err = group_send_sig_info(sig, info, p); + success |= !err; + retval = err; + } while_each_pid_task(pgrp, PIDTYPE_PGID, p); + return success ? 0 : retval; +} + +int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) +{ + int error = -ESRCH; + struct task_struct *p; + + rcu_read_lock(); +retry: + p = pid_task(pid, PIDTYPE_PID); + if (p) { + error = group_send_sig_info(sig, info, p); + if (unlikely(error == -ESRCH)) + /* + * The task was unhashed in between, try again. + * If it is dead, pid_task() will return NULL, + * if we race with de_thread() it will find the + * new leader. 
+ */ + goto retry; + } + rcu_read_unlock(); + + return error; +} + +int +kill_proc_info(int sig, struct siginfo *info, pid_t pid) +{ + int error; + rcu_read_lock(); + error = kill_pid_info(sig, info, find_vpid(pid)); + rcu_read_unlock(); + return error; +} + +/* like kill_pid_info(), but doesn't use uid/euid of "current" */ +int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, + uid_t uid, uid_t euid, u32 secid) +{ + int ret = -EINVAL; + struct task_struct *p; + + if (!valid_signal(sig)) + return ret; + + read_lock(&tasklist_lock); + p = pid_task(pid, PIDTYPE_PID); + if (!p) { + ret = -ESRCH; + goto out_unlock; + } + if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) + && (euid != p->suid) && (euid != p->uid) + && (uid != p->suid) && (uid != p->uid)) { + ret = -EPERM; + goto out_unlock; + } + ret = security_task_kill(p, info, sig, secid); + if (ret) + goto out_unlock; + if (sig && p->sighand) { + unsigned long flags; + spin_lock_irqsave(&p->sighand->siglock, flags); + ret = __group_send_sig_info(sig, info, p); + spin_unlock_irqrestore(&p->sighand->siglock, flags); + } +out_unlock: + read_unlock(&tasklist_lock); + return ret; +} +EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); + +/* + * kill_something_info() interprets pid in interesting ways just like kill(2). + * + * POSIX specifies that kill(-1,sig) is unspecified, but what we have + * is probably wrong. Should make it like BSD or SYSV. + */ + +static int kill_something_info(int sig, struct siginfo *info, pid_t pid) +{ + int ret; + + if (pid > 0) { + rcu_read_lock(); + ret = kill_pid_info(sig, info, find_vpid(pid)); + rcu_read_unlock(); + return ret; + } + + read_lock(&tasklist_lock); + if (pid != -1) { + ret = __kill_pgrp_info(sig, info, + pid ? find_vpid(-pid) : task_pgrp(current)); + } else { + int retval = 0, count = 0; + struct task_struct * p; + + for_each_process(p) { + if (task_pid_vnr(p) > 1 && + !same_thread_group(p, current)) { + int err = group_send_sig_info(sig, info, p); + ++count; + if (err != -EPERM) + retval = err; + } + } + ret = count ? retval : -ESRCH; + } + read_unlock(&tasklist_lock); + + return ret; +} + +/* + * These are for backward compatibility with the rest of the kernel source. + */ + +/* + * The caller must ensure the task can't exit. + */ +int +send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +{ + int ret; + unsigned long flags; + + /* + * Make sure legacy kernel users don't send in bad values + * (normal paths check this in check_kill_permission). + */ + if (!valid_signal(sig)) + return -EINVAL; + + spin_lock_irqsave(&p->sighand->siglock, flags); + ret = specific_send_sig_info(sig, info, p); + spin_unlock_irqrestore(&p->sighand->siglock, flags); + return ret; +} + +#define __si_special(priv) \ + ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) + +int +send_sig(int sig, struct task_struct *p, int priv) +{ + return send_sig_info(sig, __si_special(priv), p); +} + +void +force_sig(int sig, struct task_struct *p) +{ + force_sig_info(sig, SEND_SIG_PRIV, p); +} + +/* + * When things go south during signal handling, we + * will force a SIGSEGV. And if the signal that caused + * the problem was already a SIGSEGV, we'll want to + * make sure we don't even try to deliver the signal.. 
+ */ +int +force_sigsegv(int sig, struct task_struct *p) +{ + if (sig == SIGSEGV) { + unsigned long flags; + spin_lock_irqsave(&p->sighand->siglock, flags); + p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; + spin_unlock_irqrestore(&p->sighand->siglock, flags); + } + force_sig(SIGSEGV, p); + return 0; +} + +int kill_pgrp(struct pid *pid, int sig, int priv) +{ + int ret; + + read_lock(&tasklist_lock); + ret = __kill_pgrp_info(sig, __si_special(priv), pid); + read_unlock(&tasklist_lock); + + return ret; +} +EXPORT_SYMBOL(kill_pgrp); + +int kill_pid(struct pid *pid, int sig, int priv) +{ + return kill_pid_info(sig, __si_special(priv), pid); +} +EXPORT_SYMBOL(kill_pid); + +/* + * These functions support sending signals using preallocated sigqueue + * structures. This is needed "because realtime applications cannot + * afford to lose notifications of asynchronous events, like timer + * expirations or I/O completions". In the case of Posix Timers + * we allocate the sigqueue structure from the timer_create. If this + * allocation fails we are able to report the failure to the application + * with an EAGAIN error. + */ + +struct sigqueue *sigqueue_alloc(void) +{ + struct sigqueue *q; + + if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) + q->flags |= SIGQUEUE_PREALLOC; + return(q); +} + +void sigqueue_free(struct sigqueue *q) +{ + unsigned long flags; + spinlock_t *lock = &current->sighand->siglock; + + BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); + /* + * We must hold ->siglock while testing q->list + * to serialize with collect_signal() or with + * __exit_signal()->flush_sigqueue(). + */ + spin_lock_irqsave(lock, flags); + q->flags &= ~SIGQUEUE_PREALLOC; + /* + * If it is queued it will be freed when dequeued, + * like the "regular" sigqueue. + */ + if (!list_empty(&q->list)) + q = NULL; + spin_unlock_irqrestore(lock, flags); + + if (q) + __sigqueue_free(q); +} + +int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) +{ + int sig = q->info.si_signo; + struct sigpending *pending; + unsigned long flags; + int ret; + + BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); + + ret = -1; + if (!likely(lock_task_sighand(t, &flags))) + goto ret; + + ret = 1; /* the signal is ignored */ + if (!prepare_signal(sig, t)) + goto out; + + ret = 0; + if (unlikely(!list_empty(&q->list))) { + /* + * If an SI_TIMER entry is already queued, just increment + * the overrun count. + */ + BUG_ON(q->info.si_code != SI_TIMER); + q->info.si_overrun++; + goto out; + } + q->info.si_overrun = 0; + + signalfd_notify(t, sig); + pending = group ? &t->signal->shared_pending : &t->pending; + list_add_tail(&q->list, &pending->list); + sigaddset(&pending->signal, sig); + complete_signal(sig, t, group); +out: + unlock_task_sighand(t, &flags); +ret: + return ret; +} + +/* + * Wake up any threads in the parent blocked in wait* syscalls. + */ +static inline void __wake_up_parent(struct task_struct *p, + struct task_struct *parent) +{ + wake_up_interruptible_sync(&parent->signal->wait_chldexit); +} + +/* + * Let a parent know about the death of a child. + * For a stopped/continued status change, use do_notify_parent_cldstop instead. + * + * Returns -1 if our parent ignored us and so we've switched to + * self-reaping, or else @sig. + */ +int do_notify_parent(struct task_struct *tsk, int sig) +{ + struct siginfo info; + unsigned long flags; + struct sighand_struct *psig; + struct task_cputime cputime; + int ret = sig; + + BUG_ON(sig == -1); + + /* do_notify_parent_cldstop should have been called instead.
*/ + BUG_ON(task_is_stopped_or_traced(tsk)); + + BUG_ON(!tsk->ptrace && + (tsk->group_leader != tsk || !thread_group_empty(tsk))); + + info.si_signo = sig; + info.si_errno = 0; + /* + * we are under tasklist_lock here so our parent is tied to + * us and cannot exit and release its namespace. + * + * the only thing it can do is to switch its nsproxy with sys_unshare, + * but unsharing pid namespaces is not allowed, so we'll always + * see the relevant namespace + * + * write_lock() currently calls preempt_disable() which is the + * same as rcu_read_lock(), but according to Oleg, it is not + * correct to rely on this + */ + rcu_read_lock(); + info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + rcu_read_unlock(); + + info.si_uid = tsk->uid; + + thread_group_cputime(tsk, &cputime); + info.si_utime = cputime_to_jiffies(cputime.utime); + info.si_stime = cputime_to_jiffies(cputime.stime); + + info.si_status = tsk->exit_code & 0x7f; + if (tsk->exit_code & 0x80) + info.si_code = CLD_DUMPED; + else if (tsk->exit_code & 0x7f) + info.si_code = CLD_KILLED; + else { + info.si_code = CLD_EXITED; + info.si_status = tsk->exit_code >> 8; + } + + psig = tsk->parent->sighand; + spin_lock_irqsave(&psig->siglock, flags); + if (!tsk->ptrace && sig == SIGCHLD && + (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || + (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { + /* + * We are exiting and our parent doesn't care. POSIX.1 + * defines special semantics for setting SIGCHLD to SIG_IGN + * or setting the SA_NOCLDWAIT flag: we should be reaped + * automatically and not left for our parent's wait4 call. + * Rather than having the parent do it as a magic kind of + * signal handler, we just set this to tell do_exit that we + * can be cleaned up without becoming a zombie. Note that + * we still call __wake_up_parent in this case, because a + * blocked sys_wait4 might now return -ECHILD. + * + * Whether we send SIGCHLD or not for SA_NOCLDWAIT + * is implementation-defined: we do (if you don't want + * it, just use SIG_IGN instead).
*/ + ret = tsk->exit_signal = -1; + if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) + sig = -1; + } + if (valid_signal(sig) && sig > 0) + __group_send_sig_info(sig, &info, tsk->parent); + __wake_up_parent(tsk, tsk->parent); + spin_unlock_irqrestore(&psig->siglock, flags); + + return ret; +} + +static void do_notify_parent_cldstop(struct task_struct *tsk, int why) +{ + struct siginfo info; + unsigned long flags; + struct task_struct *parent; + struct sighand_struct *sighand; + + if (tsk->ptrace & PT_PTRACED) + parent = tsk->parent; + else { + tsk = tsk->group_leader; + parent = tsk->real_parent; + } + + info.si_signo = SIGCHLD; + info.si_errno = 0; + /* + * see comment in do_notify_parent() about the following 3 lines + */ + rcu_read_lock(); + info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + rcu_read_unlock(); + + info.si_uid = tsk->uid; + + info.si_utime = cputime_to_clock_t(tsk->utime); + info.si_stime = cputime_to_clock_t(tsk->stime); + + info.si_code = why; + switch (why) { + case CLD_CONTINUED: + info.si_status = SIGCONT; + break; + case CLD_STOPPED: + info.si_status = tsk->signal->group_exit_code & 0x7f; + break; + case CLD_TRAPPED: + info.si_status = tsk->exit_code & 0x7f; + break; + default: + BUG(); + } + + sighand = parent->sighand; + spin_lock_irqsave(&sighand->siglock, flags); + if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && + !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) + __group_send_sig_info(SIGCHLD, &info, parent); + /* + * Even if SIGCHLD is not generated, we must wake up wait4 calls. + */ + __wake_up_parent(tsk, parent); + spin_unlock_irqrestore(&sighand->siglock, flags); +} + +static inline int may_ptrace_stop(void) +{ + if (!likely(current->ptrace & PT_PTRACED)) + return 0; + /* + * Are we in the middle of do_coredump? + * If so, and our tracer is also part of the coredump, stopping + * is a deadlock situation and pointless because our tracer + * is dead, so don't allow us to stop. + * If SIGKILL was already sent before the caller unlocked + * ->siglock we must see ->core_state != NULL. Otherwise it + * is safe to enter schedule(). + */ + if (unlikely(current->mm->core_state) && + unlikely(current->mm == current->parent->mm)) + return 0; + + return 1; +} + +/* + * Return nonzero if there is a SIGKILL that should be waking us up. + * Called with the siglock held. + */ +static int sigkill_pending(struct task_struct *tsk) +{ + return sigismember(&tsk->pending.signal, SIGKILL) || + sigismember(&tsk->signal->shared_pending.signal, SIGKILL); +} + +/* + * This must be called with current->sighand->siglock held. + * + * This should be the path for all ptrace stops. + * We always set current->last_siginfo while stopped here. + * That makes it a way to test a stopped process for + * being ptrace-stopped vs being job-control-stopped. + * + * If we actually decide not to stop at all because the tracer + * is gone, we keep current->exit_code unless clear_code. + */ +static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) +{ + if (arch_ptrace_stop_needed(exit_code, info)) { + /* + * The arch code has something special to do before a + * ptrace stop. This is allowed to block, e.g. for faults + * on user stack pages. We can't keep the siglock while + * calling arch_ptrace_stop, so we must release it now. + * To preserve proper semantics, we must do this before + * any signal bookkeeping like checking group_stop_count. + * Meanwhile, a SIGKILL could come in before we retake the + * siglock.
That must prevent us from sleeping in TASK_TRACED. + * So after regaining the lock, we must check for SIGKILL. + */ + spin_unlock_irq(&current->sighand->siglock); + arch_ptrace_stop(exit_code, info); + spin_lock_irq(&current->sighand->siglock); + if (sigkill_pending(current)) + return; + } + + /* + * If there is a group stop in progress, + * we must participate in the bookkeeping. + */ + if (current->signal->group_stop_count > 0) + --current->signal->group_stop_count; + + current->last_siginfo = info; + current->exit_code = exit_code; + + /* Let the debugger run. */ + __set_current_state(TASK_TRACED); + spin_unlock_irq(&current->sighand->siglock); + read_lock(&tasklist_lock); + if (may_ptrace_stop()) { + do_notify_parent_cldstop(current, CLD_TRAPPED); + read_unlock(&tasklist_lock); + schedule(); + } else { + /* + * By the time we got the lock, our tracer went away. + * Don't drop the lock yet, another tracer may come. + */ + __set_current_state(TASK_RUNNING); + if (clear_code) + current->exit_code = 0; + read_unlock(&tasklist_lock); + } + + /* + * While in TASK_TRACED, we were considered "frozen enough". + * Now that we woke up, it's crucial if we're supposed to be + * frozen that we freeze now before running anything substantial. + */ + try_to_freeze(); + + /* + * We are back. Now reacquire the siglock before touching + * last_siginfo, so that we are sure to have synchronized with + * any signal-sending on another CPU that wants to examine it. + */ + spin_lock_irq(&current->sighand->siglock); + current->last_siginfo = NULL; + + /* + * Queued signals ignored us while we were stopped for tracing. + * So check for any that we should take before resuming user mode. + * This sets TIF_SIGPENDING, but never clears it. + */ + recalc_sigpending_tsk(current); +} + +void ptrace_notify(int exit_code) +{ + siginfo_t info; + + BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); + + memset(&info, 0, sizeof info); + info.si_signo = SIGTRAP; + info.si_code = exit_code; + info.si_pid = task_pid_vnr(current); + info.si_uid = current->uid; + + /* Let the debugger run. */ + spin_lock_irq(&current->sighand->siglock); + ptrace_stop(exit_code, 1, &info); + spin_unlock_irq(&current->sighand->siglock); +} + +static void +finish_stop(int stop_count) +{ + /* + * If there are no other threads in the group, or if there is + * a group stop in progress and we are the last to stop, + * report to the parent. When ptraced, every thread reports itself. + */ + if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) { + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current, CLD_STOPPED); + read_unlock(&tasklist_lock); + } + + do { + schedule(); + } while (try_to_freeze()); + /* + * Now we don't run again until continued. + */ + current->exit_code = 0; +} + +/* + * This performs the stopping for SIGSTOP and other stop signals. + * We have to stop all threads in the thread group. + * Returns nonzero if we've actually stopped and released the siglock. + * Returns zero if we didn't stop and still hold the siglock. + */ +static int do_signal_stop(int signr) +{ + struct signal_struct *sig = current->signal; + int stop_count; + + if (sig->group_stop_count > 0) { + /* + * There is a group stop in progress. We don't need to + * start another one. + */ + stop_count = --sig->group_stop_count; + } else { + struct task_struct *t; + + if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || + unlikely(signal_group_exit(sig))) + return 0; + /* + * There is no group stop already in progress. + * We must initiate one now.
+ */ + sig->group_exit_code = signr; + + stop_count = 0; + for (t = next_thread(current); t != current; t = next_thread(t)) + /* + * Setting state to TASK_STOPPED for a group + * stop is always done with the siglock held, + * so this check has no races. + */ + if (!(t->flags & PF_EXITING) && + !task_is_stopped_or_traced(t)) { + stop_count++; + signal_wake_up(t, 0); + } + sig->group_stop_count = stop_count; + } + + if (stop_count == 0) + sig->flags = SIGNAL_STOP_STOPPED; + current->exit_code = sig->group_exit_code; + __set_current_state(TASK_STOPPED); + + spin_unlock_irq(&current->sighand->siglock); + finish_stop(stop_count); + return 1; +} + +static int ptrace_signal(int signr, siginfo_t *info, + struct pt_regs *regs, void *cookie) +{ + if (!(current->ptrace & PT_PTRACED)) + return signr; + + ptrace_signal_deliver(regs, cookie); + + /* Let the debugger run. */ + ptrace_stop(signr, 0, info); + + /* We're back. Did the debugger cancel the sig? */ + signr = current->exit_code; + if (signr == 0) + return signr; + + current->exit_code = 0; + + /* Update the siginfo structure if the signal has + changed. If the debugger wanted something + specific in the siginfo structure then it should + have updated *info via PTRACE_SETSIGINFO. */ + if (signr != info->si_signo) { + info->si_signo = signr; + info->si_errno = 0; + info->si_code = SI_USER; + info->si_pid = task_pid_vnr(current->parent); + info->si_uid = current->parent->uid; + } + + /* If the (new) signal is now blocked, requeue it. */ + if (sigismember(&current->blocked, signr)) { + specific_send_sig_info(signr, info, current); + signr = 0; + } + + return signr; +} + +int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, + struct pt_regs *regs, void *cookie) +{ + struct sighand_struct *sighand = current->sighand; + struct signal_struct *signal = current->signal; + int signr; + +relock: + /* + * We'll jump back here after any time we were stopped in TASK_STOPPED. + * While in TASK_STOPPED, we were considered "frozen enough". + * Now that we woke up, it's crucial if we're supposed to be + * frozen that we freeze now before running anything substantial. + */ + try_to_freeze(); + + spin_lock_irq(&sighand->siglock); + /* + * Every stopped thread goes here after wakeup. Check to see if + * we should notify the parent, prepare_signal(SIGCONT) encodes + * the CLD_ si_code into SIGNAL_CLD_MASK bits. + */ + if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { + int why = (signal->flags & SIGNAL_STOP_CONTINUED) + ? CLD_CONTINUED : CLD_STOPPED; + signal->flags &= ~SIGNAL_CLD_MASK; + spin_unlock_irq(&sighand->siglock); + + if (unlikely(!tracehook_notify_jctl(1, why))) + goto relock; + + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current->group_leader, why); + read_unlock(&tasklist_lock); + goto relock; + } + + for (;;) { + struct k_sigaction *ka; + + if (unlikely(signal->group_stop_count > 0) && + do_signal_stop(0)) + goto relock; + + /* + * Tracing can induce an artificial signal and choose sigaction. + * The return value in @signr determines the default action, + * but @info->si_signo is the signal number we will report.
+ */ + signr = tracehook_get_signal(current, regs, info, return_ka); + if (unlikely(signr < 0)) + goto relock; + if (unlikely(signr != 0)) + ka = return_ka; + else { + signr = dequeue_signal(current, &current->blocked, + info); + + if (!signr) + break; /* will return 0 */ + + if (signr != SIGKILL) { + signr = ptrace_signal(signr, info, + regs, cookie); + if (!signr) + continue; + } + + ka = &sighand->action[signr-1]; + } + + if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ + continue; + if (ka->sa.sa_handler != SIG_DFL) { + /* Run the handler. */ + *return_ka = *ka; + + if (ka->sa.sa_flags & SA_ONESHOT) + ka->sa.sa_handler = SIG_DFL; + + break; /* will return non-zero "signr" value */ + } + + /* + * Now we are doing the default action for this signal. + */ + if (sig_kernel_ignore(signr)) /* Default is nothing. */ + continue; + + /* + * Global init gets no signals it doesn't want. + */ + if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && + !signal_group_exit(signal)) + continue; + + if (sig_kernel_stop(signr)) { + /* + * The default action is to stop all threads in + * the thread group. The job control signals + * do nothing in an orphaned pgrp, but SIGSTOP + * always works. Note that siglock needs to be + * dropped during the call to is_orphaned_pgrp() + * because of lock ordering with tasklist_lock. + * This allows an intervening SIGCONT to be posted. + * We need to check for that and bail out if necessary. + */ + if (signr != SIGSTOP) { + spin_unlock_irq(&sighand->siglock); + + /* signals can be posted during this window */ + + if (is_current_pgrp_orphaned()) + goto relock; + + spin_lock_irq(&sighand->siglock); + } + + if (likely(do_signal_stop(info->si_signo))) { + /* It released the siglock. */ + goto relock; + } + + /* + * We didn't actually stop, due to a race + * with SIGCONT or something like that. + */ + continue; + } + + spin_unlock_irq(&sighand->siglock); + + /* + * Anything else is fatal, maybe with a core dump. + */ + current->flags |= PF_SIGNALED; + + if (sig_kernel_coredump(signr)) { + if (print_fatal_signals) + print_fatal_signal(regs, info->si_signo); + /* + * If it was able to dump core, this kills all + * other threads in the group and synchronizes with + * their demise. If we lost the race with another + * thread getting here, it set group_exit_code + * first and our do_group_exit call below will use + * that value and ignore the one we pass it. + */ + do_coredump(info->si_signo, info->si_signo, regs); + } + + /* + * Death signals, no core dump. + */ + do_group_exit(info->si_signo); + /* NOTREACHED */ + } + spin_unlock_irq(&sighand->siglock); + return signr; +} + +void exit_signals(struct task_struct *tsk) +{ + int group_stop = 0; + struct task_struct *t; + + if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { + tsk->flags |= PF_EXITING; + return; + } + + spin_lock_irq(&tsk->sighand->siglock); + /* + * From now on this task is not visible for group-wide signals, + * see wants_signal(), do_signal_stop(). + */ + tsk->flags |= PF_EXITING; + if (!signal_pending(tsk)) + goto out; + + /* It could be that __group_complete_signal() chose us to + * notify about a group-wide signal. Another thread should be + * woken now to take the signal since we will not.
+ */ + for (t = tsk; (t = next_thread(t)) != tsk; ) + if (!signal_pending(t) && !(t->flags & PF_EXITING)) + recalc_sigpending_and_wake(t); + + if (unlikely(tsk->signal->group_stop_count) && + !--tsk->signal->group_stop_count) { + tsk->signal->flags = SIGNAL_STOP_STOPPED; + group_stop = 1; + } +out: + spin_unlock_irq(&tsk->sighand->siglock); + + if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) { + read_lock(&tasklist_lock); + do_notify_parent_cldstop(tsk, CLD_STOPPED); + read_unlock(&tasklist_lock); + } +} + +EXPORT_SYMBOL(recalc_sigpending); +EXPORT_SYMBOL_GPL(dequeue_signal); +EXPORT_SYMBOL(flush_signals); +EXPORT_SYMBOL(force_sig); +EXPORT_SYMBOL(send_sig); +EXPORT_SYMBOL(send_sig_info); +EXPORT_SYMBOL(sigprocmask); +EXPORT_SYMBOL(block_all_signals); +EXPORT_SYMBOL(unblock_all_signals); + + +/* + * System call entry points. + */ + +SYSCALL_DEFINE0(restart_syscall) +{ + struct restart_block *restart = &current_thread_info()->restart_block; + return restart->fn(restart); +} + +long do_no_restart_syscall(struct restart_block *param) +{ + return -EINTR; +} + +/* + * We don't need to get the kernel lock - this is all local to this + * particular thread.. (and that's good, because this is _heavily_ + * used by various programs) + */ + +/* + * This is also useful for kernel threads that want to temporarily + * (or permanently) block certain signals. + * + * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel + * interface happily blocks "unblockable" signals like SIGKILL + * and friends. + */ +int sigprocmask(int how, sigset_t *set, sigset_t *oldset) +{ + int error; + + spin_lock_irq(&current->sighand->siglock); + if (oldset) + *oldset = current->blocked; + + error = 0; + switch (how) { + case SIG_BLOCK: + sigorsets(&current->blocked, &current->blocked, set); + break; + case SIG_UNBLOCK: + signandsets(&current->blocked, &current->blocked, set); + break; + case SIG_SETMASK: + current->blocked = *set; + break; + default: + error = -EINVAL; + } + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + + return error; +} + +SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, + sigset_t __user *, oset, size_t, sigsetsize) +{ + int error = -EINVAL; + sigset_t old_set, new_set; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + goto out; + + if (set) { + error = -EFAULT; + if (copy_from_user(&new_set, set, sizeof(*set))) + goto out; + sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); + + error = sigprocmask(how, &new_set, &old_set); + if (error) + goto out; + if (oset) + goto set_old; + } else if (oset) { + spin_lock_irq(&current->sighand->siglock); + old_set = current->blocked; + spin_unlock_irq(&current->sighand->siglock); + + set_old: + error = -EFAULT; + if (copy_to_user(oset, &old_set, sizeof(*oset))) + goto out; + } + error = 0; +out: + return error; +} + +long do_sigpending(void __user *set, unsigned long sigsetsize) +{ + long error = -EINVAL; + sigset_t pending; + + if (sigsetsize > sizeof(sigset_t)) + goto out; + + spin_lock_irq(&current->sighand->siglock); + sigorsets(&pending, &current->pending.signal, + &current->signal->shared_pending.signal); + spin_unlock_irq(&current->sighand->siglock); + + /* Outside the lock because only this thread touches it.
*/ + sigandsets(&pending, &current->blocked, &pending); + + error = -EFAULT; + if (!copy_to_user(set, &pending, sigsetsize)) + error = 0; + +out: + return error; +} + +SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) +{ + return do_sigpending(set, sigsetsize); +} + +#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER + +int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) +{ + int err; + + if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) + return -EFAULT; + if (from->si_code < 0) + return __copy_to_user(to, from, sizeof(siginfo_t)) + ? -EFAULT : 0; + /* + * If you change siginfo_t structure, please be sure + * this code is fixed accordingly. + * Please remember to update the signalfd_copyinfo() function + * inside fs/signalfd.c too, in case siginfo_t changes. + * It should never copy any pad contained in the structure + * to avoid security leaks, but must copy the generic + * 3 ints plus the relevant union member. + */ + err = __put_user(from->si_signo, &to->si_signo); + err |= __put_user(from->si_errno, &to->si_errno); + err |= __put_user((short)from->si_code, &to->si_code); + switch (from->si_code & __SI_MASK) { + case __SI_KILL: + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + break; + case __SI_TIMER: + err |= __put_user(from->si_tid, &to->si_tid); + err |= __put_user(from->si_overrun, &to->si_overrun); + err |= __put_user(from->si_ptr, &to->si_ptr); + break; + case __SI_POLL: + err |= __put_user(from->si_band, &to->si_band); + err |= __put_user(from->si_fd, &to->si_fd); + break; + case __SI_FAULT: + err |= __put_user(from->si_addr, &to->si_addr); +#ifdef __ARCH_SI_TRAPNO + err |= __put_user(from->si_trapno, &to->si_trapno); +#endif + break; + case __SI_CHLD: + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + err |= __put_user(from->si_status, &to->si_status); + err |= __put_user(from->si_utime, &to->si_utime); + err |= __put_user(from->si_stime, &to->si_stime); + break; + case __SI_RT: /* This is not generated by the kernel as of now. */ + case __SI_MESGQ: /* But this is */ + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + err |= __put_user(from->si_ptr, &to->si_ptr); + break; + default: /* this is just in case for now ... */ + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + break; + } + return err; +} + +#endif + +SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, + siginfo_t __user *, uinfo, const struct timespec __user *, uts, + size_t, sigsetsize) +{ + int ret, sig; + sigset_t these; + struct timespec ts; + siginfo_t info; + long timeout = 0; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (copy_from_user(&these, uthese, sizeof(these))) + return -EFAULT; + + /* + * Invert the set of allowed signals to get those we + * want to block.
+ */ + sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); + signotset(&these); + + if (uts) { + if (copy_from_user(&ts, uts, sizeof(ts))) + return -EFAULT; + if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 + || ts.tv_sec < 0) + return -EINVAL; + } + + spin_lock_irq(&current->sighand->siglock); + sig = dequeue_signal(current, &these, &info); + if (!sig) { + timeout = MAX_SCHEDULE_TIMEOUT; + if (uts) + timeout = (timespec_to_jiffies(&ts) + + (ts.tv_sec || ts.tv_nsec)); + + if (timeout) { + /* None ready -- temporarily unblock those we're + * interested in while we are sleeping, so that we'll + * be awakened when they arrive. */ + current->real_blocked = current->blocked; + sigandsets(&current->blocked, &current->blocked, &these); + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + + timeout = schedule_timeout_interruptible(timeout); + + spin_lock_irq(&current->sighand->siglock); + sig = dequeue_signal(current, &these, &info); + current->blocked = current->real_blocked; + siginitset(&current->real_blocked, 0); + recalc_sigpending(); + } + } + spin_unlock_irq(&current->sighand->siglock); + + if (sig) { + ret = sig; + if (uinfo) { + if (copy_siginfo_to_user(uinfo, &info)) + ret = -EFAULT; + } + } else { + ret = -EAGAIN; + if (timeout) + ret = -EINTR; + } + + return ret; +} + +SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) +{ + struct siginfo info; + + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_USER; + info.si_pid = task_tgid_vnr(current); + info.si_uid = current->uid; + + return kill_something_info(sig, &info, pid); +} + +static int do_tkill(pid_t tgid, pid_t pid, int sig) +{ + int error; + struct siginfo info; + struct task_struct *p; + unsigned long flags; + + error = -ESRCH; + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_TKILL; + info.si_pid = task_tgid_vnr(current); + info.si_uid = current->uid; + + rcu_read_lock(); + p = find_task_by_vpid(pid); + if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { + error = check_kill_permission(sig, &info, p); + /* + * The null signal is a permissions and process existence + * probe. No signal is actually delivered. + * + * If lock_task_sighand() fails we pretend the task dies + * after receiving the signal. The window is tiny, and the + * signal is private anyway. + */ + if (!error && sig && lock_task_sighand(p, &flags)) { + error = specific_send_sig_info(sig, &info, p); + unlock_task_sighand(p, &flags); + } + } + rcu_read_unlock(); + + return error; +} + +/** + * sys_tgkill - send signal to one specific thread + * @tgid: the thread group ID of the thread + * @pid: the PID of the thread + * @sig: signal to be sent + * + * This syscall also checks the @tgid and returns -ESRCH even if the PID + * exists but no longer belongs to the target process. This + * method solves the problem of threads exiting and PIDs getting reused. + */ +SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) +{ + /* This is only valid for single tasks */ + if (pid <= 0 || tgid <= 0) + return -EINVAL; + + return do_tkill(tgid, pid, sig); +} + +/* + * Send a signal to only one task, even if it's a CLONE_THREAD task. + */ +SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) +{ + /* This is only valid for single tasks */ + if (pid <= 0) + return -EINVAL; + + return do_tkill(0, pid, sig); +} + +SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, + siginfo_t __user *, uinfo) +{ + siginfo_t info; + + if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) + return -EFAULT; + + /* Not even root can pretend to send signals from the kernel.
+SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
+		siginfo_t __user *, uinfo)
+{
+	siginfo_t info;
+
+	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
+		return -EFAULT;
+
+	/*
+	 * Not even root can pretend to send signals from the kernel.
+	 * Nor can they impersonate a kill(), which adds source info.
+	 */
+	if (info.si_code >= 0)
+		return -EPERM;
+	info.si_signo = sig;
+
+	/* POSIX.1b doesn't mention process groups. */
+	return kill_proc_info(sig, &info, pid);
+}
+
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
+{
+	struct task_struct *t = current;
+	struct k_sigaction *k;
+	sigset_t mask;
+
+	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
+		return -EINVAL;
+
+	k = &t->sighand->action[sig-1];
+
+	spin_lock_irq(&current->sighand->siglock);
+	if (oact)
+		*oact = *k;
+
+	if (act) {
+		sigdelsetmask(&act->sa.sa_mask,
+			      sigmask(SIGKILL) | sigmask(SIGSTOP));
+		*k = *act;
+		/*
+		 * POSIX 3.3.1.3:
+		 *  "Setting a signal action to SIG_IGN for a signal that is
+		 *   pending shall cause the pending signal to be discarded,
+		 *   whether or not it is blocked."
+		 *
+		 *  "Setting a signal action to SIG_DFL for a signal that is
+		 *   pending and whose default action is to ignore the signal
+		 *   (for example, SIGCHLD), shall cause the pending signal to
+		 *   be discarded, whether or not it is blocked."
+		 */
+		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
+			sigemptyset(&mask);
+			sigaddset(&mask, sig);
+			rm_from_queue_full(&mask, &t->signal->shared_pending);
+			do {
+				rm_from_queue_full(&mask, &t->pending);
+				t = next_thread(t);
+			} while (t != current);
+		}
+	}
+
+	spin_unlock_irq(&current->sighand->siglock);
+	return 0;
+}
+
+int
+do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
+{
+	stack_t oss;
+	int error;
+
+	if (uoss) {
+		oss.ss_sp = (void __user *) current->sas_ss_sp;
+		oss.ss_size = current->sas_ss_size;
+		oss.ss_flags = sas_ss_flags(sp);
+	}
+
+	if (uss) {
+		void __user *ss_sp;
+		size_t ss_size;
+		int ss_flags;
+
+		error = -EFAULT;
+		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
+		    || __get_user(ss_sp, &uss->ss_sp)
+		    || __get_user(ss_flags, &uss->ss_flags)
+		    || __get_user(ss_size, &uss->ss_size))
+			goto out;
+
+		error = -EPERM;
+		if (on_sig_stack(sp))
+			goto out;
+
+		error = -EINVAL;
+		/*
+		 * Note - this code used to test ss_flags incorrectly:
+		 * old code may have been written using ss_flags == 0
+		 * to mean ss_flags == SS_ONSTACK (as this was the only
+		 * way that worked). This fix preserves that older
+		 * mechanism.
+		 */
+		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
+			goto out;
+
+		if (ss_flags == SS_DISABLE) {
+			ss_size = 0;
+			ss_sp = NULL;
+		} else {
+			error = -ENOMEM;
+			if (ss_size < MINSIGSTKSZ)
+				goto out;
+		}
+
+		current->sas_ss_sp = (unsigned long) ss_sp;
+		current->sas_ss_size = ss_size;
+	}
+
+	if (uoss) {
+		error = -EFAULT;
+		if (copy_to_user(uoss, &oss, sizeof(oss)))
+			goto out;
+	}
+
+	error = 0;
+out:
+	return error;
+}
+
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+
+SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
+{
+	return do_sigpending(set, sizeof(*set));
+}
+
+#endif
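/*
 * Editor's note: a minimal, hypothetical userspace sketch (not part of
 * this file) of the caller-visible side of do_sigaltstack: install an
 * alternate stack with sigaltstack(), then request SA_ONSTACK delivery
 * so a handler can run even after the main stack overflows.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_segv(int sig)
{
	/* Runs on the alternate stack; _Exit is async-signal-safe. */
	_Exit(1);
}

int main(void)
{
	stack_t ss = { 0 };
	struct sigaction sa = { 0 };

	ss.ss_sp = malloc(SIGSTKSZ);	/* must be >= MINSIGSTKSZ */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;		/* SS_ONSTACK also accepted, see note above */
	if (sigaltstack(&ss, NULL) < 0) {
		perror("sigaltstack");
		return 1;
	}

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}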
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+/*
+ * Some platforms have their own version with special arguments;
+ * others support only sys_rt_sigprocmask.
+ */
+
+SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
+		old_sigset_t __user *, oset)
+{
+	int error;
+	old_sigset_t old_set, new_set;
+
+	if (set) {
+		error = -EFAULT;
+		if (copy_from_user(&new_set, set, sizeof(*set)))
+			goto out;
+		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
+
+		spin_lock_irq(&current->sighand->siglock);
+		old_set = current->blocked.sig[0];
+
+		error = 0;
+		switch (how) {
+		default:
+			error = -EINVAL;
+			break;
+		case SIG_BLOCK:
+			sigaddsetmask(&current->blocked, new_set);
+			break;
+		case SIG_UNBLOCK:
+			sigdelsetmask(&current->blocked, new_set);
+			break;
+		case SIG_SETMASK:
+			current->blocked.sig[0] = new_set;
+			break;
+		}
+
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+		if (error)
+			goto out;
+		if (oset)
+			goto set_old;
+	} else if (oset) {
+		old_set = current->blocked.sig[0];
+	set_old:
+		error = -EFAULT;
+		if (copy_to_user(oset, &old_set, sizeof(*oset)))
+			goto out;
+	}
+	error = 0;
+out:
+	return error;
+}
+#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
+
+#ifdef __ARCH_WANT_SYS_RT_SIGACTION
+SYSCALL_DEFINE4(rt_sigaction, int, sig,
+		const struct sigaction __user *, act,
+		struct sigaction __user *, oact,
+		size_t, sigsetsize)
+{
+	struct k_sigaction new_sa, old_sa;
+	int ret = -EINVAL;
+
+	/* XXX: Don't preclude handling different sized sigset_t's. */
+	if (sigsetsize != sizeof(sigset_t))
+		goto out;
+
+	if (act) {
+		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
+			return -EFAULT;
+	}
+
+	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
+
+	if (!ret && oact) {
+		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
+			return -EFAULT;
+	}
+out:
+	return ret;
+}
+#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
+
+#ifdef __ARCH_WANT_SYS_SGETMASK
+
+/*
+ * For backwards compatibility. Functionality superseded by sigprocmask.
+ */
+SYSCALL_DEFINE0(sgetmask)
+{
+	/* SMP safe */
+	return current->blocked.sig[0];
+}
+
+SYSCALL_DEFINE1(ssetmask, int, newmask)
+{
+	int old;
+
+	spin_lock_irq(&current->sighand->siglock);
+	old = current->blocked.sig[0];
+
+	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
+						  sigmask(SIGSTOP)));
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return old;
+}
+#endif /* __ARCH_WANT_SYS_SGETMASK */
+
+#ifdef __ARCH_WANT_SYS_SIGNAL
+/*
+ * For backwards compatibility. Functionality superseded by sigaction.
+ */
+SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
+{
+	struct k_sigaction new_sa, old_sa;
+	int ret;
+
+	new_sa.sa.sa_handler = handler;
+	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+	sigemptyset(&new_sa.sa.sa_mask);
+
+	ret = do_sigaction(sig, &new_sa, &old_sa);
+
+	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
+}
+#endif /* __ARCH_WANT_SYS_SIGNAL */
+
+#ifdef __ARCH_WANT_SYS_PAUSE
+
+SYSCALL_DEFINE0(pause)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	return -ERESTARTNOHAND;
+}
+
+#endif
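/*
 * Editor's note: a minimal, hypothetical userspace sketch (not part of
 * this file) of the three "how" modes handled in the switch above.
 * SIGKILL and SIGSTOP are silently dropped from new_set in the kernel,
 * so attempts to block them succeed but have no effect.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset(&set, SIGKILL);	/* silently ignored by the kernel */

	sigprocmask(SIG_BLOCK, &set, &old);	/* blocked |= set */
	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* blocked &= ~set */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* blocked = old */

	printf("mask restored to its original value\n");
	return 0;
}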
+#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
+{
+	sigset_t newset;
+
+	/* XXX: Don't preclude handling different sized sigset_t's. */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&newset, unewset, sizeof(newset)))
+		return -EFAULT;
+	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+	spin_lock_irq(&current->sighand->siglock);
+	current->saved_sigmask = current->blocked;
+	current->blocked = newset;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_restore_sigmask();
+	return -ERESTARTNOHAND;
+}
+#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	return NULL;
+}
+
+void __init signals_init(void)
+{
+	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
+}
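/*
 * Editor's note: a minimal, hypothetical userspace sketch (not part of
 * this file) of the classic race-free wait that rt_sigsuspend (above)
 * exists for: block the signal, test the flag, then atomically
 * swap-mask-and-sleep with sigsuspend(). Testing and sleeping without
 * the atomic swap could lose a signal that arrives in between.
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_usr1;

static void handler(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, oldmask;
	struct sigaction sa = { 0 };

	sa.sa_handler = handler;
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &oldmask);

	/* SIGUSR1 cannot sneak in between the test and the sleep. */
	while (!got_usr1)
		sigsuspend(&oldmask);	/* blocked mask restored on return */

	sigprocmask(SIG_SETMASK, &oldmask, NULL);
	printf("got SIGUSR1\n");
	return 0;
}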