Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c        |  8
-rw-r--r--  kernel/exit.c          | 22
-rw-r--r--  kernel/fork.c          | 24
-rw-r--r--  kernel/pid.c           | 11
-rw-r--r--  kernel/signal.c        | 22
-rw-r--r--  kernel/stop_machine.c  |  4
-rw-r--r--  kernel/sys.c           | 14
-rw-r--r--  kernel/sys_ni.c        |  5
8 files changed, 86 insertions(+), 24 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index cebb4c2..3bae374 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -475,8 +475,8 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
return min_length;
}
-static int get_compat_itimerspec(struct itimerspec *dst,
- struct compat_itimerspec __user *src)
+int get_compat_itimerspec(struct itimerspec *dst,
+ const struct compat_itimerspec __user *src)
{
if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
get_compat_timespec(&dst->it_value, &src->it_value))
@@ -484,8 +484,8 @@ static int get_compat_itimerspec(struct itimerspec *dst,
return 0;
}
-static int put_compat_itimerspec(struct compat_itimerspec __user *dst,
- struct itimerspec *src)
+int put_compat_itimerspec(struct compat_itimerspec __user *dst,
+ const struct itimerspec *src)
{
if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
put_compat_timespec(&src->it_value, &dst->it_value))
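Editor's note: the two compat.c hunks drop the static qualifier and const-qualify the source pointers so the itimerspec conversion helpers can be reused outside kernel/compat.c. A minimal userspace sketch of the same const-correct copy-helper pattern; every name below is hypothetical and this is not the kernel code:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the timespec/itimerspec pair. */
struct ts  { long sec, nsec; };
struct its { struct ts interval, value; };

/*
 * const on the source documents that the helper only reads from it,
 * which is what allows callers in other files to share it safely.
 */
static int copy_its(struct its *dst, const struct its *src)
{
	memcpy(&dst->interval, &src->interval, sizeof(dst->interval));
	memcpy(&dst->value, &src->value, sizeof(dst->value));
	return 0;
}

int main(void)
{
	const struct its in = { { 1, 0 }, { 5, 500 } };
	struct its out;

	copy_its(&out, &in);
	printf("interval=%lds value=%ld.%09ld\n",
	       out.interval.sec, out.value.sec, out.value.nsec);
	return 0;
}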
diff --git a/kernel/exit.c b/kernel/exit.c
index b0c6f0c..c6d14b8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -24,6 +24,7 @@
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
+#include <linux/signalfd.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
@@ -42,6 +43,7 @@
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
+#include <linux/task_io_accounting_ops.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -82,6 +84,14 @@ static void __exit_signal(struct task_struct *tsk)
sighand = rcu_dereference(tsk->sighand);
spin_lock(&sighand->siglock);
+ /*
+ * Notify that this sighand has been detached. This must
+ * be called with the tsk->sighand lock held. Also, this
+ * accesses tsk->sighand internally, so it must be called
+ * before tsk->sighand is reset.
+ */
+ signalfd_detach_locked(tsk);
+
posix_cpu_timers_exit(tsk);
if (atomic_dec_and_test(&sig->count))
posix_cpu_timers_exit_group(tsk);
@@ -113,6 +123,8 @@ static void __exit_signal(struct task_struct *tsk)
sig->nvcsw += tsk->nvcsw;
sig->nivcsw += tsk->nivcsw;
sig->sched_time += tsk->sched_time;
+ sig->inblock += task_io_get_inblock(tsk);
+ sig->oublock += task_io_get_oublock(tsk);
sig = NULL; /* Marker for below. */
}
@@ -299,12 +311,12 @@ void __set_special_pids(pid_t session, pid_t pgrp)
if (process_session(curr) != session) {
detach_pid(curr, PIDTYPE_SID);
set_signal_session(curr->signal, session);
- attach_pid(curr, PIDTYPE_SID, session);
+ attach_pid(curr, PIDTYPE_SID, find_pid(session));
}
if (process_group(curr) != pgrp) {
detach_pid(curr, PIDTYPE_PGID);
curr->signal->pgrp = pgrp;
- attach_pid(curr, PIDTYPE_PGID, pgrp);
+ attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
}
}
@@ -1193,6 +1205,12 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
p->nvcsw + sig->nvcsw + sig->cnvcsw;
psig->cnivcsw +=
p->nivcsw + sig->nivcsw + sig->cnivcsw;
+ psig->cinblock +=
+ task_io_get_inblock(p) +
+ sig->inblock + sig->cinblock;
+ psig->coublock +=
+ task_io_get_oublock(p) +
+ sig->oublock + sig->coublock;
spin_unlock_irq(&p->parent->sighand->siglock);
}
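Editor's note: the exit.c hunks fold a dying task's block-I/O counters into its thread group's totals and, in wait_task_zombie(), into the parent's cumulative cinblock/coublock fields. A minimal userspace sketch of that roll-up hierarchy, with invented structures and names rather than the kernel's:

#include <stdio.h>

/* Hypothetical per-task and per-group accounting, not the kernel structs. */
struct task_acct  { unsigned long inblock, oublock; };
struct group_acct {
	unsigned long inblock, oublock;     /* exited threads of this group */
	unsigned long cinblock, coublock;   /* reaped child groups' totals  */
};

/* On thread exit: the thread's counters move into its group's totals. */
static void task_exit(struct group_acct *grp, const struct task_acct *tsk)
{
	grp->inblock += tsk->inblock;
	grp->oublock += tsk->oublock;
}

/* On reaping a child group: its totals move into the parent's c* fields. */
static void reap_child(struct group_acct *parent, const struct group_acct *child)
{
	parent->cinblock += child->inblock + child->cinblock;
	parent->coublock += child->oublock + child->coublock;
}

int main(void)
{
	struct group_acct parent = { 0 }, child = { 0 };
	struct task_acct t1 = { 3, 7 }, t2 = { 2, 1 };

	task_exit(&child, &t1);
	task_exit(&child, &t2);
	reap_child(&parent, &child);
	printf("parent cinblock=%lu coublock=%lu\n",
	       parent.cinblock, parent.coublock);
	return 0;
}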
diff --git a/kernel/fork.c b/kernel/fork.c
index 5dd3979..49530e4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -875,6 +875,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
+ sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
sig->sched_time = 0;
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -955,7 +956,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_size,
int __user *parent_tidptr,
int __user *child_tidptr,
- int pid)
+ struct pid *pid)
{
int retval;
struct task_struct *p = NULL;
@@ -1022,7 +1023,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
- p->pid = pid;
+ p->pid = pid_nr(pid);
retval = -EFAULT;
if (clone_flags & CLONE_PARENT_SETTID)
if (put_user(p->pid, parent_tidptr))
@@ -1251,13 +1252,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->signal->tty = current->signal->tty;
p->signal->pgrp = process_group(current);
set_signal_session(p->signal, process_session(current));
- attach_pid(p, PIDTYPE_PGID, process_group(p));
- attach_pid(p, PIDTYPE_SID, process_session(p));
+ attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+ attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
- attach_pid(p, PIDTYPE_PID, p->pid);
+ attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
@@ -1321,7 +1322,8 @@ struct task_struct * __cpuinit fork_idle(int cpu)
struct task_struct *task;
struct pt_regs regs;
- task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
+ task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL,
+ &init_struct_pid);
if (!IS_ERR(task))
init_idle(task, cpu);
@@ -1371,7 +1373,7 @@ long do_fork(unsigned long clone_flags,
clone_flags |= CLONE_PTRACE;
}
- p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
+ p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
/*
* Do this prior to waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
@@ -1420,12 +1422,15 @@ long do_fork(unsigned long clone_flags,
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
-static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
+static void sighand_ctor(void *data, struct kmem_cache *cachep,
+ unsigned long flags)
{
struct sighand_struct *sighand = data;
- if (flags & SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
spin_lock_init(&sighand->siglock);
+ INIT_LIST_HEAD(&sighand->signalfd_list);
+ }
}
void __init proc_caches_init(void)
@@ -1451,7 +1456,6 @@ void __init proc_caches_init(void)
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}
-
/*
* Check constraints on flags passed to the unshare system call and
* force unsharing of additional process context as appropriate.
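Editor's note: besides switching copy_process()/attach_pid() to struct pid pointers (see kernel/pid.c below), the fork.c hunks extend sighand_ctor() to initialize signalfd_list, and they do so only on the constructor pass. A rough userspace sketch of that once-per-object constructor idea, using an invented object cache rather than the slab allocator:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical object with an embedded listener list. */
struct listener { struct listener *next; };
struct sighand_like {
	int              lock;       /* stand-in for the spinlock    */
	struct listener *listeners;  /* stand-in for signalfd_list   */
	struct sighand_like *free_next;
};

static struct sighand_like *free_list;

/* Constructor: runs once, when the backing memory is first created. */
static void ctor(struct sighand_like *s)
{
	s->lock = 0;
	s->listeners = NULL;   /* the new initialization in the patch */
}

static struct sighand_like *cache_alloc(void)
{
	struct sighand_like *s = free_list;

	if (s) {               /* reused object: ctor already ran */
		free_list = s->free_next;
		return s;
	}
	s = malloc(sizeof(*s));
	if (s)
		ctor(s);       /* fresh memory: run the constructor once */
	return s;
}

static void cache_free(struct sighand_like *s)
{
	/* Objects keep their constructed state across free/alloc cycles. */
	s->free_next = free_list;
	free_list = s;
}

int main(void)
{
	struct sighand_like *a = cache_alloc();

	cache_free(a);
	struct sighand_like *b = cache_alloc();  /* same memory, no re-ctor */
	printf("reused=%d\n", a == b);
	return 0;
}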
diff --git a/kernel/pid.c b/kernel/pid.c
index d3ad724..eb66bd2 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -27,11 +27,13 @@
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
+#include <linux/init_task.h>
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
static struct kmem_cache *pid_cachep;
+struct pid init_struct_pid = INIT_STRUCT_PID;
int pid_max = PID_MAX_DEFAULT;
@@ -247,13 +249,16 @@ struct pid * fastcall find_pid(int nr)
}
EXPORT_SYMBOL_GPL(find_pid);
-int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
+/*
+ * attach_pid() must be called with the tasklist_lock write-held.
+ */
+int fastcall attach_pid(struct task_struct *task, enum pid_type type,
+ struct pid *pid)
{
struct pid_link *link;
- struct pid *pid;
link = &task->pids[type];
- link->pid = pid = find_pid(nr);
+ link->pid = pid;
hlist_add_head_rcu(&link->node, &pid->tasks[type]);
return 0;
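Editor's note: attach_pid() no longer looks up the struct pid itself; the caller resolves it once (find_pid(), task_pgrp(), or &init_struct_pid) and passes the pointer in. A small userspace sketch of that resolve-once, attach-by-reference pattern, with simplified types of my own:

#include <stdio.h>

#define MAX_PIDS 16

/* Hypothetical, simplified versions of struct pid and a task. */
struct pid_obj { int nr; int ntasks; };
struct task    { const char *comm; struct pid_obj *pid_link; };

static struct pid_obj table[MAX_PIDS];

/* Resolve a numeric id to its object: the lookup happens here, once. */
static struct pid_obj *find_pid_obj(int nr)
{
	if (nr <= 0 || nr >= MAX_PIDS)
		return NULL;
	table[nr].nr = nr;
	return &table[nr];
}

/* Attach only links; it trusts the caller to have resolved the object. */
static void attach(struct task *t, struct pid_obj *pid)
{
	t->pid_link = pid;
	pid->ntasks++;
}

int main(void)
{
	struct task t = { "worker", NULL };
	struct pid_obj *pid = find_pid_obj(7);   /* resolve once ...        */

	attach(&t, pid);                         /* ... then link by pointer */
	printf("%s -> pid %d (tasks=%d)\n", t.comm, t.pid_link->nr, pid->ntasks);
	return 0;
}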
diff --git a/kernel/signal.c b/kernel/signal.c
index c43a3f1..364fc95 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -21,6 +21,7 @@
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
+#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
@@ -113,8 +114,7 @@ void recalc_sigpending(void)
/* Given the mask, find the first available signal that should be serviced. */
-static int
-next_signal(struct sigpending *pending, sigset_t *mask)
+int next_signal(struct sigpending *pending, sigset_t *mask)
{
unsigned long i, *s, *m, x;
int sig = 0;
@@ -632,6 +632,12 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
int ret = 0;
/*
+ * Deliver the signal to listening signalfds. This must be called
+ * with the sighand lock held.
+ */
+ signalfd_notify(t, sig);
+
+ /*
* fast-pathed signals for kernel-internal things like SIGSTOP
* or SIGKILL.
*/
@@ -1282,6 +1288,11 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
ret = 1;
goto out;
}
+ /*
+ * Deliver the signal to listening signalfds. This must be called
+ * with the sighand lock held.
+ */
+ signalfd_notify(p, sig);
list_add_tail(&q->list, &p->pending.list);
sigaddset(&p->pending.signal, sig);
@@ -1325,6 +1336,11 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
q->info.si_overrun++;
goto out;
}
+ /*
+ * Deliver the signal to listening signalfds. This must be called
+ * with the sighand lock held.
+ */
+ signalfd_notify(p, sig);
/*
* Put this signal on the shared-pending queue.
@@ -1985,6 +2001,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
/*
* If you change siginfo_t structure, please be sure
* this code is fixed accordingly.
+ * Please remember to update the signalfd_copyinfo() function
+ * inside fs/signalfd.c too, in case siginfo_t changes.
* It should never copy any pad contained in the structure
* to avoid security leaks, but must copy the generic
* 3 ints plus the relevant union member.
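Editor's note: every queueing path above calls signalfd_notify() while the sighand lock is still held, so a signalfd reader cannot observe the pending queue without also getting the wakeup. A condensed userspace analogue of notifying listeners under the same lock that protects the queue, using pthreads and made-up names; it is a sketch of the pattern, not the kernel path:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical pending-signal queue shared with descriptor listeners. */
static pthread_mutex_t siglock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  listeners = PTHREAD_COND_INITIALIZER;
static int pending[64];
static int npending;

static void notify_listeners(int sig)
{
	/* Must be called with siglock held, mirroring signalfd_notify(). */
	(void)sig;
	pthread_cond_broadcast(&listeners);
}

static void send_sig(int sig)
{
	pthread_mutex_lock(&siglock);
	notify_listeners(sig);        /* wakeup and ...                      */
	pending[npending++] = sig;    /* ... enqueue in one critical section */
	pthread_mutex_unlock(&siglock);
}

static int wait_sig(void)
{
	int sig;

	pthread_mutex_lock(&siglock);
	while (npending == 0)
		pthread_cond_wait(&listeners, &siglock);
	sig = pending[--npending];
	pthread_mutex_unlock(&siglock);
	return sig;
}

int main(void)
{
	send_sig(10);
	printf("dequeued %d\n", wait_sig());
	return 0;
}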
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index daabb74..fcee2a8 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -8,6 +8,8 @@
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -45,6 +47,7 @@ static int stopmachine(void *cpu)
if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
&& !irqs_disabled) {
local_irq_disable();
+ hard_irq_disable();
irqs_disabled = 1;
/* Ack: irqs disabled. */
smp_mb(); /* Must read state first. */
@@ -124,6 +127,7 @@ static int stop_machine(void)
/* Make them disable irqs. */
local_irq_disable();
+ hard_irq_disable();
stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
return 0;
diff --git a/kernel/sys.c b/kernel/sys.c
index cdb7e94..872271c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -14,6 +14,7 @@
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
+#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
@@ -29,6 +30,7 @@
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -658,7 +660,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
int error = -EINVAL;
struct pid *pgrp;
- if (which > 2 || which < 0)
+ if (which > PRIO_USER || which < PRIO_PROCESS)
goto out;
/* normalize: avoid signed division (rounding problems) */
@@ -722,7 +724,7 @@ asmlinkage long sys_getpriority(int which, int who)
long niceval, retval = -ESRCH;
struct pid *pgrp;
- if (which > 2 || which < 0)
+ if (which > PRIO_USER || which < PRIO_PROCESS)
return -EINVAL;
read_lock(&tasklist_lock);
@@ -1486,7 +1488,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
if (process_group(p) != pgid) {
detach_pid(p, PIDTYPE_PGID);
p->signal->pgrp = pgid;
- attach_pid(p, PIDTYPE_PGID, pgid);
+ attach_pid(p, PIDTYPE_PGID, find_pid(pgid));
}
err = 0;
@@ -2082,6 +2084,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_nivcsw = p->signal->cnivcsw;
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
+ r->ru_inblock = p->signal->cinblock;
+ r->ru_oublock = p->signal->coublock;
if (who == RUSAGE_CHILDREN)
break;
@@ -2093,6 +2097,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
r->ru_majflt += p->signal->maj_flt;
+ r->ru_inblock += p->signal->inblock;
+ r->ru_oublock += p->signal->oublock;
t = p;
do {
utime = cputime_add(utime, t->utime);
@@ -2101,6 +2107,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_nivcsw += t->nivcsw;
r->ru_minflt += t->min_flt;
r->ru_majflt += t->maj_flt;
+ r->ru_inblock += task_io_get_inblock(t);
+ r->ru_oublock += task_io_get_oublock(t);
t = next_thread(t);
} while (t != p);
break;
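Editor's note: with cinblock/coublock and the per-thread counters wired into k_getrusage(), the new numbers surface to userspace through getrusage(2), and the magic 0/2 bounds in set/getpriority now use the PRIO_* constants. A small runnable userspace check of both interfaces (nothing here is kernel code):

#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>
#include <sys/time.h>

int main(void)
{
	struct rusage ru;
	int prio;

	/* Same PRIO_PROCESS..PRIO_USER range the kernel now checks against. */
	errno = 0;
	prio = getpriority(PRIO_PROCESS, 0);
	if (errno == 0)
		printf("nice value: %d\n", prio);

	/* ru_inblock/ru_oublock are the fields this patch starts filling in. */
	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("inblock=%ld oublock=%ld\n",
		       ru.ru_inblock, ru.ru_oublock);
	return 0;
}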
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d7306d0..b6d77a8 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -141,3 +141,8 @@ cond_syscall(compat_sys_migrate_pages);
cond_syscall(sys_bdflush);
cond_syscall(sys_ioprio_set);
cond_syscall(sys_ioprio_get);
+
+/* New file descriptors */
+cond_syscall(sys_signalfd);
+cond_syscall(sys_timerfd);
+cond_syscall(sys_eventfd);
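Editor's note: the cond_syscall() stubs let the kernel build with signalfd/timerfd/eventfd compiled out while still resolving the syscall table entries. For reference, a minimal userspace consumer of the first of these, assuming a libc that provides the signalfd(2) wrapper and <sys/signalfd.h>:

#include <signal.h>
#include <stdio.h>
#include <sys/signalfd.h>
#include <unistd.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo ssi;
	int sfd;

	/* Block SIGINT normally so it is delivered only via the fd. */
	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	sfd = signalfd(-1, &mask, 0);
	if (sfd < 0) {
		perror("signalfd");
		return 1;
	}

	printf("press Ctrl-C...\n");
	if (read(sfd, &ssi, sizeof(ssi)) == sizeof(ssi))
		printf("got signal %u via signalfd\n", ssi.ssi_signo);

	close(sfd);
	return 0;
}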