Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 101
1 file changed, 49 insertions(+), 52 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d5dbb7..989c7c2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -60,7 +60,9 @@
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
+#include <linux/fs_struct.h>
#include <trace/sched.h>
+#include <linux/magic.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -212,6 +214,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
+ unsigned long *stackend;
+
int err;
prepare_to_copy(orig);
@@ -237,6 +241,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
goto out;
setup_thread_stack(tsk, orig);
+ stackend = end_of_stack(tsk);
+ *stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
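The sentinel written above is consumed outside this file: the x86 fault path,
for example, warns "Thread overran stack, or stack corrupted" when it has been
clobbered. A minimal checker against only the interface used in this hunk
(illustrative sketch, not the in-tree consumer):

	static inline int stack_overran(struct task_struct *tsk)
	{
		/* end_of_stack() points at the lowest word of the kernel
		 * stack, where dup_task_struct() wrote the sentinel */
		unsigned long *stackend = end_of_stack(tsk);

		return *stackend != STACK_END_MAGIC;
	}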
@@ -279,7 +285,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
- cpus_clear(mm->cpu_vm_mask);
+ cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
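mm_cpumask() replaces direct access to mm->cpu_vm_mask so the storage can
change later without touching callers; in this era it is a thin accessor,
approximately (sketch, the exact in-tree definition may differ):

	static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
	{
		/* future-safe accessor for the mm's CPU mask */
		return &mm->cpu_vm_mask;
	}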
@@ -639,6 +645,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
tsk->min_flt = tsk->maj_flt = 0;
tsk->nvcsw = tsk->nivcsw = 0;
+#ifdef CONFIG_DETECT_HUNG_TASK
+ tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
+#endif
tsk->mm = NULL;
tsk->active_mm = NULL;
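The new last_switch_count snapshot seeds the hung-task detector: khungtaskd
periodically compares a task's current switch count against this value.
Condensed sketch of that consumer (assumption: simplified from
kernel/hung_task.c, not verbatim):

	static void check_hung_task(struct task_struct *t)
	{
		unsigned long switch_count = t->nvcsw + t->nivcsw;

		if (switch_count != t->last_switch_count) {
			/* scheduled since the last scan: not hung */
			t->last_switch_count = switch_count;
			return;
		}
		/* blocked for the whole interval: report it */
	}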
@@ -676,38 +685,21 @@ fail_nomem:
return retval;
}
-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
-{
- struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
- /* We don't need to lock fs - think why ;-) */
- if (fs) {
- atomic_set(&fs->count, 1);
- rwlock_init(&fs->lock);
- fs->umask = old->umask;
- read_lock(&old->lock);
- fs->root = old->root;
- path_get(&old->root);
- fs->pwd = old->pwd;
- path_get(&old->pwd);
- read_unlock(&old->lock);
- }
- return fs;
-}
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old)
-{
- return __copy_fs_struct(old);
-}
-
-EXPORT_SYMBOL_GPL(copy_fs_struct);
-
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
+ struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
- atomic_inc(&current->fs->count);
+ /* tsk->fs is already what we want */
+ write_lock(&fs->lock);
+ if (fs->in_exec) {
+ write_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+ fs->users++;
+ write_unlock(&fs->lock);
return 0;
}
- tsk->fs = __copy_fs_struct(current->fs);
+ tsk->fs = copy_fs_struct(fs);
if (!tsk->fs)
return -ENOMEM;
return 0;
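copy_fs_struct() moved out of this file in the same series (to fs/fs_struct.c),
with the old atomic count replaced by the lock-protected users field seen
above. Its shape there, approximately (minor details may differ):

	struct fs_struct *copy_fs_struct(struct fs_struct *old)
	{
		struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);

		if (fs) {
			fs->users = 1;
			fs->in_exec = 0;
			rwlock_init(&fs->lock);
			fs->umask = old->umask;
			read_lock(&old->lock);
			fs->root = old->root;
			path_get(&old->root);
			fs->pwd = old->pwd;
			path_get(&old->pwd);
			read_unlock(&old->lock);
		}
		return fs;
	}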
@@ -836,6 +828,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
sig->flags = 0;
+ if (clone_flags & CLONE_NEWPID)
+ sig->flags |= SIGNAL_UNKILLABLE;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
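SIGNAL_UNKILLABLE marks the init task of a new pid namespace; the signal code
uses it to drop fatal signals sent from inside that namespace when no handler
is installed. Condensed sketch of the check in kernel/signal.c (same era,
simplified):

	/* in sig_task_ignored(): a namespace init running the default
	 * handler ignores signals from its own namespace */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !from_ancestor_ns)
		return 1;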
@@ -851,13 +845,14 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->tty_old_pgrp = NULL;
sig->tty = NULL;
- sig->cutime = sig->cstime = cputime_zero;
+ sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->gtime = cputime_zero;
sig->cgtime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
task_io_accounting_init(&sig->ioac);
+ sig->sum_sched_runtime = 0;
taskstats_tgid_init(sig);
task_lock(current->group_leader);
@@ -1040,11 +1035,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->default_timer_slack_ns = current->timer_slack_ns;
-#ifdef CONFIG_DETECT_SOFTLOCKUP
- p->last_switch_count = 0;
- p->last_switch_timestamp = 0;
-#endif
-
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
@@ -1094,7 +1084,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
- if (unlikely(ptrace_reparented(current)))
+ if (unlikely(current->ptrace))
ptrace_fork(p, clone_flags);
/* Perform scheduler related setup. Assign this task to a CPU. */
@@ -1119,7 +1109,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
- retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+ retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
@@ -1178,10 +1168,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#endif
clear_all_latency_tracing(p);
- /* Our parent execution domain becomes current domain
- These must match for thread signalling to apply */
- p->parent_exec_id = p->self_exec_id;
-
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
@@ -1219,10 +1205,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
set_task_cpu(p, smp_processor_id());
/* CLONE_PARENT re-uses the old parent */
- if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
+ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
- else
+ p->parent_exec_id = current->parent_exec_id;
+ } else {
p->real_parent = current;
+ p->parent_exec_id = current->self_exec_id;
+ }
spin_lock(&current->sighand->siglock);
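Recording parent_exec_id under the correct branch matters because
exit_notify() uses it to keep a non-SIGCHLD exit_signal from reaching a parent
that has exec'd in the meantime. Sketch of that consumer (simplified, same
era):

	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    tsk->parent_exec_id != tsk->parent->self_exec_id)
		tsk->exit_signal = SIGCHLD;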
@@ -1258,8 +1247,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->signal->leader_pid = pid;
tty_kref_put(p->signal->tty);
p->signal->tty = tty_kref_get(current->signal->tty);
- set_task_pgrp(p, task_pgrp_nr(current));
- set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -1483,6 +1470,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
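KMEM_CACHE() derives the cache name, object size and alignment from the struct
itself, so the new cache appears as "vm_area_struct" in /proc/slabinfo. From
include/linux/slab.h (same era):

	#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
			sizeof(struct __struct), __alignof__(struct __struct),\
			(__flags), NULL)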
@@ -1538,12 +1526,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
struct fs_struct *fs = current->fs;
- if ((unshare_flags & CLONE_FS) &&
- (fs && atomic_read(&fs->count) > 1)) {
- *new_fsp = __copy_fs_struct(current->fs);
- if (!*new_fsp)
- return -ENOMEM;
- }
+ if (!(unshare_flags & CLONE_FS) || !fs)
+ return 0;
+
+ /* don't need lock here; in the worst case we'll do useless copy */
+ if (fs->users == 1)
+ return 0;
+
+ *new_fsp = copy_fs_struct(fs);
+ if (!*new_fsp)
+ return -ENOMEM;
return 0;
}
@@ -1659,8 +1651,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
if (new_fs) {
fs = current->fs;
+ write_lock(&fs->lock);
current->fs = new_fs;
- new_fs = fs;
+ if (--fs->users)
+ new_fs = NULL;
+ else
+ new_fs = fs;
+ write_unlock(&fs->lock);
}
if (new_mm) {
@@ -1699,7 +1696,7 @@ bad_unshare_cleanup_sigh:
bad_unshare_cleanup_fs:
if (new_fs)
- put_fs_struct(new_fs);
+ free_fs_struct(new_fs);
bad_unshare_cleanup_thread:
bad_unshare_out:
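For completeness, the put side of the new users accounting lives next to
copy_fs_struct() in fs/fs_struct.c; on exit it is roughly (sketch, minor
details may differ):

	void exit_fs(struct task_struct *tsk)
	{
		struct fs_struct *fs = tsk->fs;

		if (fs) {
			int kill;

			task_lock(tsk);
			write_lock(&fs->lock);
			tsk->fs = NULL;
			kill = !--fs->users;	/* last user frees it */
			write_unlock(&fs->lock);
			task_unlock(tsk);
			if (kill)
				free_fs_struct(fs);
		}
	}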