author		Oleg Nesterov <oleg@redhat.com>			2010-05-26 14:43:24 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-27 09:12:47 -0700
commit		b3ac022cb9dc5883505a88b159d1b240ad1ef405
tree		bffa035303cbe3c5bde048ac3d3154fb57059e2d
parent		dd98acf74762764fbc4382a1d9a244f11a2658cc
proc: turn signal_struct->count into "int nr_threads"
No functional changes, just s/atomic_t count/int nr_threads/.

With the recent changes this counter has a single user, get_nr_threads().
And none of its callers needs a really accurate number of threads, not to
mention that each caller obviously races with fork/exit.  It is only used
to report this value to user space, except that first_tid() uses it to
avoid an unnecessary while_each_thread() loop in the unlikely case that
the requested position is already past the last thread.
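
To illustrate that shortcut, a simplified sketch (abridged from the fs/proc
logic for illustration, not the exact kernel code; first_tid_sketch() is a
made-up name): if the requested position is already at or past the reported
thread count, the thread-list walk can be skipped entirely, and an
approximate count is good enough for that.

    /*
     * Simplified sketch of the first_tid()-style shortcut described
     * above; abridged for illustration, not the exact fs/proc/base.c
     * code.  If the readdir position "nr" is already >= the
     * (approximate) thread count, there is nothing to walk and the
     * next_thread() loop can be skipped.
     */
    static struct task_struct *first_tid_sketch(struct task_struct *leader, int nr)
    {
        struct task_struct *pos = NULL;

        rcu_read_lock();
        /* If nr exceeds the number of threads there is nothing to do */
        if (nr && nr >= get_nr_threads(leader))
            goto out;

        /* Start with the leader and walk nr threads forward */
        for (pos = leader; nr > 0; --nr) {
            pos = next_thread(pos);
            if (pos == leader) {    /* wrapped around the group */
                pos = NULL;
                goto out;
            }
        }
        get_task_struct(pos);
    out:
        rcu_read_unlock();
        return pos;
    }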

It is a bit sad that we need a word in struct signal_struct for this;
perhaps we can later change get_nr_threads() to approximate the number of
threads using signal->live and kill ->nr_threads.
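
For reference, a minimal sketch of that possible follow-up (not part of
this patch; the helper name is made up for illustration): read signal->live
instead of a dedicated counter and accept that the result is approximate,
which the current callers can tolerate.

    /*
     * Hypothetical follow-up sketched above, NOT part of this patch:
     * approximate the thread count from signal->live so the dedicated
     * ->nr_threads word could be removed again.  ->live is adjusted at
     * a different point on the exit path than ->nr_threads, so the two
     * can transiently disagree; that is fine for the current callers.
     */
    static inline int get_nr_threads_approx(struct task_struct *tsk)
    {
        return atomic_read(&tsk->signal->live);
    }
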
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/init_task.h	2
-rw-r--r--	include/linux/sched.h		4
-rw-r--r--	kernel/exit.c			5
-rw-r--r--	kernel/fork.c			8
4 files changed, 8 insertions, 11 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7996fc2..0551e0d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -16,7 +16,7 @@ extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
 #define INIT_SIGNALS(sig) {						\
-	.count		= ATOMIC_INIT(1),				\
+	.nr_threads	= 1,						\
 	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
 	.shared_pending	= {						\
 		.list = LIST_HEAD_INIT(sig.shared_pending.list),	\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ccd2d15..f118809 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -527,8 +527,8 @@ struct thread_group_cputimer {
  */
 struct signal_struct {
 	atomic_t		sigcnt;
-	atomic_t		count;
 	atomic_t		live;
+	int			nr_threads;
 
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 
@@ -2149,7 +2149,7 @@ extern bool current_is_single_threaded(void);
 
 static inline int get_nr_threads(struct task_struct *tsk)
 {
-	return atomic_read(&tsk->signal->count);
+	return tsk->signal->nr_threads;
 }
 
 /* de_thread depends on thread_group_leader not being a pid based check */
diff --git a/kernel/exit.c b/kernel/exit.c
index 357d443..ceffc67 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -83,14 +83,10 @@ static void __exit_signal(struct task_struct *tsk)
 	struct sighand_struct *sighand;
 	struct tty_struct *uninitialized_var(tty);
 
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
-
 	sighand = rcu_dereference_check(tsk->sighand,
 					rcu_read_lock_held() ||
 					lockdep_tasklist_lock_is_held());
 	spin_lock(&sighand->siglock);
 
-	atomic_dec(&sig->count);
 	posix_cpu_timers_exit(tsk);
 	if (group_dead) {
@@ -130,6 +126,7 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
 	}
 
+	sig->nr_threads--;
 	__unhash_process(tsk, group_dead);
 
 	/*
diff --git a/kernel/fork.c b/kernel/fork.c
index 40cd099..d32410bd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -877,9 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	if (!sig)
 		return -ENOMEM;
 
-	atomic_set(&sig->sigcnt, 1);
-	atomic_set(&sig->count, 1);
+	sig->nr_threads = 1;
 	atomic_set(&sig->live, 1);
+	atomic_set(&sig->sigcnt, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
 	if (clone_flags & CLONE_NEWPID)
 		sig->flags |= SIGNAL_UNKILLABLE;
@@ -1256,9 +1256,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->sigcnt);
-		atomic_inc(&current->signal->count);
+		current->signal->nr_threads++;
 		atomic_inc(&current->signal->live);
+		atomic_inc(&current->signal->sigcnt);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}