| author | Stanislaw Gruszka <sgruszka@redhat.com> | 2010-03-11 14:04:42 -0800 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2010-03-12 22:40:41 +0100 |
| commit | c28739375bf0d6e239b4fa939ec8372aa2c707d2 (patch) | |
| tree | 9d213503878c59b7adc9ca3e0517c566921f3c9a /kernel/posix-cpu-timers.c | |
| parent | 1f169f84d25a74fb2dc67274d31d082ce30c60fb (diff) | |
| download | op-kernel-dev-c28739375bf0d6e239b4fa939ec8372aa2c707d2.zip op-kernel-dev-c28739375bf0d6e239b4fa939ec8372aa2c707d2.tar.gz | |
cpu-timers: Avoid iterating over all threads in fastpath_timer_check()
Widen the p->sighand->siglock locking scope to make sure that
fastpath_timer_check() never iterates over all threads. Without the
lock held there is a small window in which signal->cputimer can stop
running while we are still writing values to signal->cputime_expires.
Calling thread_group_cputime() from fastpath_timer_check() is not only
slow, it is also racy with __exit_signal(), which can lead to invalid
signal->{s,u}time values.
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
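
The interaction the patch closes can be pictured in user space. The sketch below is illustrative only, not kernel code: a pthread mutex stands in for p->sighand->siglock, and cputimer_running / cputime_expires are hypothetical stand-ins for signal->cputimer.running and signal->cputime_expires. The point is that deciding whether the group timer is running and publishing the expiry value happen inside one critical section, so a concurrent path that stops the timer cannot slip in between the two steps.

```c
/*
 * Illustrative user-space sketch (assumption: not the kernel code).
 * "siglock" plays the role of p->sighand->siglock; cputimer_running and
 * cputime_expires are stand-ins for signal->cputimer.running and
 * signal->cputime_expires.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static bool cputimer_running;          /* stand-in for signal->cputimer.running */
static unsigned long cputime_expires;  /* stand-in for signal->cputime_expires  */

/* Widened critical section: decide *and* publish under the same lock. */
static void arm_group_timer(unsigned long expires)
{
	pthread_mutex_lock(&siglock);
	if (!cputimer_running)
		cputimer_running = true;   /* group accounting is (re)started */
	cputime_expires = expires;         /* checker sees expiry only while running */
	pthread_mutex_unlock(&siglock);
}

/* Exit path: stopping the group timer synchronizes on the same lock. */
static void stop_group_timer(void)
{
	pthread_mutex_lock(&siglock);
	cputimer_running = false;
	cputime_expires = 0;
	pthread_mutex_unlock(&siglock);
}

int main(void)
{
	arm_group_timer(1000);
	printf("running=%d expires=%lu\n", cputimer_running, cputime_expires);
	stop_group_timer();
	printf("running=%d expires=%lu\n", cputimer_running, cputime_expires);
	return 0;
}
```

With that guarantee, a checker that finds a non-zero expiry value can also rely on the group accounting still running, which is what lets fastpath_timer_check() avoid summing times over every thread in the group.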
Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r-- | kernel/posix-cpu-timers.c | 14 |
1 file changed, 7 insertions, 7 deletions
```diff
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 7d9d0fa..564b3b0 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -550,7 +550,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
- * for reading, and interrupts disabled.
+ * for reading, interrupts disabled and p->sighand->siglock taken.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -569,9 +569,6 @@ static void arm_timer(struct k_itimer *timer)
 	}
 	head += CPUCLOCK_WHICH(timer->it_clock);
 
-	BUG_ON(!irqs_disabled());
-	spin_lock(&p->sighand->siglock);
-
 	listpos = head;
 	list_for_each_entry(next, head, entry) {
 		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
@@ -606,8 +603,6 @@ static void arm_timer(struct k_itimer *timer)
 			break;
 		}
 	}
-
-	spin_unlock(&p->sighand->siglock);
 }
 
 /*
@@ -720,7 +715,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		ret = TIMER_RETRY;
 	} else
 		list_del_init(&timer->it.cpu.entry);
-	spin_unlock(&p->sighand->siglock);
 
 	/*
 	 * We need to sample the current value to convert the new
@@ -774,6 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 			 * disable this firing since we are already reporting
 			 * it as an overrun (thanks to bump_cpu_timer above).
 			 */
+			spin_unlock(&p->sighand->siglock);
 			read_unlock(&tasklist_lock);
 			goto out;
 		}
@@ -793,6 +788,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		arm_timer(timer);
 	}
 
+	spin_unlock(&p->sighand->siglock);
 	read_unlock(&tasklist_lock);
 
 	/*
@@ -1206,6 +1202,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			goto out;
 		}
 		read_lock(&tasklist_lock); /* arm_timer needs it.  */
+		spin_lock(&p->sighand->siglock);
 	} else {
 		read_lock(&tasklist_lock);
 		if (unlikely(p->signal == NULL)) {
@@ -1226,6 +1223,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			clear_dead_task(timer, now);
 			goto out_unlock;
 		}
+		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		/* Leave the tasklist_lock locked for the call below.  */
@@ -1234,7 +1232,9 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
+	BUG_ON(!irqs_disabled());
 	arm_timer(timer);
+	spin_unlock(&p->sighand->siglock);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
```
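
For reference, the lock nesting the patch leaves in posix_cpu_timer_set() and posix_cpu_timer_schedule() can be sketched the same way. Again this is an illustrative user-space approximation, with a pthread rwlock standing in for tasklist_lock, a mutex for p->sighand->siglock, and a hypothetical arm_timer_stub() in place of arm_timer():

```c
/*
 * User-space sketch (assumption) of the lock nesting after this patch;
 * pthread locks stand in for the kernel's tasklist_lock (rwlock) and
 * p->sighand->siglock (spinlock); arm_timer_stub() is a hypothetical
 * placeholder for arm_timer().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tasklist_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;

static void arm_timer_stub(void)
{
	/* Caller guarantees: tasklist_lock held for reading, siglock held. */
	printf("arming timer with both locks held\n");
}

static void posix_cpu_timer_set_sketch(void)
{
	pthread_rwlock_rdlock(&tasklist_lock);  /* read_lock(&tasklist_lock)         */
	pthread_mutex_lock(&siglock);           /* spin_lock(&p->sighand->siglock)   */

	arm_timer_stub();                       /* arm_timer() no longer locks itself */

	pthread_mutex_unlock(&siglock);         /* spin_unlock(&p->sighand->siglock) */
	pthread_rwlock_unlock(&tasklist_lock);  /* read_unlock(&tasklist_lock)       */
}

int main(void)
{
	posix_cpu_timer_set_sketch();
	return 0;
}
```

arm_timer() itself no longer takes or drops the lock; every caller now enters it with tasklist_lock held for reading, interrupts disabled and siglock held, matching the updated comment in the diff.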