author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-02-10 16:37:31 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-02-11 14:04:19 +0100
commit	3fccfd67df79c6351a156eb25a7a514e5f39c4d9 (patch)
tree	de4776e69e197119ac64097f3ff6239a55ad77be
parent	ff08f76d738d0ec0f334b187f61e160caa321d54 (diff)
download	op-kernel-dev-3fccfd67df79c6351a156eb25a7a514e5f39c4d9.zip
	op-kernel-dev-3fccfd67df79c6351a156eb25a7a514e5f39c4d9.tar.gz
timers: split process wide cpu clocks/timers, fix
To decrease the chance of a missed enable, always enable the timer when we
sample it; we'll always disable it when we find that there are no active
timers in the jiffy tick.

This fixes a flood of warnings reported by Mike Galbraith.

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
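The idea is small enough to model outside the kernel. Below is a minimal user-space
sketch (assuming pthreads for the locking) of the enable-on-sample / disable-in-tick
scheme; the names group_cputimer, sample_group_time and tick_check are illustrative
stand-ins only -- the real logic lives in thread_group_cputimer() and the new
stop_process_timers() in the diff that follows.

/*
 * Minimal user-space sketch of the pattern described above (not the
 * kernel code itself): every sampler switches group accounting on
 * under the lock, and the periodic tick switches it off once no
 * timers remain armed.
 */
#include <pthread.h>
#include <stdio.h>

struct group_cputimer {
	pthread_mutex_t lock;
	int running;			/* accounting currently enabled? */
	unsigned long long cputime;	/* accumulated group CPU time */
	int active_timers;		/* timers still armed */
};

/* Sampling path: analogous to thread_group_cputimer() after this patch.
 * Enabling on every sample means a reader can never see a stopped clock. */
static unsigned long long sample_group_time(struct group_cputimer *t)
{
	unsigned long long val;

	pthread_mutex_lock(&t->lock);
	t->running = 1;			/* always (re)enable on sample */
	val = t->cputime;
	pthread_mutex_unlock(&t->lock);
	return val;
}

/* Tick path: analogous to the new stop_process_timers(); the
 * "no active timers" check is folded in here for brevity, whereas
 * the kernel performs it in the tick's caller. */
static void tick_check(struct group_cputimer *t)
{
	pthread_mutex_lock(&t->lock);
	if (t->running && t->active_timers == 0)
		t->running = 0;		/* nobody cares, stop accounting */
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct group_cputimer t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cputime = 12345,
	};

	printf("sample: %llu (running=%d)\n", sample_group_time(&t), t.running);
	tick_check(&t);			/* no armed timers -> accounting stops */
	printf("after tick: running=%d\n", t.running);
	return 0;
}

Compared with the removed start_process_timers()/stop_process_timers() pair, this
puts the enable on the unavoidable path (the sample itself), so a caller that
forgets to arm the accounting can no longer leave the flag off.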
-rw-r--r--	include/linux/sched.h	|  1
-rw-r--r--	kernel/posix-cpu-timers.c	| 42
2 files changed, 15 insertions, 28 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7939291..5d10fa0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2209,6 +2209,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cputimer->lock, flags);
+	cputimer->running = 1;
 	*times = cputimer->cputime;
 	spin_unlock_irqrestore(&cputimer->lock, flags);
 }
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index db107c9..e5d7bfd 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -488,7 +488,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
 	struct task_cputime cputime;
 
-	thread_group_cputime(tsk, &cputime);
+	thread_group_cputimer(tsk, &cputime);
 	cleanup_timers(tsk->signal->cpu_timers,
 		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
@@ -507,29 +507,6 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
 }
 
 /*
- * Enable the process wide cpu timer accounting.
- *
- * serialized using ->sighand->siglock
- */
-static void start_process_timers(struct task_struct *tsk)
-{
-	tsk->signal->cputimer.running = 1;
-	barrier();
-}
-
-/*
- * Release the process wide timer accounting -- timer stops ticking when
- * nobody cares about it.
- *
- * serialized using ->sighand->siglock
- */
-static void stop_process_timers(struct task_struct *tsk)
-{
-	tsk->signal->cputimer.running = 0;
-	barrier();
-}
-
-/*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
  * for reading, and interrupts disabled.
@@ -549,9 +526,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	BUG_ON(!irqs_disabled());
 	spin_lock(&p->sighand->siglock);
 
-	if (!CPUCLOCK_PERTHREAD(timer->it_clock))
-		start_process_timers(p);
-
 	listpos = head;
 	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
 		list_for_each_entry(next, head, entry) {
@@ -1021,6 +995,19 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }
 
+static void stop_process_timers(struct task_struct *tsk)
+{
+	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	unsigned long flags;
+
+	if (!cputimer->running)
+		return;
+
+	spin_lock_irqsave(&cputimer->lock, flags);
+	cputimer->running = 0;
+	spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@ -1427,7 +1414,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	struct list_head *head;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
-	start_process_timers(tsk);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {