path: root/kernel/hrtimer.c
author      Peter Zijlstra <peterz@infradead.org>    2008-12-04 11:17:10 +0100
committer   Ingo Molnar <mingo@elte.hu>              2008-12-04 11:31:25 +0100
commit      37810659ea7d9572c5ac284ade272f806ef8f788 (patch)
tree        7e251f9b8eb0552773702ecf45f8348f2b5b2e2f /kernel/hrtimer.c
parent      ca109491f612aab5c8152207631c0444f63da97f (diff)
hrtimer: removing all ur callback modes, fix hotplug
Impact: fix hrtimer locking (reported by lockdep) in the CPU hotplug case

This addition fixes the hotplug locking issue on my machine.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--   kernel/hrtimer.c   65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index efd6f41..b09c7a2 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1496,7 +1496,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 #ifdef CONFIG_HOTPLUG_CPU
 
 static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-                                 struct hrtimer_clock_base *new_base, int dcpu)
+                                 struct hrtimer_clock_base *new_base)
 {
         struct hrtimer *timer;
         struct rb_node *node;
@@ -1514,40 +1514,34 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                 timer->base = new_base;
                 /*
-                 * Enqueue the timer. Allow reprogramming of the event device
+                 * Enqueue the timers on the new cpu, but do not reprogram
+                 * the timer as that would enable a deadlock between
+                 * hrtimer_enqueue_reprogram() running the timer and us still
+                 * holding a nested base lock.
+                 *
+                 * Instead we tickle the hrtimer interrupt after the migration
+                 * is done, which will run all expired timers and re-program
+                 * the timer device.
                  */
-                enqueue_hrtimer(timer, new_base, 1);
+                enqueue_hrtimer(timer, new_base, 0);
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-                /*
-                 * Happens with high res enabled when the timer was
-                 * already expired and the callback mode is
-                 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
-                 * enqueue code does not move them to the soft irq
-                 * pending list for performance/latency reasons, but
-                 * in the migration state, we need to do that
-                 * otherwise we end up with a stale timer.
-                 */
-                if (timer->state == HRTIMER_STATE_MIGRATE) {
-                        /* XXX: running on offline cpu */
-                        __run_hrtimer(timer);
-                }
-#endif
                 /* Clear the migration state bit */
                 timer->state &= ~HRTIMER_STATE_MIGRATE;
         }
 }
 
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
 {
         struct hrtimer_cpu_base *old_base, *new_base;
-        int i;
+        int dcpu, i;
 
-        BUG_ON(cpu_online(cpu));
-        old_base = &per_cpu(hrtimer_bases, cpu);
+        BUG_ON(cpu_online(scpu));
+        old_base = &per_cpu(hrtimer_bases, scpu);
         new_base = &get_cpu_var(hrtimer_bases);
 
-        tick_cancel_sched_timer(cpu);
+        dcpu = smp_processor_id();
+
+        tick_cancel_sched_timer(scpu);
         /*
          * The caller is globally serialized and nobody else
          * takes two locks at once, deadlock is not possible.
@@ -1557,32 +1551,47 @@ static void migrate_hrtimers(int cpu)
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                 migrate_hrtimer_list(&old_base->clock_base[i],
-                                     &new_base->clock_base[i], cpu);
+                                     &new_base->clock_base[i]);
         }
 
         spin_unlock(&old_base->lock);
         spin_unlock_irq(&new_base->lock);
         put_cpu_var(hrtimer_bases);
+
+        return dcpu;
+}
+
+static void tickle_timers(void *arg)
+{
+        hrtimer_peek_ahead_timers();
 }
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                         unsigned long action, void *hcpu)
 {
-        unsigned int cpu = (long)hcpu;
+        int dcpu = -1, scpu = (long)hcpu;
 
         switch (action) {
 
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
-                init_hrtimers_cpu(cpu);
+                init_hrtimers_cpu(scpu);
                 break;
 
 #ifdef CONFIG_HOTPLUG_CPU
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
-                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
-                migrate_hrtimers(cpu);
+                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+                dcpu = migrate_hrtimers(scpu);
+                break;
+
+        case CPU_POST_DEAD:
+                if (dcpu == -1)
+                        break;
+
+                smp_call_function_single(dcpu, tickle_timers, NULL, 0);
                 break;
 #endif
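
Note (not part of the patch): the pattern the patch relies on is to move the timers onto the destination base while both base locks are held, but to defer running and reprogramming until after the locks are dropped, by poking the destination CPU. In the patch that poke is smp_call_function_single() running tickle_timers(), which calls hrtimer_peek_ahead_timers() on the destination CPU. The following userspace sketch only illustrates that ordering; it uses pthreads instead of per-CPU bases and IPIs, and all of its names (worker, entry, migrate, tickle) are invented for the illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry { int id; struct entry *next; };

struct worker {
        pthread_mutex_t lock;
        pthread_cond_t  cond;           /* stands in for the hrtimer interrupt */
        struct entry    *pending;
        bool            kicked, stop;
};

static struct worker dead_cpu = { .lock = PTHREAD_MUTEX_INITIALIZER, .cond = PTHREAD_COND_INITIALIZER };
static struct worker live_cpu = { .lock = PTHREAD_MUTEX_INITIALIZER, .cond = PTHREAD_COND_INITIALIZER };

/* Destination side: runs migrated entries only once it has been tickled. */
static void *worker_fn(void *arg)
{
        struct worker *w = arg;

        pthread_mutex_lock(&w->lock);
        for (;;) {
                while (!w->kicked && !w->stop)
                        pthread_cond_wait(&w->cond, &w->lock);
                w->kicked = false;
                while (w->pending) {            /* run what was migrated */
                        printf("ran entry %d\n", w->pending->id);
                        w->pending = w->pending->next;
                }
                if (w->stop)
                        break;
        }
        pthread_mutex_unlock(&w->lock);
        return NULL;
}

/* Move everything from @from to @to: enqueue only, never run anything here. */
static void migrate(struct worker *from, struct worker *to)
{
        pthread_mutex_lock(&to->lock);
        pthread_mutex_lock(&from->lock);        /* nested, like the base locks */
        while (from->pending) {
                struct entry *e = from->pending;

                from->pending = e->next;
                e->next = to->pending;
                to->pending = e;
        }
        pthread_mutex_unlock(&from->lock);
        pthread_mutex_unlock(&to->lock);
}

/* Poke the destination after the migration, with no locks nested anymore. */
static void tickle(struct worker *w, bool stop)
{
        pthread_mutex_lock(&w->lock);
        w->kicked = true;
        w->stop = stop;                         /* 'stop' just ends the demo */
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
}

int main(void)
{
        struct entry e[3] = { { 0, &e[1] }, { 1, &e[2] }, { 2, NULL } };
        pthread_t tid;

        dead_cpu.pending = e;
        pthread_create(&tid, NULL, worker_fn, &live_cpu);

        migrate(&dead_cpu, &live_cpu);
        tickle(&live_cpu, true);        /* analogue of smp_call_function_single() */
        pthread_join(tid, NULL);
        return 0;
}

The point mirrored here is the ordering: nothing that was migrated is executed while the two locks are nested; the destination only runs the work after being poked once the locks are gone. That is why the patch passes 0 to enqueue_hrtimer() during migration and defers the tickle to CPU_POST_DEAD.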