author     Thomas Gleixner <tglx@linutronix.de>   2015-05-05 10:00:13 +0200
committer  Thomas Gleixner <tglx@linutronix.de>   2015-05-05 10:25:23 +0200
commit     2951d5c031a3aaefa31b688fbf229e75692f4786 (patch)
tree       572e5e41e8a640d5362708f84c93d8fb28ab13f8
parent     30fbd59057004f97f45467124693f22e8b6f3e16 (diff)
tick: broadcast: Prevent livelock from event handler
With the removal of the hrtimer softirq the switch to highres/nohz mode
happens in the tick interrupt. That leads to a livelock when the per cpu
event handler is directly called from the broadcast handler under
broadcast lock, because broadcast lock needs to be taken for the
highres/nohz switch as well.

Solve this by calling the cpu local handler outside the broadcast_lock
held region.

Fixes: c6eb3f70d448 "hrtimer: Get rid of hrtimer softirq"
Reported-and-tested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  kernel/time/tick-broadcast.c | 53
1 file changed, 25 insertions(+), 28 deletions(-)
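For illustration only, before the diff itself: a minimal user-space sketch of the pattern the patch adopts. The names (do_broadcast(), local_event_handler(), handle_broadcast()) are hypothetical and a plain pthread mutex stands in for tick_broadcast_lock; this is not kernel code. The broadcast step merely reports whether the local cpu has a pending event, and the caller invokes the local handler only after dropping the lock, so the handler is free to take the lock again (e.g. for the highres/nohz switch).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t broadcast_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the per-cpu tick event handler. It may need to take
 * broadcast_lock itself, which is why it must not be called while the
 * lock is already held. */
static void local_event_handler(void)
{
	pthread_mutex_lock(&broadcast_lock);
	printf("local handler: mode switch done under broadcast_lock\n");
	pthread_mutex_unlock(&broadcast_lock);
}

/* Broadcast step, called with broadcast_lock held. Instead of invoking
 * the local handler directly, it only reports whether the calling cpu
 * has a pending event of its own. */
static bool do_broadcast(bool local_pending)
{
	/* ... kick the remote cpus in the mask here ... */
	return local_pending;
}

static void handle_broadcast(void)
{
	bool bc_local;

	pthread_mutex_lock(&broadcast_lock);
	bc_local = do_broadcast(true);
	pthread_mutex_unlock(&broadcast_lock);

	/* Run the local handler only after the lock has been dropped. */
	if (bc_local)
		local_event_handler();
}

int main(void)
{
	handle_broadcast();
	return 0;
}

With a non-recursive lock, calling local_event_handler() while broadcast_lock is still held would hang (the kernel's raw spinlock spins forever, which is the livelock described above); returning the flag and unlocking first avoids that.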
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 7e8ca4f..5d9e4aa 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -255,18 +255,18 @@ int tick_receive_broadcast(void)
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
  */
-static void tick_do_broadcast(struct cpumask *mask)
+static bool tick_do_broadcast(struct cpumask *mask)
 {
 	int cpu = smp_processor_id();
 	struct tick_device *td;
+	bool local = false;
 
 	/*
 	 * Check, if the current cpu is in the mask
 	 */
 	if (cpumask_test_cpu(cpu, mask)) {
 		cpumask_clear_cpu(cpu, mask);
-		td = &per_cpu(tick_cpu_device, cpu);
-		td->evtdev->event_handler(td->evtdev);
+		local = true;
 	}
 
 	if (!cpumask_empty(mask)) {
@@ -279,16 +279,17 @@ static void tick_do_broadcast(struct cpumask *mask)
 		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
 		td->evtdev->broadcast(mask);
 	}
+	return local;
 }
 
 /*
  * Periodic broadcast:
  * - invoke the broadcast handlers
  */
-static void tick_do_periodic_broadcast(void)
+static bool tick_do_periodic_broadcast(void)
 {
 	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
-	tick_do_broadcast(tmpmask);
+	return tick_do_broadcast(tmpmask);
 }
 
 /*
@@ -296,34 +297,26 @@ static void tick_do_periodic_broadcast(void)
  */
 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
-	ktime_t next;
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+	bool bc_local;
 
 	raw_spin_lock(&tick_broadcast_lock);
+	bc_local = tick_do_periodic_broadcast();
 
-	tick_do_periodic_broadcast();
+	if (dev->state == CLOCK_EVT_STATE_ONESHOT) {
+		ktime_t next = ktime_add(dev->next_event, tick_period);
 
-	/*
-	 * The device is in periodic mode. No reprogramming necessary:
-	 */
-	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
-		goto unlock;
+		clockevents_program_event(dev, next, true);
+	}
+	raw_spin_unlock(&tick_broadcast_lock);
 
 	/*
-	 * Setup the next period for devices, which do not have
-	 * periodic mode. We read dev->next_event first and add to it
-	 * when the event already expired. clockevents_program_event()
-	 * sets dev->next_event only when the event is really
-	 * programmed to the device.
+	 * We run the handler of the local cpu after dropping
+	 * tick_broadcast_lock because the handler might deadlock when
+	 * trying to switch to oneshot mode.
 	 */
-	for (next = dev->next_event; ;) {
-		next = ktime_add(next, tick_period);
-
-		if (!clockevents_program_event(dev, next, false))
-			goto unlock;
-		tick_do_periodic_broadcast();
-	}
-unlock:
-	raw_spin_unlock(&tick_broadcast_lock);
+	if (bc_local)
+		td->evtdev->event_handler(td->evtdev);
 }
 
 /**
@@ -622,9 +615,13 @@ again:
 	cpumask_and(tmpmask, tmpmask, cpu_online_mask);
 
 	/*
-	 * Wakeup the cpus which have an expired event.
+	 * Wakeup the cpus which have an expired event and handle the
+	 * broadcast event of the local cpu.
 	 */
-	tick_do_broadcast(tmpmask);
+	if (tick_do_broadcast(tmpmask)) {
+		td = this_cpu_ptr(&tick_cpu_device);
+		td->evtdev->event_handler(td->evtdev);
+	}
 
 	/*
 	 * Two reasons for reprogram: