Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c | 390
1 files changed, 88 insertions(+), 302 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 47e6334..1455b76 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -32,7 +32,6 @@
*/
#include <linux/cpu.h>
-#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
@@ -442,22 +441,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
- return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
- list_del_init(&timer->cb_entry);
-}
-
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -651,6 +634,7 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}
+
/*
* When High resolution timers are active, try to reprogram. Note, that in case
* the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +645,10 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
- /* Timer is expired, act upon the callback mode */
- switch(timer->cb_mode) {
- case HRTIMER_CB_IRQSAFE_PERCPU:
- case HRTIMER_CB_IRQSAFE_UNLOCKED:
- /*
- * This is solely for the sched tick emulation with
- * dynamic tick support to ensure that we do not
- * restart the tick right on the edge and end up with
- * the tick timer in the softirq ! The calling site
- * takes care of this. Also used for hrtimer sleeper !
- */
- debug_hrtimer_deactivate(timer);
- return 1;
- case HRTIMER_CB_SOFTIRQ:
- /*
- * Move everything else into the softirq pending list !
- */
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- timer->state = HRTIMER_STATE_PENDING;
- return 1;
- default:
- BUG();
- }
+ spin_unlock(&base->cpu_base->lock);
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ spin_lock(&base->cpu_base->lock);
+ return 1;
}
return 0;
}
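
The hunk above drops cpu_base->lock around raise_softirq_irqoff(): raising the softirq can wake ksoftirqd, which takes the runqueue lock, and holding the base lock across that call risks an ABBA inversion with rq->lock (the same concern the deleted comment in hrtimer_start_range_ns spells out). A minimal userspace sketch of this lock-drop pattern, with pthread mutexes standing in for cpu_base->lock and rq->lock (illustrative only, not kernel code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER; /* cpu_base->lock stand-in */
    static pthread_mutex_t rq_lock   = PTHREAD_MUTEX_INITIALIZER; /* rq->lock stand-in */

    /* Hypothetical wakeup path that needs rq_lock, like waking ksoftirqd. */
    static void raise_softirq_like(void)
    {
        pthread_mutex_lock(&rq_lock);
        puts("softirq raised");
        pthread_mutex_unlock(&rq_lock);
    }

    static void enqueue_reprogram_like(void)
    {
        pthread_mutex_lock(&base_lock);
        /* ... timer found to be already expired under base_lock ... */

        /* Drop base_lock before the call that acquires rq_lock. */
        pthread_mutex_unlock(&base_lock);
        raise_softirq_like();
        pthread_mutex_lock(&base_lock);

        /* ... continue and finish under base_lock ... */
        pthread_mutex_unlock(&base_lock);
    }

    int main(void)
    {
        enqueue_reprogram_like();
        return 0;
    }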
@@ -724,11 +687,6 @@ static int hrtimer_switch_to_hres(void)
return 1;
}
-static inline void hrtimer_raise_softirq(void)
-{
- raise_softirq(HRTIMER_SOFTIRQ);
-}
-
#else
static inline int hrtimer_hres_active(void) { return 0; }
@@ -742,12 +700,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- return 0;
-}
-static inline void hrtimer_raise_softirq(void) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -818,9 +770,11 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
*
* The timer is inserted in expiry order. Insertion into the
* red black tree is O(log(n)). Must hold the base lock.
+ *
+ * Returns 1 when the new timer is the leftmost timer in the tree.
*/
-static void enqueue_hrtimer(struct hrtimer *timer,
- struct hrtimer_clock_base *base, int reprogram)
+static int enqueue_hrtimer(struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
{
struct rb_node **link = &base->active.rb_node;
struct rb_node *parent = NULL;
@@ -852,20 +806,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
* Insert the timer to the rbtree and check whether it
* replaces the first pending timer
*/
- if (leftmost) {
- /*
- * Reprogram the clock event device. When the timer is already
- * expired hrtimer_enqueue_reprogram has either called the
- * callback or added it to the pending list and raised the
- * softirq.
- *
- * This is a NOP for !HIGHRES
- */
- if (reprogram && hrtimer_enqueue_reprogram(timer, base))
- return;
-
+ if (leftmost)
base->first = &timer->node;
- }
rb_link_node(&timer->node, parent, link);
rb_insert_color(&timer->node, &base->active);
@@ -874,6 +816,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
* state of a possibly running callback.
*/
timer->state |= HRTIMER_STATE_ENQUEUED;
+
+ return leftmost;
}
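
enqueue_hrtimer() now reports whether the new timer became the leftmost, i.e. earliest-expiring, entry so that callers can make the reprogramming decision themselves. A sketch of the same return convention on a sorted singly linked list instead of an rbtree (hypothetical struct timer, not the kernel type):

    #include <stdio.h>

    struct timer {
        long long expires;      /* expiry time, analogous to the rbtree key */
        struct timer *next;
    };

    /*
     * Insert in expiry order; return 1 when the new timer ends up at the
     * head of the list, the analogue of "leftmost in the rbtree".
     */
    static int enqueue_sorted(struct timer **head, struct timer *t)
    {
        struct timer **link = head;

        while (*link && (*link)->expires <= t->expires)
            link = &(*link)->next;

        t->next = *link;
        *link = t;
        return link == head;
    }

    int main(void)
    {
        struct timer *head = NULL;
        struct timer a = { 100, NULL }, b = { 50, NULL };

        printf("a leftmost: %d\n", enqueue_sorted(&head, &a)); /* 1 */
        printf("b leftmost: %d\n", enqueue_sorted(&head, &b)); /* 1: expires earlier */
        return 0;
    }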
/*
@@ -890,10 +834,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
{
- /* High res. callback list. NOP for !HIGHRES */
- if (hrtimer_cb_pending(timer))
- hrtimer_remove_cb_pending(timer);
- else {
+ if (timer->state & HRTIMER_STATE_ENQUEUED) {
/*
* Remove the timer from the rbtree and replace the
* first entry pointer if necessary.
@@ -953,7 +894,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
- int ret, raise;
+ int ret, leftmost;
base = lock_hrtimer_base(timer, &flags);
@@ -981,33 +922,19 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
timer_stats_hrtimer_set_start_info(timer);
+ leftmost = enqueue_hrtimer(timer, new_base);
+
/*
* Only allow reprogramming if the new base is on this CPU.
* (it might still be on another CPU if the timer was pending)
+ *
+ * XXX send_remote_softirq() ?
*/
- enqueue_hrtimer(timer, new_base,
- new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
-
- /*
- * The timer may be expired and moved to the cb_pending
- * list. We can not raise the softirq with base lock held due
- * to a possible deadlock with runqueue lock.
- */
- raise = timer->state == HRTIMER_STATE_PENDING;
-
- /*
- * We use preempt_disable to prevent this task from migrating after
- * setting up the softirq and raising it. Otherwise, if me migrate
- * we will raise the softirq on the wrong CPU.
- */
- preempt_disable();
+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+ hrtimer_enqueue_reprogram(timer, new_base);
unlock_hrtimer_base(timer, &flags);
- if (raise)
- hrtimer_raise_softirq();
- preempt_enable();
-
return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
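
The reprogram decision above hinges on locality: __get_cpu_var(hrtimer_bases) is this CPU's base, and only a timer that is both leftmost and local may reprogram the local clock event device; the XXX note records that remote bases are not handled yet. A rough userspace analogy, assuming C11 thread-local storage stands in for per-CPU data (illustrative only, not the kernel per-CPU API):

    #include <stdio.h>

    struct cpu_base { int cpu; };

    /* One instance per thread, loosely like DEFINE_PER_CPU(hrtimer_bases). */
    static _Thread_local struct cpu_base local_base;

    /* Reprogram only when the timer's base is the caller's own base. */
    static int maybe_reprogram(struct cpu_base *timer_base, int leftmost)
    {
        if (leftmost && timer_base == &local_base) {
            puts("reprogram local clock event device");
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct cpu_base remote;

        maybe_reprogram(&local_base, 1);  /* reprograms */
        maybe_reprogram(&remote, 1);      /* skipped: not our base */
        return 0;
    }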
@@ -1192,75 +1119,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
- spin_lock_irq(&cpu_base->lock);
-
- while (!list_empty(&cpu_base->cb_pending)) {
- enum hrtimer_restart (*fn)(struct hrtimer *);
- struct hrtimer *timer;
- int restart;
- int emulate_hardirq_ctx = 0;
-
- timer = list_entry(cpu_base->cb_pending.next,
- struct hrtimer, cb_entry);
-
- debug_hrtimer_deactivate(timer);
- timer_stats_account_hrtimer(timer);
-
- fn = timer->function;
- /*
- * A timer might have been added to the cb_pending list
- * when it was migrated during a cpu-offline operation.
- * Emulate hardirq context for such timers.
- */
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
- timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
- emulate_hardirq_ctx = 1;
-
- __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
- spin_unlock_irq(&cpu_base->lock);
-
- if (unlikely(emulate_hardirq_ctx)) {
- local_irq_disable();
- restart = fn(timer);
- local_irq_enable();
- } else
- restart = fn(timer);
-
- spin_lock_irq(&cpu_base->lock);
-
- timer->state &= ~HRTIMER_STATE_CALLBACK;
- if (restart == HRTIMER_RESTART) {
- BUG_ON(hrtimer_active(timer));
- /*
- * Enqueue the timer, allow reprogramming of the event
- * device
- */
- enqueue_hrtimer(timer, timer->base, 1);
- } else if (hrtimer_active(timer)) {
- /*
- * If the timer was rearmed on another CPU, reprogram
- * the event device.
- */
- struct hrtimer_clock_base *base = timer->base;
-
- if (base->first == &timer->node &&
- hrtimer_reprogram(timer, base)) {
- /*
- * Timer is expired. Thus move it from tree to
- * pending list again.
- */
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- }
- }
- }
- spin_unlock_irq(&cpu_base->lock);
-}
-
static void __run_hrtimer(struct hrtimer *timer)
{
struct hrtimer_clock_base *base = timer->base;
@@ -1268,34 +1126,30 @@ static void __run_hrtimer(struct hrtimer *timer)
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
+ WARN_ON(!irqs_disabled());
+
debug_hrtimer_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
timer_stats_account_hrtimer(timer);
-
fn = timer->function;
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
- timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
- /*
- * Used for scheduler timers, avoid lock inversion with
- * rq->lock and tasklist_lock.
- *
- * These timers are required to deal with enqueue expiry
- * themselves and are not allowed to migrate.
- */
- spin_unlock(&cpu_base->lock);
- restart = fn(timer);
- spin_lock(&cpu_base->lock);
- } else
- restart = fn(timer);
/*
- * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
- * reprogramming of the event hardware. This happens at the end of this
- * function anyway.
+ * Because we run timers from hardirq context, there is no chance
+ * they get migrated to another CPU, therefore it's safe to unlock
+ * the timer base.
+ */
+ spin_unlock(&cpu_base->lock);
+ restart = fn(timer);
+ spin_lock(&cpu_base->lock);
+
+ /*
+ * Note: We clear the CALLBACK bit after enqueue_hrtimer and
+ * we do not reprogram the event hardware. Reprogramming happens
+ * either in hrtimer_start_range_ns() or in hrtimer_interrupt().
*/
if (restart != HRTIMER_NORESTART) {
BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
- enqueue_hrtimer(timer, base, 0);
+ enqueue_hrtimer(timer, base);
}
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
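
__run_hrtimer() marks the timer with HRTIMER_STATE_CALLBACK, drops the base lock for the duration of the callback, and re-enqueues before clearing the bit when the callback returns HRTIMER_RESTART. A condensed userspace sketch of that protocol, assuming one mutex and a simplified state word (not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    enum { STATE_INACTIVE = 0, STATE_ENQUEUED = 1, STATE_CALLBACK = 2 };
    enum restart { NORESTART, RESTART };

    struct timer {
        int state;
        enum restart (*fn)(struct timer *);
    };

    static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

    static void run_timer(struct timer *t)
    {
        enum restart r;

        pthread_mutex_lock(&base_lock);
        t->state = STATE_CALLBACK;       /* visible as "running" to others */

        /* The callback may take other locks; run it without base_lock. */
        pthread_mutex_unlock(&base_lock);
        r = t->fn(t);
        pthread_mutex_lock(&base_lock);

        if (r == RESTART)
            t->state |= STATE_ENQUEUED;  /* re-arm before clearing CALLBACK */
        t->state &= ~STATE_CALLBACK;
        pthread_mutex_unlock(&base_lock);
    }

    static enum restart tick(struct timer *t)
    {
        (void)t;
        puts("tick");
        return NORESTART;
    }

    int main(void)
    {
        struct timer t = { STATE_INACTIVE, tick };
        run_timer(&t);
        return 0;
    }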
@@ -1311,7 +1165,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
ktime_t expires_next, now;
- int i, raise = 0;
+ int i;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1360,16 +1214,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
break;
}
- /* Move softirq callbacks to the pending list */
- if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- raise = 1;
- continue;
- }
-
__run_hrtimer(timer);
}
spin_unlock(&cpu_base->lock);
@@ -1383,10 +1227,22 @@ void hrtimer_interrupt(struct clock_event_device *dev)
if (tick_program_event(expires_next, 0))
goto retry;
}
+}
+
+/*
+ * local version of hrtimer_peek_ahead_timers() called with interrupts
+ * disabled.
+ */
+static void __hrtimer_peek_ahead_timers(void)
+{
+ struct tick_device *td;
+
+ if (!hrtimer_hres_active())
+ return;
- /* Raise softirq ? */
- if (raise)
- raise_softirq(HRTIMER_SOFTIRQ);
+ td = &__get_cpu_var(tick_cpu_device);
+ if (td && td->evtdev)
+ hrtimer_interrupt(td->evtdev);
}
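
hrtimer_interrupt() reprograms the clock event device for expires_next and, when tick_program_event() reports a deadline already in the past, loops back to expire again; the new __hrtimer_peek_ahead_timers() reuses exactly this path by invoking hrtimer_interrupt() on the local tick device. A sketch of the retry idea against CLOCK_MONOTONIC, with program_event() as a hypothetical stand-in for tick_program_event():

    #include <stdio.h>
    #include <time.h>

    static long long now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* Stand-in for tick_program_event(): fails when the expiry already passed. */
    static int program_event(long long expires)
    {
        return expires <= now_ns();
    }

    int main(void)
    {
        /* First deadline is already overdue, second is comfortably ahead. */
        long long deadlines[] = { now_ns() - 1, now_ns() + 1000000000LL };
        int i = 0;

    retry:
        if (program_event(deadlines[i])) {
            printf("deadline %d already past: expire timers, retry\n", i);
            i++;
            goto retry;
        }
        printf("deadline %d programmed\n", i);
        return 0;
    }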
/**
@@ -1400,25 +1256,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
*/
void hrtimer_peek_ahead_timers(void)
{
- struct tick_device *td;
unsigned long flags;
- if (!hrtimer_hres_active())
- return;
-
local_irq_save(flags);
- td = &__get_cpu_var(tick_cpu_device);
- if (td && td->evtdev)
- hrtimer_interrupt(td->evtdev);
+ __hrtimer_peek_ahead_timers();
local_irq_restore(flags);
}
static void run_hrtimer_softirq(struct softirq_action *h)
{
- run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
+ hrtimer_peek_ahead_timers();
}
-#endif /* CONFIG_HIGH_RES_TIMERS */
+#else /* CONFIG_HIGH_RES_TIMERS */
+
+static inline void __hrtimer_peek_ahead_timers(void) { }
+
+#endif /* !CONFIG_HIGH_RES_TIMERS */
/*
* Called from timer softirq every jiffy, expire hrtimers:
@@ -1429,8 +1283,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
*/
void hrtimer_run_pending(void)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
if (hrtimer_hres_active())
return;
@@ -1444,8 +1296,6 @@ void hrtimer_run_pending(void)
*/
if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
hrtimer_switch_to_hres();
-
- run_hrtimer_pending(cpu_base);
}
/*
@@ -1482,14 +1332,6 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- continue;
- }
-
__run_hrtimer(timer);
}
spin_unlock(&cpu_base->lock);
@@ -1516,9 +1358,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
- sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
}
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,18 +1494,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
- INIT_LIST_HEAD(&cpu_base->cb_pending);
hrtimer_init_hres(cpu_base);
}
#ifdef CONFIG_HOTPLUG_CPU
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ struct hrtimer_clock_base *new_base)
{
struct hrtimer *timer;
struct rb_node *node;
- int raise = 0;
while ((node = rb_first(&old_base->active))) {
timer = rb_entry(node, struct hrtimer, node);
@@ -1674,18 +1511,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
debug_hrtimer_deactivate(timer);
/*
- * Should not happen. Per CPU timers should be
- * canceled _before_ the migration code is called
- */
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
- __remove_hrtimer(timer, old_base,
- HRTIMER_STATE_INACTIVE, 0);
- WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
- timer, timer->function, dcpu);
- continue;
- }
-
- /*
* Mark it as STATE_MIGRATE not INACTIVE otherwise the
* timer could be seen as !active and just vanish away
* under us on another CPU
@@ -1693,112 +1518,73 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
timer->base = new_base;
/*
- * Enqueue the timer. Allow reprogramming of the event device
+ * Enqueue the timers on the new CPU. This does not
+ * reprogram the event device even if the migrated
+ * timer expires before the current earliest on this
+ * CPU; we run hrtimer_interrupt after the migration
+ * to expire already overdue timers and reprogram the
+ * event device.
*/
- enqueue_hrtimer(timer, new_base, 1);
+ enqueue_hrtimer(timer, new_base);
-#ifdef CONFIG_HIGH_RES_TIMERS
- /*
- * Happens with high res enabled when the timer was
- * already expired and the callback mode is
- * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
- * enqueue code does not move them to the soft irq
- * pending list for performance/latency reasons, but
- * in the migration state, we need to do that
- * otherwise we end up with a stale timer.
- */
- if (timer->state == HRTIMER_STATE_MIGRATE) {
- timer->state = HRTIMER_STATE_PENDING;
- list_add_tail(&timer->cb_entry,
- &new_base->cpu_base->cb_pending);
- raise = 1;
- }
-#endif
/* Clear the migration state bit */
timer->state &= ~HRTIMER_STATE_MIGRATE;
}
- return raise;
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
- struct hrtimer_cpu_base *new_base)
-{
- struct hrtimer *timer;
- int raise = 0;
-
- while (!list_empty(&old_base->cb_pending)) {
- timer = list_entry(old_base->cb_pending.next,
- struct hrtimer, cb_entry);
-
- __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
- timer->base = &new_base->clock_base[timer->base->index];
- list_add_tail(&timer->cb_entry, &new_base->cb_pending);
- raise = 1;
- }
- return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
- struct hrtimer_cpu_base *new_base)
-{
- return 0;
}
-#endif
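
While a timer is detached from the old rbtree, migrate_hrtimer_list() parks it in HRTIMER_STATE_MIGRATE so that a concurrent hrtimer_active() on another CPU never sees it as inactive mid-move; the bit is cleared once the timer sits on the new base. A compressed sketch of that transitional-state idea with simplified flags (no rbtree, not the kernel state encoding):

    #include <stdio.h>

    enum {
        STATE_INACTIVE = 0x00,
        STATE_ENQUEUED = 0x01,
        STATE_MIGRATE  = 0x04,   /* "in flight between bases" */
    };

    struct timer { int state; };

    /* hrtimer_active()-style check: anything non-inactive counts as active. */
    static int timer_active(const struct timer *t)
    {
        return t->state != STATE_INACTIVE;
    }

    static void migrate(struct timer *t)
    {
        t->state = STATE_MIGRATE;          /* detached, but still "active" */
        printf("mid-move, active=%d\n", timer_active(t));   /* 1 */

        t->state |= STATE_ENQUEUED;        /* enqueued on the new base */
        t->state &= ~STATE_MIGRATE;        /* clear the migration bit */
        printf("done, active=%d\n", timer_active(t));       /* 1 */
    }

    int main(void)
    {
        struct timer t = { STATE_ENQUEUED };
        migrate(&t);
        return 0;
    }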
-static void migrate_hrtimers(int cpu)
+static void migrate_hrtimers(int scpu)
{
struct hrtimer_cpu_base *old_base, *new_base;
- int i, raise = 0;
+ int i;
- BUG_ON(cpu_online(cpu));
- old_base = &per_cpu(hrtimer_bases, cpu);
- new_base = &get_cpu_var(hrtimer_bases);
+ BUG_ON(cpu_online(scpu));
+ tick_cancel_sched_timer(scpu);
- tick_cancel_sched_timer(cpu);
+ local_irq_disable();
+ old_base = &per_cpu(hrtimer_bases, scpu);
+ new_base = &__get_cpu_var(hrtimer_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock_irq(&new_base->lock);
+ spin_lock(&new_base->lock);
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- if (migrate_hrtimer_list(&old_base->clock_base[i],
- &new_base->clock_base[i], cpu))
- raise = 1;
+ migrate_hrtimer_list(&old_base->clock_base[i],
+ &new_base->clock_base[i]);
}
- if (migrate_hrtimer_pending(old_base, new_base))
- raise = 1;
-
spin_unlock(&old_base->lock);
- spin_unlock_irq(&new_base->lock);
- put_cpu_var(hrtimer_bases);
+ spin_unlock(&new_base->lock);
- if (raise)
- hrtimer_raise_softirq();
+ /* Check whether we have expired work to do */
+ __hrtimer_peek_ahead_timers();
+ local_irq_enable();
}
+
#endif /* CONFIG_HOTPLUG_CPU */
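
migrate_hrtimers() takes both per-CPU base locks with interrupts disabled; spin_lock_nested(..., SINGLE_DEPTH_NESTING) merely tells lockdep that nesting two locks of the same class is intentional, and correctness rests on the hotplug path being globally serialized, as the comment in the hunk says. A userspace sketch of that caller-serialized double lock, using pthread mutexes as stand-ins:

    #include <pthread.h>
    #include <stdio.h>

    struct cpu_base {
        pthread_mutex_t lock;
        int nr_timers;
    };

    /*
     * Safe only because the caller guarantees nobody else ever holds two
     * of these locks at once (the hotplug path is globally serialized).
     */
    static void migrate(struct cpu_base *old_base, struct cpu_base *new_base)
    {
        pthread_mutex_lock(&new_base->lock);
        pthread_mutex_lock(&old_base->lock);  /* the "nested" acquisition */

        new_base->nr_timers += old_base->nr_timers;
        old_base->nr_timers = 0;

        pthread_mutex_unlock(&old_base->lock);
        pthread_mutex_unlock(&new_base->lock);
    }

    int main(void)
    {
        struct cpu_base dead = { PTHREAD_MUTEX_INITIALIZER, 3 };
        struct cpu_base live = { PTHREAD_MUTEX_INITIALIZER, 1 };

        migrate(&dead, &live);
        printf("live now has %d timers\n", live.nr_timers);
        return 0;
    }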
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- unsigned int cpu = (long)hcpu;
+ int scpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- init_hrtimers_cpu(cpu);
+ init_hrtimers_cpu(scpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
- migrate_hrtimers(cpu);
+ {
+ clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+ migrate_hrtimers(scpu);
break;
+ }
#endif
default: