author    Arjan van de Ven <arjan@linux.intel.com>  2008-09-10 16:06:00 -0700
committer Arjan van de Ven <arjan@linux.intel.com>  2008-09-11 07:17:49 -0700
commit    2e94d1f71f7e4404d997e6fb4f1618aa147d76f9 (patch)
tree      73958a61dffff311cdcdc8edcb7e6a4953150601
parent    ae4b748e81b7e366f04f55229d5e372e372c33af (diff)
hrtimer: peek at the timer queue just before going idle
As part of going idle, we already look at the time of the next timer event to
determine which C-state to select, etc.

This patch adds functionality that fires timers which are past their soft
expiry time at this point, before the next wakeup time is calculated. Running
these timers on the way into idle avoids waking up again later just for them.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
 drivers/cpuidle/cpuidle.c |  7 +++++++
 include/linux/hrtimer.h   |  5 +++++
 kernel/hrtimer.c          | 30 ++++++++++++++++++++++++++++++
 3 files changed, 42 insertions(+), 0 deletions(-)
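For context, the "soft expiry time" mentioned above comes from the range-hrtimer
API, which lets callers give a timer both a soft and a hard expiry; the slack
between the two is what the peek-ahead pass exploits. Below is a minimal,
illustrative sketch of arming such a timer, assuming the
hrtimer_set_expires_range_ns()/hrtimer_start_expires() helpers from the same
patch series (my_timer, my_timer_fn and arm_with_slack are made-up names, not
part of this commit):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

/*
 * Callback invoked when the timer fires, anywhere between its soft and
 * hard expiry time.
 */
static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* do the deferred work here */
	return HRTIMER_NORESTART;
}

/*
 * Arm the timer to fire delay_ns from now, allowing up to slack_ns of
 * extra delay: soft expiry = now + delay_ns, hard expiry = soft + slack_ns.
 */
static void arm_with_slack(u64 delay_ns, unsigned long slack_ns)
{
	ktime_t soft = ktime_add_ns(ktime_get(), delay_ns);

	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	my_timer.function = my_timer_fn;

	hrtimer_set_expires_range_ns(&my_timer, soft, slack_ns);
	hrtimer_start_expires(&my_timer, HRTIMER_MODE_ABS);
}

With a non-zero slack, hrtimer_peek_ahead_timers() can run such a timer on the
way into idle once its soft expiry has passed, instead of programming a
separate wakeup for it.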
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 5ce07b5..2e31484 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -16,6 +16,7 @@
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
+#include <linux/hrtimer.h>
#include "cpuidle.h"
@@ -60,6 +61,12 @@ static void cpuidle_idle_call(void)
return;
}
+ /*
+ * run any timers that can be run now, at this point
+ * before calculating the idle duration etc.
+ */
+ hrtimer_peek_ahead_timers();
+
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(dev);
if (need_resched())
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 95db11f..d93b1e1 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -326,6 +326,11 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
+
+DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+extern void hrtimer_peek_ahead_timers(void);
+
+
/* Exported timer functions: */
/* Initialize timers: */
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 9a4c901..eb2cf98 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1381,6 +1381,36 @@ void hrtimer_interrupt(struct clock_event_device *dev)
raise_softirq(HRTIMER_SOFTIRQ);
}
+/**
+ * hrtimer_peek_ahead_timers -- run soft-expired timers now
+ *
+ * hrtimer_peek_ahead_timers will peek at the timer queue of
+ * the current cpu and check if there are any timers for which
+ * the soft expiry time has passed. If any such timers exist,
+ * they are run immediately and then removed from the timer queue.
+ *
+ */
+void hrtimer_peek_ahead_timers(void)
+{
+ unsigned long flags;
+ struct tick_device *td;
+ struct clock_event_device *dev;
+
+ if (!hrtimer_hres_active())
+ return;
+
+ local_irq_save(flags);
+ td = &__get_cpu_var(tick_cpu_device);
+ if (!td)
+ goto out;
+ dev = td->evtdev;
+ if (!dev)
+ goto out;
+ hrtimer_interrupt(dev);
+out:
+ local_irq_restore(flags);
+}
+
static void run_hrtimer_softirq(struct softirq_action *h)
{
run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));