author    Ingo Molnar <mingo@elte.hu>  2009-02-26 20:20:29 +0100
committer Ingo Molnar <mingo@elte.hu>  2009-02-26 21:20:22 +0100
commit    b342501cd31e5546d0c9ca8ceff5ded1832f9e5b
tree      3cd5778565a45a74e8ad0640093927a77d4d625e
parent    694593e3374a67d95ece6a275a1f181644c2c4d8
sched: allow architectures to specify sched_clock_stable
Allow CONFIG_HAVE_UNSTABLE_SCHED_CLOCK architectures to still specify
that their sched_clock() implementation is reliable. This will be used
by x86 to switch on a faster sched_clock_cpu() implementation on
certain CPU types.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
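For illustration, a minimal sketch of the arch-side opt-in this patch
enables. It assumes the extern declaration of sched_clock_stable that
the full patch adds to <linux/sched.h> (this view is limited to
kernel/sched_clock.c); arch_init_stable_clock() is a hypothetical name,
not something this patch adds, and the real x86 wiring landed separately.

	/*
	 * Sketch only: an architecture built with
	 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK flips the flag once it has
	 * verified its clock source. arch_init_stable_clock() is a
	 * hypothetical helper, not part of this patch.
	 */
	#include <linux/sched.h>	/* extern int sched_clock_stable; */

	static void __init arch_init_stable_clock(void)
	{
		/*
		 * Assumption: only call this after confirming that the
		 * counter behind sched_clock() is monotonic and
		 * synchronized across CPUs (e.g. an invariant TSC).
		 * Left at 0, the filtered per-cpu path in
		 * sched_clock_cpu() remains in effect.
		 */
		sched_clock_stable = 1;
	}

With the flag set, sched_clock_cpu() short-circuits to the raw
sched_clock() and skips the per-cpu clamping and cross-CPU
reconciliation entirely, which is the faster path the changelog
refers to.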
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--  kernel/sched_clock.c | 45 ++++++++++++++++++++-------------------------
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a0b0852..a755d02 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,11 +24,11 @@
* The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
* consistent between cpus (never more than 2 jiffies difference).
*/
-#include <linux/sched.h>
-#include <linux/percpu.h>
#include <linux/spinlock.h>
-#include <linux/ktime.h>
#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
/*
* Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
static __read_mostly int sched_clock_running;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
struct sched_clock_data {
/*
@@ -87,7 +91,7 @@ void sched_clock_init(void)
}
/*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
*/
static inline u64 wrap_min(u64 x, u64 y)
@@ -116,10 +120,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
if (unlikely(delta < 0))
delta = 0;
+ if (unlikely(!sched_clock_running))
+ return 0ull;
+
/*
* scd->clock = clamp(scd->tick_gtod + delta,
- * max(scd->tick_gtod, scd->clock),
- * scd->tick_gtod + TICK_NSEC);
+ * max(scd->tick_gtod, scd->clock),
+ * scd->tick_gtod + TICK_NSEC);
*/
clock = scd->tick_gtod + delta;
@@ -148,12 +155,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
u64 sched_clock_cpu(int cpu)
{
- struct sched_clock_data *scd = cpu_sdc(cpu);
u64 now, clock, this_clock, remote_clock;
+ struct sched_clock_data *scd;
- if (unlikely(!sched_clock_running))
- return 0ull;
+ if (sched_clock_stable)
+ return sched_clock();
+ scd = cpu_sdc(cpu);
WARN_ON_ONCE(!irqs_disabled());
now = sched_clock();
@@ -193,6 +201,8 @@ u64 sched_clock_cpu(int cpu)
return clock;
}
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
void sched_clock_tick(void)
{
struct sched_clock_data *scd = this_scd();
@@ -235,22 +245,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
- sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
- if (unlikely(!sched_clock_running))
- return 0;
-
- return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
unsigned long long cpu_clock(int cpu)
{