author    Ingo Molnar <mingo@kernel.org>  2013-10-10 06:25:23 +0200
committer Ingo Molnar <mingo@kernel.org>  2013-10-10 06:25:23 +0200
commit    8a749de5e32d2b72def93f7bd7a2745580d75872 (patch)
tree      e46d9059d0328bcb2faea1c084d17f749338cfe4
parent    a44eb870f815356dac56adf3a380ee9b94787424 (diff)
parent    b4042ceaabbd913bc5b397ddd1e396eeb312d72f (diff)
Merge branch 'fortglx/3.13/time' of git://git.linaro.org/people/jstultz/linux into timers/core
Pull more timekeeping items for v3.13 from John Stultz:

 * Small cleanup in the clocksource code.
 * Fix for rtc-pl031 to let it work with alarmtimers.
 * Move arm64 to using the generic sched_clock framework & resulting
   cleanup in the generic sched_clock code.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/arm/kernel/arch_timer.c          | 14
-rw-r--r--  arch/arm64/Kconfig                    |  1
-rw-r--r--  arch/arm64/kernel/time.c              | 10
-rw-r--r--  drivers/clocksource/arm_arch_timer.c  | 10
-rw-r--r--  drivers/rtc/rtc-pl031.c               |  3
-rw-r--r--  include/linux/sched_clock.h           |  2
-rw-r--r--  kernel/time/clocksource.c             |  4
-rw-r--r--  kernel/time/sched_clock.c             |  9

8 files changed, 15 insertions, 38 deletions
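
The main theme of the pull is consolidating sched_clock: the 32-bit ARM and arm64 overrides below are deleted, and the architected timer driver instead hands the generic framework a raw counter read callback, the usable counter width in bits, and the counter rate via sched_clock_register(); the core then derives the mult/shift conversion and handles wraparound. A minimal sketch of that registration pattern, with a hypothetical MMIO counter standing in for the architected timer (base address, register offset, width and rate are illustrative; only sched_clock_register() and its signature come from this series):

/*
 * Sketch: hooking a free-running counter into the generic sched_clock
 * framework. Everything named "example_*" is hypothetical.
 */
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *example_timer_base;	/* hypothetical counter MMIO base */

static u64 notrace example_read_counter(void)
{
	/* Keep the read path trivial: sched_clock() is a hot, notrace path. */
	return readl_relaxed(example_timer_base + 0x04);
}

static void __init example_timer_init(void)
{
	/*
	 * 32-bit counter running at 24 MHz (illustrative values); the core
	 * computes mult/shift and schedules wraparound updates, so no
	 * driver-private sched_clock() override is needed any more.
	 */
	sched_clock_register(example_read_counter, 32, 24000000);
}
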
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 221f07b..1791f12 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/sched_clock.h>
#include <asm/delay.h>
@@ -22,13 +21,6 @@ static unsigned long arch_timer_read_counter_long(void)
return arch_timer_read_counter();
}
-static u32 sched_clock_mult __read_mostly;
-
-static unsigned long long notrace arch_timer_sched_clock(void)
-{
- return arch_timer_read_counter() * sched_clock_mult;
-}
-
static struct delay_timer arch_delay_timer;
static void __init arch_timer_delay_timer_register(void)
@@ -48,11 +40,5 @@ int __init arch_timer_arch_init(void)
arch_timer_delay_timer_register();
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
- sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
- sched_clock_func = arch_timer_sched_clock;
- pr_info("sched_clock: ARM arch timer >56 bits at %ukHz, resolution %uns\n",
- arch_timer_rate / 1000, sched_clock_mult);
-
return 0;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c044548..35fd0eb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -14,6 +14,7 @@ config ARM64
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 03dc371..29c39d5 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -61,13 +61,6 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-static u64 sched_clock_mult __read_mostly;
-
-unsigned long long notrace sched_clock(void)
-{
- return arch_timer_read_counter() * sched_clock_mult;
-}
-
void __init time_init(void)
{
u32 arch_timer_rate;
@@ -78,9 +71,6 @@ void __init time_init(void)
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
- sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
-
/* Calibrate the delay loop directly */
lpj_fine = arch_timer_rate / HZ;
}
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index b94b0d4..f655036 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -20,6 +20,7 @@
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/sched_clock.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -514,6 +515,15 @@ static int __init arch_timer_register(void)
goto out;
}
+ clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+ cyclecounter.mult = clocksource_counter.mult;
+ cyclecounter.shift = clocksource_counter.shift;
+ timecounter_init(&timecounter, &cyclecounter,
+ arch_counter_get_cntvct());
+
+ /* 56 bits minimum, so we assume worst case rollover */
+ sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
+
if (arch_timer_use_virtual) {
ppi = arch_timer_ppi[VIRT_PPI];
err = request_percpu_irq(ppi, arch_timer_handler_virt,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 0f0609b..e3b2571 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -371,6 +371,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
+ device_init_wakeup(&adev->dev, 1);
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {
@@ -384,8 +385,6 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_irq;
}
- device_init_wakeup(&adev->dev, 1);
-
return 0;
out_no_irq:
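
The rtc-pl031 hunk above is purely an ordering fix: device_init_wakeup() now runs before rtc_device_register(), so the device is already flagged as wakeup-capable when the RTC class (and the alarmtimer code that looks for a wakeup-capable RTC) first sees it. A minimal probe-ordering sketch under that assumption; everything named "example_*" is illustrative, and only device_init_wakeup() and rtc_device_register() are the real calls from the hunk:

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/rtc.h>

static const struct rtc_class_ops example_rtc_ops;	/* illustrative, empty */

static int example_rtc_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct rtc_device *rtc;

	/* Mark the device wakeup-capable before it is registered ... */
	device_init_wakeup(&adev->dev, 1);

	/*
	 * ... so consumers that check wakeup capability when the RTC is
	 * added (e.g. alarmtimer RTC selection) will accept this device.
	 */
	rtc = rtc_device_register("example-rtc", &adev->dev,
				  &example_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	return 0;
}
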
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index eca7abe..cddf0c2 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -18,6 +18,4 @@ extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
-extern unsigned long long (*sched_clock_func)(void);
-
#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 64cf63c..c9317e1 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -940,7 +940,7 @@ static ssize_t sysfs_override_clocksource(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- size_t ret;
+ ssize_t ret;
mutex_lock(&clocksource_mutex);
@@ -968,7 +968,7 @@ static ssize_t sysfs_unbind_clocksource(struct device *dev,
{
struct clocksource *cs;
char name[CS_NAME_LEN];
- size_t ret;
+ ssize_t ret;
ret = sysfs_get_uname(buf, name, count);
if (ret < 0)
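
The two kernel/time/clocksource.c hunks are a sign-bug fix rather than a cosmetic cleanup: ret receives the return value of sysfs_get_uname(), which reports failure as a negative errno, but with an unsigned size_t the subsequent "if (ret < 0)" test can never be true, so errors were silently ignored; ssize_t restores the check. A small self-contained illustration of the failure mode (parse_name() is a hypothetical stand-in for a helper returning a negative errno):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

/* Hypothetical helper that reports failure as a negative errno. */
static long parse_name(const char *buf)
{
	return -EINVAL;
}

int main(void)
{
	size_t bad = parse_name("bogus");	/* negative value wraps to a huge unsigned */
	ssize_t good = parse_name("bogus");

	if (bad < 0)				/* always false: the error path is dead code */
		printf("unsigned: error detected\n");
	else
		printf("unsigned: error lost, bad=%zu\n", bad);

	if (good < 0)				/* taken as intended */
		printf("signed: error detected, good=%zd\n", good);

	return 0;
}
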
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index f388bae..68b7993 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -63,7 +63,7 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
return (cyc * mult) >> shift;
}
-static unsigned long long notrace sched_clock_32(void)
+unsigned long long notrace sched_clock(void)
{
u64 epoch_ns;
u64 epoch_cyc;
@@ -170,13 +170,6 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
}
-unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
-
-unsigned long long notrace sched_clock(void)
-{
- return sched_clock_func();
-}
-
void __init sched_clock_postinit(void)
{
/*