From 2b55d10c46815d9660c0f1bc6044f7019ff384aa Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 19 Dec 2012 11:37:49 +0000 Subject: arm: arch_timer: balance device_node refcounting When we get the device_node for the arch timer, its refcount is automatically incremented in of_find_matching_node, but it is never decremented. This patch decrements the refcount on the node after we're finished using it. Reported-by: Will Deacon Signed-off-by: Mark Rutland Acked-by: Santosh Shilimkar Acked-by: Catalin Marinas --- arch/arm/kernel/arch_timer.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index c8ef207..6dd73c6 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -488,6 +488,8 @@ int __init arch_timer_of_register(void) for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) arch_timer_ppi[i] = irq_of_parse_and_map(np, i); + of_node_put(np); + /* * If no interrupt provided for virtual timer, we'll have to * stick to the physical timer. It'd better be accessible... -- cgit v1.1 From ef201de430b0deb62a9afd2c4e67f04525cec43c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Nov 2012 14:40:44 +0000 Subject: arm: arch_timer: remove redundant available check This check is a holdover from the pre-devicetree days. As the timer is not probed except by platforms which register it via devicetree, it's not strictly necessary. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier --- arch/arm/kernel/arch_timer.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index 6dd73c6..1bb3b582 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -20,11 +20,9 @@ #include #include -#include #include #include #include -#include #include static unsigned long arch_timer_rate; @@ -259,20 +257,10 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk) return 0; } -/* Is the optional system timer available? */ -static int local_timer_is_architected(void) -{ - return (cpu_architecture() >= CPU_ARCH_ARMv7) && - ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1; -} - static int arch_timer_available(void) { unsigned long freq; - if (!local_timer_is_architected()) - return -ENXIO; - if (arch_timer_rate == 0) { freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_FREQ); -- cgit v1.1 From ef01c1d1483d214357f183949bc6173f29906a87 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Nov 2012 14:49:27 +0000 Subject: arm: arch_timer: use u64/u32 for register data To ensure the correct size of types, use u64 for the return value of arch_timer_get_cnt{p,v}ct, and u32 for arch_timer_rate, matching the size of the registers these values are taken from. While we're changing them anyway, simplify the implementation of arch_timer_get_cnt{p,v}ct.
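On 32-bit ARM an unsigned long is only 32 bits wide, while CNTPCT/CNTVCT are 64-bit registers, so storing a counter read in an unsigned long silently drops the upper half. A minimal sketch of the hazard the u64 return type avoids (illustrative only, not part of the patch; the helper names here are made up):

#include <linux/types.h>
#include <linux/printk.h>

/* Illustrative 64-bit virtual counter read, using the same mrrc as the patch. */
static inline u64 read_cntvct_example(void)
{
	u64 cval;

	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

static void width_example(void)
{
	unsigned long narrow = read_cntvct_example();	/* truncated to 32 bits on 32-bit ARM */
	u64 wide = read_cntvct_example();		/* keeps the full 64-bit value */

	pr_info("narrow=%lu wide=%llu\n", narrow, wide);
}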
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Acked-by: Santosh Shilimkar --- arch/arm/kernel/arch_timer.c | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index 1bb3b582..498c29f 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -25,7 +25,7 @@ #include #include -static unsigned long arch_timer_rate; +static u32 arch_timer_rate; enum ppi_nr { PHYS_SECURE_PPI, @@ -121,27 +121,18 @@ static inline u32 arch_timer_reg_read(const int access, const int reg) return val; } -static inline cycle_t arch_timer_counter_read(const int access) +static inline u64 arch_counter_get_cntpct(void) { - cycle_t cval = 0; - - if (access == ARCH_TIMER_PHYS_ACCESS) - asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); - - if (access == ARCH_TIMER_VIRT_ACCESS) - asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); - + u64 cval; + asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); return cval; } -static inline cycle_t arch_counter_get_cntpct(void) -{ - return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS); -} - -static inline cycle_t arch_counter_get_cntvct(void) +static inline u64 arch_counter_get_cntvct(void) { - return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS); + u64 cval; + asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); + return cval; } static irqreturn_t inline timer_handler(const int access, @@ -259,7 +250,7 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk) static int arch_timer_available(void) { - unsigned long freq; + u32 freq; if (arch_timer_rate == 0) { freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, @@ -275,7 +266,8 @@ static int arch_timer_available(void) } pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n", - arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100, + (unsigned long)arch_timer_rate / 1000000, + (unsigned long)(arch_timer_rate / 10000) % 100, arch_timer_use_virtual ? "virt" : "phys"); return 0; } -- cgit v1.1 From b8e243431fa11f542a0fd94e939543bcb7d236ee Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 14 Nov 2012 09:50:19 +0000 Subject: arm: arch_timer: standardise counter reading We're currently inconsistent with respect to our accesses to the physical and virtual counters, mixing and matching the two. This patch introduces and uses a function pointer for accessing the correct counter based on whether we're using physical or virtual interrupts. All current accesses to the counter accessors are redirected through it. When the driver is moved out to drivers/clocksource, there's the possibility that code called before the timer code is initialised will attempt to call arch_timer_read_counter (e.g. sched_clock for AArch64). To avoid having to check whether the timer has been initialised either in arch_timer_read_counter or one of its callers, a default implementation is assigned that simply returns 0.
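Restated outside the flattened diff for readability, the pattern introduced here is a function pointer with a harmless default that is only swapped for a real implementation once the device tree has been parsed (names as in the patch below):

/* Default used before initialisation; early callers simply see 0. */
static u64 arch_timer_read_zero(void)
{
	return 0;
}

u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;

/* Later, once the DT probe knows which timer is usable: */
/*	arch_timer_read_counter = arch_counter_get_cntvct;	(virtual timer)  */
/*	arch_timer_read_counter = arch_counter_get_cntpct;	(physical timer) */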
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Cc: Marc Zyngier Cc: Santosh Shilimkar --- arch/arm/kernel/arch_timer.c | 58 +++++++++++++++++--------------------------- 1 file changed, 22 insertions(+), 36 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index 498c29f..eb78f43 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -272,51 +272,37 @@ static int arch_timer_available(void) return 0; } -static u32 notrace arch_counter_get_cntpct32(void) +/* + * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to + * call it before it has been initialised. Rather than incur a performance + * penalty checking for initialisation, provide a default implementation that + * won't lead to time appearing to jump backwards. + */ +static u64 arch_timer_read_zero(void) { - cycle_t cnt = arch_counter_get_cntpct(); - - /* - * The sched_clock infrastructure only knows about counters - * with at most 32bits. Forget about the upper 24 bits for the - * time being... - */ - return (u32)cnt; + return 0; } -static u32 notrace arch_counter_get_cntvct32(void) -{ - cycle_t cnt = arch_counter_get_cntvct(); +u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero; - /* - * The sched_clock infrastructure only knows about counters - * with at most 32bits. Forget about the upper 24 bits for the - * time being... - */ - return (u32)cnt; +static u32 arch_timer_read_counter32(void) +{ + return arch_timer_read_counter(); } static cycle_t arch_counter_read(struct clocksource *cs) { - /* - * Always use the physical counter for the clocksource. - * CNTHCTL.PL1PCTEN must be set to 1. - */ - return arch_counter_get_cntpct(); + return arch_timer_read_counter(); } static unsigned long arch_timer_read_current_timer(void) { - return arch_counter_get_cntpct(); + return arch_timer_read_counter(); } static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) { - /* - * Always use the physical counter for the clocksource. - * CNTHCTL.PL1PCTEN must be set to 1. - */ - return arch_counter_get_cntpct(); + return arch_timer_read_counter(); } static struct clocksource clocksource_counter = { @@ -484,23 +470,23 @@ int __init arch_timer_of_register(void) } } + if (arch_timer_use_virtual) + arch_timer_read_counter = arch_counter_get_cntvct; + else + arch_timer_read_counter = arch_counter_get_cntpct; + return arch_timer_register(); } int __init arch_timer_sched_clock_init(void) { - u32 (*cnt32)(void); int err; err = arch_timer_available(); if (err) return err; - if (arch_timer_use_virtual) - cnt32 = arch_counter_get_cntvct32; - else - cnt32 = arch_counter_get_cntpct32; - - setup_sched_clock(cnt32, 32, arch_timer_rate); + setup_sched_clock(arch_timer_read_counter32, + 32, arch_timer_rate); return 0; } -- cgit v1.1 From fd5583a4c271ec03e2da04196aaaab177b385eb8 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Nov 2012 16:46:40 +0000 Subject: arm: arch_timer: split cntfrq accessor The CNTFRQ register is not duplicated for physical and virtual timers, and accessing it as if it were is confusing. Instead, use a separate accessor which doesn't take the access type as a parameter. 
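Put differently, CNTFRQ gets its own helper instead of going through the phys/virt-parameterised arch_timer_reg_read(). A short sketch of the accessor and its probe-time use, matching the hunks below:

static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;

	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
	return val;
}

/* Probe-time use: fall back to CNTFRQ when DT provides no clock-frequency. */
/*	if (arch_timer_rate == 0)				*/
/*		arch_timer_rate = arch_timer_get_cntfrq();	*/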
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Acked-by: Santosh Shilimkar --- arch/arm/kernel/arch_timer.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index eb78f43..f31c9ee 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -51,8 +51,7 @@ static bool arch_timer_use_virtual = true; #define ARCH_TIMER_CTRL_IT_STAT (1 << 2) #define ARCH_TIMER_REG_CTRL 0 -#define ARCH_TIMER_REG_FREQ 1 -#define ARCH_TIMER_REG_TVAL 2 +#define ARCH_TIMER_REG_TVAL 1 #define ARCH_TIMER_PHYS_ACCESS 0 #define ARCH_TIMER_VIRT_ACCESS 1 @@ -101,9 +100,6 @@ static inline u32 arch_timer_reg_read(const int access, const int reg) case ARCH_TIMER_REG_TVAL: asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); break; - case ARCH_TIMER_REG_FREQ: - asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); - break; } } @@ -121,6 +117,13 @@ static inline u32 arch_timer_reg_read(const int access, const int reg) return val; } +static inline u32 arch_timer_get_cntfrq(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); + return val; +} + static inline u64 arch_counter_get_cntpct(void) { u64 cval; @@ -253,8 +256,7 @@ static int arch_timer_available(void) u32 freq; if (arch_timer_rate == 0) { - freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, - ARCH_TIMER_REG_FREQ); + freq = arch_timer_get_cntfrq(); /* Check the timer frequency. */ if (freq == 0) { -- cgit v1.1 From ec944c93a293bee6b4cc6b6f1c9560526c7ed635 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Nov 2012 16:18:00 +0000 Subject: arm: arch_timer: factor out register accessors Currently the arch_timer register accessors are thrown together with the main driver, preventing us from porting the driver to other architectures. This patch moves the register accessors into a header file, as with the arm64 version. Constants required by the accessors are also moved. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Acked-by: Santosh Shilimkar --- arch/arm/include/asm/arch_timer.h | 94 +++++++++++++++++++++++++++++++++++++++ arch/arm/kernel/arch_timer.c | 92 -------------------------------------- 2 files changed, 94 insertions(+), 92 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index d40229d..db0fdc4 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -1,13 +1,107 @@ #ifndef __ASMARM_ARCH_TIMER_H #define __ASMARM_ARCH_TIMER_H +#include #include #include +#include #ifdef CONFIG_ARM_ARCH_TIMER int arch_timer_of_register(void); int arch_timer_sched_clock_init(void); struct timecounter *arch_timer_get_timecounter(void); + +#define ARCH_TIMER_CTRL_ENABLE (1 << 0) +#define ARCH_TIMER_CTRL_IT_MASK (1 << 1) +#define ARCH_TIMER_CTRL_IT_STAT (1 << 2) + +#define ARCH_TIMER_REG_CTRL 0 +#define ARCH_TIMER_REG_TVAL 1 + +#define ARCH_TIMER_PHYS_ACCESS 0 +#define ARCH_TIMER_VIRT_ACCESS 1 + +/* + * These register accessors are marked inline so the compiler can + * nicely work out which register we want, and chuck away the rest of + * the code. At least it does so with a recent GCC (4.6.3). 
+ */ +static inline void arch_timer_reg_write(const int access, const int reg, u32 val) +{ + if (access == ARCH_TIMER_PHYS_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); + break; + } + } + + if (access == ARCH_TIMER_VIRT_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val)); + break; + } + } +} + +static inline u32 arch_timer_reg_read(const int access, const int reg) +{ + u32 val = 0; + + if (access == ARCH_TIMER_PHYS_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); + break; + } + } + + if (access == ARCH_TIMER_VIRT_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val)); + break; + } + } + + return val; +} + +static inline u32 arch_timer_get_cntfrq(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); + return val; +} + +static inline u64 arch_counter_get_cntpct(void) +{ + u64 cval; + + asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); + return cval; +} + +static inline u64 arch_counter_get_cntvct(void) +{ + u64 cval; + + asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); + return cval; +} #else static inline int arch_timer_of_register(void) { diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index f31c9ee..e973cc0 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -46,98 +46,6 @@ static bool arch_timer_use_virtual = true; * Architected system timer support. */ -#define ARCH_TIMER_CTRL_ENABLE (1 << 0) -#define ARCH_TIMER_CTRL_IT_MASK (1 << 1) -#define ARCH_TIMER_CTRL_IT_STAT (1 << 2) - -#define ARCH_TIMER_REG_CTRL 0 -#define ARCH_TIMER_REG_TVAL 1 - -#define ARCH_TIMER_PHYS_ACCESS 0 -#define ARCH_TIMER_VIRT_ACCESS 1 - -/* - * These register accessors are marked inline so the compiler can - * nicely work out which register we want, and chuck away the rest of - * the code. At least it does so with a recent GCC (4.6.3). 
- */ -static inline void arch_timer_reg_write(const int access, const int reg, u32 val) -{ - if (access == ARCH_TIMER_PHYS_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); - break; - } - } - - if (access == ARCH_TIMER_VIRT_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val)); - break; - } - } - - isb(); -} - -static inline u32 arch_timer_reg_read(const int access, const int reg) -{ - u32 val = 0; - - if (access == ARCH_TIMER_PHYS_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); - break; - } - } - - if (access == ARCH_TIMER_VIRT_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val)); - break; - } - } - - return val; -} - -static inline u32 arch_timer_get_cntfrq(void) -{ - u32 val; - asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); - return val; -} - -static inline u64 arch_counter_get_cntpct(void) -{ - u64 cval; - asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); - return cval; -} - -static inline u64 arch_counter_get_cntvct(void) -{ - u64 cval; - asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); - return cval; -} - static irqreturn_t inline timer_handler(const int access, struct clock_event_device *evt) { -- cgit v1.1 From 45801042225c66a66fb2cb50fae6ff71883a99d6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 11 Jan 2013 14:32:33 +0000 Subject: arm: arch_timer: add isbs to register accessors Without the isbs in arch_timer_get_cnt{p,v}ct the cpu may speculate reads and return stale values. This could be bad for code sensitive to changes in expected deltas between calls (e.g. the delay loop). Without isbs in arch_timer_reg_write the processor may reorder instructions around enabling/disabling of the timer or writing the compare value, which we probably don't want. This patch adds isbs to prevent those issues. 
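The placement is the important detail: an isb() before each counter read stops the read being speculated ahead of earlier instructions, and an isb() at the end of arch_timer_reg_write stops later instructions overtaking the timer programming. A sketch of the patched virtual-counter accessor, as in the hunk below:

static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	isb();	/* order the read against preceding instructions */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}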
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas --- arch/arm/include/asm/arch_timer.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index db0fdc4..75975d9 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -49,6 +49,8 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val break; } } + + isb(); } static inline u32 arch_timer_reg_read(const int access, const int reg) @@ -91,6 +93,7 @@ static inline u64 arch_counter_get_cntpct(void) { u64 cval; + isb(); asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); return cval; } @@ -99,6 +102,7 @@ static inline u64 arch_counter_get_cntvct(void) { u64 cval; + isb(); asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); return cval; } -- cgit v1.1 From 1ba1cefc277865a0ac222f53bbbf2ebacad1559a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Nov 2012 17:29:43 +0000 Subject: arm: arch_timer: divorce from local_timer api Currently, the arch_timer driver is tied to the arm port, as it relies on code in arch/arm/smp.c to setup and teardown timers as cores are hotplugged on and off. The timer is registered through an arm-specific registration mechanism, preventing sharing the driver with the arm64 port. This patch moves the driver to using a cpu notifier instead, making it easier to port. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Acked-by: Santosh Shilimkar --- arch/arm/kernel/arch_timer.c | 52 ++++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 23 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index e973cc0..c8dfec0 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -21,7 +21,6 @@ #include #include -#include #include #include @@ -37,7 +36,7 @@ enum ppi_nr { static int arch_timer_ppi[MAX_TIMER_PPI]; -static struct clock_event_device __percpu **arch_timer_evt; +static struct clock_event_device __percpu *arch_timer_evt; static struct delay_timer arch_delay_timer; static bool arch_timer_use_virtual = true; @@ -63,14 +62,14 @@ static irqreturn_t inline timer_handler(const int access, static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) { - struct clock_event_device *evt = *(struct clock_event_device **)dev_id; + struct clock_event_device *evt = dev_id; return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); } static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) { - struct clock_event_device *evt = *(struct clock_event_device **)dev_id; + struct clock_event_device *evt = dev_id; return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); } @@ -141,13 +140,13 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk) clk->set_next_event = arch_timer_set_next_event_phys; } + clk->cpumask = cpumask_of(smp_processor_id()); + clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); - *__this_cpu_ptr(arch_timer_evt) = clk; - if (arch_timer_use_virtual) enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); else { @@ -251,12 +250,26 @@ static void __cpuinit arch_timer_stop(struct clock_event_device *clk) clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); } -static struct local_timer_ops arch_timer_ops __cpuinitdata = { - .setup = arch_timer_setup, - .stop = arch_timer_stop, -}; +static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, + 
unsigned long action, void *hcpu) +{ + struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt); + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + arch_timer_setup(evt); + break; + case CPU_DYING: + arch_timer_stop(evt); + break; + } + + return NOTIFY_OK; +} -static struct clock_event_device arch_timer_global_evt; +static struct notifier_block arch_timer_cpu_nb __cpuinitdata = { + .notifier_call = arch_timer_cpu_notify, +}; static int __init arch_timer_register(void) { @@ -267,7 +280,7 @@ static int __init arch_timer_register(void) if (err) goto out; - arch_timer_evt = alloc_percpu(struct clock_event_device *); + arch_timer_evt = alloc_percpu(struct clock_event_device); if (!arch_timer_evt) { err = -ENOMEM; goto out; @@ -303,20 +316,13 @@ static int __init arch_timer_register(void) goto out_free; } - err = local_timer_register(&arch_timer_ops); - if (err) { - /* - * We couldn't register as a local timer (could be - * because we're on a UP platform, or because some - * other local timer is already present...). Try as a - * global timer instead. - */ - arch_timer_global_evt.cpumask = cpumask_of(0); - err = arch_timer_setup(&arch_timer_global_evt); - } + err = register_cpu_notifier(&arch_timer_cpu_nb); if (err) goto out_free_irq; + /* Immediately configure the timer on the boot CPU */ + arch_timer_setup(this_cpu_ptr(arch_timer_evt)); + /* Use the architected timer for the delay loop. */ arch_delay_timer.read_current_timer = &arch_timer_read_current_timer; arch_delay_timer.freq = arch_timer_rate; -- cgit v1.1 From b2deabe3ba664a1ec47400c0ca285e951874e0cc Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 14 Nov 2012 10:32:24 +0000 Subject: arm: arch_timer: add arch_counter_set_user_access Several bits in CNTKCTL reset to 0, including PL0VTEN. For architectures using the generic timer which wish to have a fast gettimeofday vDSO implementation, these bits must be set to 1 by the kernel. For architectures without a vDSO, it's best to leave the bits set to 0 for now to ensure that if and when support is added, it's implemented sanely architecture wide. As the bootloader might set PL0VTEN to a value that doesn't correspond to that which the kernel prefers, we must explicitly set it to the architecture port's preferred value. This patch adds arch_counter_set_user_access, which sets the PL0 access permissions to that required by the architecture. For arch/arm, this currently means disabling all userspace access. 
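For reference, the (3 << 8) | (7 << 0) mask used below covers these CNTKCTL fields (bit positions per the ARMv7 generic-timer description; worth double-checking against the ARM ARM). A sketch with hypothetical named constants, equivalent to the patch:

/* Hypothetical names for the bits cleared below; not defined by the patch. */
#define CNTKCTL_PL0PCTEN	(1 << 0)	/* PL0 access to the physical counter */
#define CNTKCTL_PL0VCTEN	(1 << 1)	/* PL0 access to the virtual counter */
#define CNTKCTL_EVNTEN		(1 << 2)	/* event stream enable */
#define CNTKCTL_PL0VTEN		(1 << 8)	/* PL0 access to the virtual timer registers */
#define CNTKCTL_PL0PTEN		(1 << 9)	/* PL0 access to the physical timer registers */

static inline void arch_counter_set_user_access_sketch(void)
{
	u32 cntkctl;

	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));

	/* same effect as the patch's cntkctl &= ~((3 << 8) | (7 << 0)); */
	cntkctl &= ~(CNTKCTL_PL0PTEN | CNTKCTL_PL0VTEN | CNTKCTL_EVNTEN |
		     CNTKCTL_PL0VCTEN | CNTKCTL_PL0PCTEN);

	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}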
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas --- arch/arm/include/asm/arch_timer.h | 12 ++++++++++++ arch/arm/kernel/arch_timer.c | 2 ++ 2 files changed, 14 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 75975d9..729f6d9 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -106,6 +106,18 @@ static inline u64 arch_counter_get_cntvct(void) asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); return cval; } + +static inline void __cpuinit arch_counter_set_user_access(void) +{ + u32 cntkctl; + + asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl)); + + /* disable user access to everything */ + cntkctl &= ~((3 << 8) | (7 << 0)); + + asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl)); +} #else static inline int arch_timer_of_register(void) { diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index c8dfec0..94f5033 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -155,6 +155,8 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk) enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); } + arch_counter_set_user_access(); + return 0; } -- cgit v1.1 From 8a4da6e36c582ff746191eca85b6c1c068dbfbd6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Nov 2012 14:33:44 +0000 Subject: arm: arch_timer: move core to drivers/clocksource The core functionality of the arch_timer driver is not directly tied to anything under arch/arm, and can be split out. This patch factors out the core of the arch_timer driver, so it can be shared with other architectures. A couple of functions are added so that architecture-specific code can interact with the driver without needing to touch its internals. The ARM_ARCH_TIMER config variable is moved out to drivers/clocksource/Kconfig, existing uses in arch/arm are replaced with HAVE_ARM_ARCH_TIMER, which selects it. 
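After the split, the arch/arm file shrinks to a thin wrapper over the shared driver. Roughly, as the diff below shows, with arch_timer_init() and arch_timer_get_rate() being the hooks the relocated driver now exports:

int __init arch_timer_of_register(void)
{
	int ret;

	ret = arch_timer_init();	/* probing now lives in drivers/clocksource */
	if (ret)
		return ret;

	arch_timer_delay_timer_register();	/* ARM-specific delay-loop hook */
	return 0;
}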
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier --- arch/arm/Kconfig | 3 +- arch/arm/include/asm/arch_timer.h | 19 +- arch/arm/kernel/arch_timer.c | 386 ++------------------------------------ arch/arm/mach-omap2/Kconfig | 2 +- 4 files changed, 24 insertions(+), 386 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 67874b8..e1162f5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1572,9 +1572,10 @@ config HAVE_ARM_SCU help This option enables support for the ARM system coherency unit -config ARM_ARCH_TIMER +config HAVE_ARM_ARCH_TIMER bool "Architected timer support" depends on CPU_V7 + select ARM_ARCH_TIMER help This option enables support for the ARM architected timer diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 729f6d9..7ade91d 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -4,22 +4,14 @@ #include #include #include +#include #include +#include + #ifdef CONFIG_ARM_ARCH_TIMER int arch_timer_of_register(void); int arch_timer_sched_clock_init(void); -struct timecounter *arch_timer_get_timecounter(void); - -#define ARCH_TIMER_CTRL_ENABLE (1 << 0) -#define ARCH_TIMER_CTRL_IT_MASK (1 << 1) -#define ARCH_TIMER_CTRL_IT_STAT (1 << 2) - -#define ARCH_TIMER_REG_CTRL 0 -#define ARCH_TIMER_REG_TVAL 1 - -#define ARCH_TIMER_PHYS_ACCESS 0 -#define ARCH_TIMER_VIRT_ACCESS 1 /* * These register accessors are marked inline so the compiler can @@ -128,11 +120,6 @@ static inline int arch_timer_sched_clock_init(void) { return -ENXIO; } - -static inline struct timecounter *arch_timer_get_timecounter(void) -{ - return NULL; -} #endif #endif diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index 94f5033..36ebcf4 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -9,402 +9,52 @@ * published by the Free Software Foundation. */ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include #include -static u32 arch_timer_rate; +#include -enum ppi_nr { - PHYS_SECURE_PPI, - PHYS_NONSECURE_PPI, - VIRT_PPI, - HYP_PPI, - MAX_TIMER_PPI -}; - -static int arch_timer_ppi[MAX_TIMER_PPI]; - -static struct clock_event_device __percpu *arch_timer_evt; -static struct delay_timer arch_delay_timer; - -static bool arch_timer_use_virtual = true; - -/* - * Architected system timer support. 
- */ - -static irqreturn_t inline timer_handler(const int access, - struct clock_event_device *evt) -{ - unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { - ctrl |= ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); - evt->event_handler(evt); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - -static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; - - return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); -} - -static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; - - return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); -} - -static inline void timer_set_mode(const int access, int mode) -{ - unsigned long ctrl; - switch (mode) { - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - ctrl &= ~ARCH_TIMER_CTRL_ENABLE; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); - break; - default: - break; - } -} - -static void arch_timer_set_mode_virt(enum clock_event_mode mode, - struct clock_event_device *clk) -{ - timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode); -} - -static void arch_timer_set_mode_phys(enum clock_event_mode mode, - struct clock_event_device *clk) -{ - timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode); -} - -static inline void set_next_event(const int access, unsigned long evt) -{ - unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - ctrl |= ARCH_TIMER_CTRL_ENABLE; - ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt); - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); -} - -static int arch_timer_set_next_event_virt(unsigned long evt, - struct clock_event_device *unused) -{ - set_next_event(ARCH_TIMER_VIRT_ACCESS, evt); - return 0; -} - -static int arch_timer_set_next_event_phys(unsigned long evt, - struct clock_event_device *unused) -{ - set_next_event(ARCH_TIMER_PHYS_ACCESS, evt); - return 0; -} - -static int __cpuinit arch_timer_setup(struct clock_event_device *clk) -{ - clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; - clk->name = "arch_sys_timer"; - clk->rating = 450; - if (arch_timer_use_virtual) { - clk->irq = arch_timer_ppi[VIRT_PPI]; - clk->set_mode = arch_timer_set_mode_virt; - clk->set_next_event = arch_timer_set_next_event_virt; - } else { - clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; - clk->set_mode = arch_timer_set_mode_phys; - clk->set_next_event = arch_timer_set_next_event_phys; - } - - clk->cpumask = cpumask_of(smp_processor_id()); - - clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); - - clockevents_config_and_register(clk, arch_timer_rate, - 0xf, 0x7fffffff); - - if (arch_timer_use_virtual) - enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); - else { - enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); - } - - arch_counter_set_user_access(); - - return 0; -} - -static int arch_timer_available(void) -{ - u32 freq; - - if (arch_timer_rate == 0) { - freq = arch_timer_get_cntfrq(); - - /* Check the timer frequency. 
*/ - if (freq == 0) { - pr_warn("Architected timer frequency not available\n"); - return -EINVAL; - } - - arch_timer_rate = freq; - } - - pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n", - (unsigned long)arch_timer_rate / 1000000, - (unsigned long)(arch_timer_rate / 10000) % 100, - arch_timer_use_virtual ? "virt" : "phys"); - return 0; -} - -/* - * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to - * call it before it has been initialised. Rather than incur a performance - * penalty checking for initialisation, provide a default implementation that - * won't lead to time appearing to jump backwards. - */ -static u64 arch_timer_read_zero(void) -{ - return 0; -} - -u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero; - -static u32 arch_timer_read_counter32(void) -{ - return arch_timer_read_counter(); -} - -static cycle_t arch_counter_read(struct clocksource *cs) +static unsigned long arch_timer_read_counter_long(void) { return arch_timer_read_counter(); } -static unsigned long arch_timer_read_current_timer(void) +static u32 arch_timer_read_counter_u32(void) { return arch_timer_read_counter(); } -static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) -{ - return arch_timer_read_counter(); -} - -static struct clocksource clocksource_counter = { - .name = "arch_sys_counter", - .rating = 400, - .read = arch_counter_read, - .mask = CLOCKSOURCE_MASK(56), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - -static struct cyclecounter cyclecounter = { - .read = arch_counter_read_cc, - .mask = CLOCKSOURCE_MASK(56), -}; - -static struct timecounter timecounter; - -struct timecounter *arch_timer_get_timecounter(void) -{ - return &timecounter; -} - -static void __cpuinit arch_timer_stop(struct clock_event_device *clk) -{ - pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", - clk->irq, smp_processor_id()); - - if (arch_timer_use_virtual) - disable_percpu_irq(arch_timer_ppi[VIRT_PPI]); - else { - disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); - } - - clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); -} - -static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt); - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - arch_timer_setup(evt); - break; - case CPU_DYING: - arch_timer_stop(evt); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block arch_timer_cpu_nb __cpuinitdata = { - .notifier_call = arch_timer_cpu_notify, -}; +static struct delay_timer arch_delay_timer; -static int __init arch_timer_register(void) +static void __init arch_timer_delay_timer_register(void) { - int err; - int ppi; - - err = arch_timer_available(); - if (err) - goto out; - - arch_timer_evt = alloc_percpu(struct clock_event_device); - if (!arch_timer_evt) { - err = -ENOMEM; - goto out; - } - - clocksource_register_hz(&clocksource_counter, arch_timer_rate); - cyclecounter.mult = clocksource_counter.mult; - cyclecounter.shift = clocksource_counter.shift; - timecounter_init(&timecounter, &cyclecounter, - arch_counter_get_cntpct()); - - if (arch_timer_use_virtual) { - ppi = arch_timer_ppi[VIRT_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_virt, - "arch_timer", arch_timer_evt); - } else { - ppi = arch_timer_ppi[PHYS_SECURE_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_phys, - "arch_timer", arch_timer_evt); - 
if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) { - ppi = arch_timer_ppi[PHYS_NONSECURE_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_phys, - "arch_timer", arch_timer_evt); - if (err) - free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], - arch_timer_evt); - } - } - - if (err) { - pr_err("arch_timer: can't register interrupt %d (%d)\n", - ppi, err); - goto out_free; - } - - err = register_cpu_notifier(&arch_timer_cpu_nb); - if (err) - goto out_free_irq; - - /* Immediately configure the timer on the boot CPU */ - arch_timer_setup(this_cpu_ptr(arch_timer_evt)); - /* Use the architected timer for the delay loop. */ - arch_delay_timer.read_current_timer = &arch_timer_read_current_timer; - arch_delay_timer.freq = arch_timer_rate; + arch_delay_timer.read_current_timer = arch_timer_read_counter_long; + arch_delay_timer.freq = arch_timer_get_rate(); register_current_timer_delay(&arch_delay_timer); - return 0; - -out_free_irq: - if (arch_timer_use_virtual) - free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt); - else { - free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], - arch_timer_evt); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], - arch_timer_evt); - } - -out_free: - free_percpu(arch_timer_evt); -out: - return err; } -static const struct of_device_id arch_timer_of_match[] __initconst = { - { .compatible = "arm,armv7-timer", }, - {}, -}; - int __init arch_timer_of_register(void) { - struct device_node *np; - u32 freq; - int i; - - np = of_find_matching_node(NULL, arch_timer_of_match); - if (!np) { - pr_err("arch_timer: can't find DT node\n"); - return -ENODEV; - } + int ret; - /* Try to determine the frequency from the device tree or CNTFRQ */ - if (!of_property_read_u32(np, "clock-frequency", &freq)) - arch_timer_rate = freq; + ret = arch_timer_init(); + if (ret) + return ret; - for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) - arch_timer_ppi[i] = irq_of_parse_and_map(np, i); + arch_timer_delay_timer_register(); - of_node_put(np); - - /* - * If no interrupt provided for virtual timer, we'll have to - * stick to the physical timer. It'd better be accessible... - */ - if (!arch_timer_ppi[VIRT_PPI]) { - arch_timer_use_virtual = false; - - if (!arch_timer_ppi[PHYS_SECURE_PPI] || - !arch_timer_ppi[PHYS_NONSECURE_PPI]) { - pr_warn("arch_timer: No interrupt available, giving up\n"); - return -EINVAL; - } - } - - if (arch_timer_use_virtual) - arch_timer_read_counter = arch_counter_get_cntvct; - else - arch_timer_read_counter = arch_counter_get_cntpct; - - return arch_timer_register(); + return 0; } int __init arch_timer_sched_clock_init(void) { - int err; - - err = arch_timer_available(); - if (err) - return err; + if (arch_timer_get_rate() == 0) + return -ENXIO; - setup_sched_clock(arch_timer_read_counter32, - 32, arch_timer_rate); + setup_sched_clock(arch_timer_read_counter_u32, + 32, arch_timer_get_rate()); return 0; } diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 41b581f..9d7909e 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -76,12 +76,12 @@ config ARCH_OMAP4 config SOC_OMAP5 bool "TI OMAP5" - select ARM_ARCH_TIMER select ARM_CPU_SUSPEND if PM select ARM_GIC select CPU_V7 select HAVE_SMP select COMMON_CLK + select HAVE_ARM_ARCH_TIMER comment "OMAP Core Type" depends on ARCH_OMAP2 -- cgit v1.1