Diffstat (limited to 'drivers/clocksource')
43 files changed, 1432 insertions, 156 deletions
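Most of the changes below either convert clocksource read callbacks from cycle_t to u64, rename cpuhp state strings, or add new OF-declared timer drivers (arc_timer, renesas-ostm, timer-gemini) plus the arch timer out-of-line erratum workaround framework. As an illustrative sketch only, not part of this diff, the common registration pattern the new drivers follow looks roughly like this; every name here (foo_*, "vendor,foo-timer", the 25 MHz rate) is hypothetical:

#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *foo_base;

/* Read callback returns u64, matching the cycle_t -> u64 conversion in this series */
static u64 foo_clocksource_read(struct clocksource *cs)
{
	return (u64)readl_relaxed(foo_base);
}

static struct clocksource foo_cs = {
	.name	= "foo-timer",
	.rating	= 300,
	.read	= foo_clocksource_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init foo_timer_init(struct device_node *np)
{
	foo_base = of_iomap(np, 0);
	if (!foo_base)
		return -ENXIO;

	/* A real driver would take the rate from of_clk_get()/clk_get_rate() */
	return clocksource_register_hz(&foo_cs, 25000000);
}
CLOCKSOURCE_OF_DECLARE(foo_timer, "vendor,foo-timer", foo_timer_init);

The drivers added in this diff follow the same shape but additionally wire up a clock_event_device, per-CPU IRQs, or erratum-specific read paths where the hardware requires it.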
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index e2c6e43..3356ab8 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -5,6 +5,10 @@ config CLKSRC_OF bool select CLKSRC_PROBE +config CLKEVT_OF + bool + select CLKEVT_PROBE + config CLKSRC_ACPI bool select CLKSRC_PROBE @@ -12,6 +16,9 @@ config CLKSRC_ACPI config CLKSRC_PROBE bool +config CLKEVT_PROBE + bool + config CLKSRC_I8253 bool @@ -60,6 +67,16 @@ config DW_APB_TIMER_OF select DW_APB_TIMER select CLKSRC_OF +config GEMINI_TIMER + bool "Cortina Gemini timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + depends on HAS_IOMEM + select CLKSRC_MMIO + select CLKSRC_OF + select MFD_SYSCON + help + Enables support for the Gemini timer + config ROCKCHIP_TIMER bool "Rockchip timer driver" if COMPILE_TEST depends on ARM || ARM64 @@ -282,6 +299,26 @@ config CLKSRC_MPS2 select CLKSRC_MMIO select CLKSRC_OF +config ARC_TIMERS + bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_OF + help + These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores + (ARC700 as well as ARC HS38). + TIMER0 serves as clockevent while TIMER1 provides clocksource + +config ARC_TIMERS_64BIT + bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + depends on ARC_TIMERS + select CLKSRC_OF + help + This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP) + RTC is implemented inside the core, while GFRC sits outside the core in + ARConnect IP block. Driver automatically picks one of them for clocksource + as appropriate. + config ARM_ARCH_TIMER bool select CLKSRC_OF if OF @@ -305,16 +342,30 @@ config ARM_ARCH_TIMER_EVTSTREAM This must be disabled for hardware validation purposes to detect any hardware anomalies of missing events. +config ARM_ARCH_TIMER_OOL_WORKAROUND + bool + config FSL_ERRATUM_A008585 bool "Workaround for Freescale/NXP Erratum A-008585" default y depends on ARM_ARCH_TIMER && ARM64 + select ARM_ARCH_TIMER_OOL_WORKAROUND help This option enables a workaround for Freescale/NXP Erratum A-008585 ("ARM generic timer may contain an erroneous value"). The workaround will only be active if the fsl,erratum-a008585 property is found in the timer node. +config HISILICON_ERRATUM_161010101 + bool "Workaround for Hisilicon Erratum 161010101" + default y + select ARM_ARCH_TIMER_OOL_WORKAROUND + depends on ARM_ARCH_TIMER && ARM64 + help + This option enables a workaround for Hisilicon Erratum + 161010101. The workaround will be active if the hisilicon,erratum-161010101 + property is found in the timer node. + config ARM_GLOBAL_TIMER bool "Support for the ARM global timer" if COMPILE_TEST select CLKSRC_OF if OF @@ -447,6 +498,13 @@ config SH_TIMER_MTU2 Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas. This hardware comes with 16 bit-timer registers. +config RENESAS_OSTM + bool "Renesas OSTM timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables the support for the Renesas OSTM. 
+ config SH_TIMER_TMU bool "Renesas TMU timer driver" if COMPILE_TEST depends on GENERIC_CLOCKEVENTS diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index cf87f40..d227d13 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_CLKSRC_PROBE) += clksrc-probe.o +obj-$(CONFIG_CLKEVT_PROBE) += clkevt-probe.o obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o @@ -8,6 +9,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o +obj-$(CONFIG_RENESAS_OSTM) += renesas-ostm.o obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o obj-$(CONFIG_EM_TIMER_STI) += em_sti.o obj-$(CONFIG_CLKBLD_I8253) += i8253.o @@ -15,6 +17,7 @@ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o +obj-$(CONFIG_GEMINI_TIMER) += timer-gemini.o obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o @@ -51,6 +54,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o +obj-$(CONFIG_ARC_TIMERS) += arc_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index 28037d0..1961e35 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -58,16 +58,16 @@ u32 acpi_pm_read_verified(void) return v2; } -static cycle_t acpi_pm_read(struct clocksource *cs) +static u64 acpi_pm_read(struct clocksource *cs) { - return (cycle_t)read_pmtmr(); + return (u64)read_pmtmr(); } static struct clocksource clocksource_acpi_pm = { .name = "acpi_pm", .rating = 200, .read = acpi_pm_read, - .mask = (cycle_t)ACPI_PM_MASK, + .mask = (u64)ACPI_PM_MASK, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -81,9 +81,9 @@ static int __init acpi_pm_good_setup(char *__str) } __setup("acpi_pm_good", acpi_pm_good_setup); -static cycle_t acpi_pm_read_slow(struct clocksource *cs) +static u64 acpi_pm_read_slow(struct clocksource *cs) { - return (cycle_t)acpi_pm_read_verified(); + return (u64)acpi_pm_read_verified(); } static inline void acpi_pm_need_workaround(void) @@ -145,7 +145,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE, */ static int verify_pmtmr_rate(void) { - cycle_t value1, value2; + u64 value1, value2; unsigned long count, delta; mach_prepare_counter(); @@ -175,7 +175,7 @@ static int verify_pmtmr_rate(void) static int __init init_acpi_pm_clocksource(void) { - cycle_t value1, value2; + u64 value1, value2; unsigned int i, j = 0; if (!pmtmr_ioport) diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c new file mode 100644 index 0000000..7517f95 --- /dev/null +++ b/drivers/clocksource/arc_timer.c @@ -0,0 +1,336 @@ +/* + * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com) + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. 
(www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1, Each can be + * programmed to go from @count to @limit and optionally interrupt. + * We've designated TIMER0 for clockevents and TIMER1 for clocksource + * + * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP) + * which are suitable for UP and SMP based clocksources respectively + */ + +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/of.h> +#include <linux/of_irq.h> + +#include <soc/arc/timers.h> +#include <soc/arc/mcip.h> + + +static unsigned long arc_timer_freq; + +static int noinline arc_get_timer_clk(struct device_node *node) +{ + struct clk *clk; + int ret; + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) { + pr_err("timer missing clk"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("Couldn't enable parent clk\n"); + return ret; + } + + arc_timer_freq = clk_get_rate(clk); + + return 0; +} + +/********** Clock Source Device *********/ + +#ifdef CONFIG_ARC_TIMERS_64BIT + +static u64 arc_read_gfrc(struct clocksource *cs) +{ + unsigned long flags; + u32 l, h; + + local_irq_save(flags); + + __mcip_cmd(CMD_GFRC_READ_LO, 0); + l = read_aux_reg(ARC_REG_MCIP_READBACK); + + __mcip_cmd(CMD_GFRC_READ_HI, 0); + h = read_aux_reg(ARC_REG_MCIP_READBACK); + + local_irq_restore(flags); + + return (((u64)h) << 32) | l; +} + +static struct clocksource arc_counter_gfrc = { + .name = "ARConnect GFRC", + .rating = 400, + .read = arc_read_gfrc, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init arc_cs_setup_gfrc(struct device_node *node) +{ + struct mcip_bcr mp; + int ret; + + READ_BCR(ARC_REG_MCIP_BCR, mp); + if (!mp.gfrc) { + pr_warn("Global-64-bit-Ctr clocksource not detected"); + return -ENXIO; + } + + ret = arc_get_timer_clk(node); + if (ret) + return ret; + + return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq); +} +CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc); + +#define AUX_RTC_CTRL 0x103 +#define AUX_RTC_LOW 0x104 +#define AUX_RTC_HIGH 0x105 + +static u64 arc_read_rtc(struct clocksource *cs) +{ + unsigned long status; + u32 l, h; + + /* + * hardware has an internal state machine which tracks readout of + * low/high and updates the CTRL.status if + * - interrupt/exception taken between the two reads + * - high increments after low has been read + */ + do { + l = read_aux_reg(AUX_RTC_LOW); + h = read_aux_reg(AUX_RTC_HIGH); + status = read_aux_reg(AUX_RTC_CTRL); + } while (!(status & _BITUL(31))); + + return (((u64)h) << 32) | l; +} + +static struct clocksource arc_counter_rtc = { + .name = "ARCv2 RTC", + .rating = 350, + .read = arc_read_rtc, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init arc_cs_setup_rtc(struct device_node *node) +{ + struct bcr_timer timer; + int ret; + + READ_BCR(ARC_REG_TIMERS_BCR, timer); + if (!timer.rtc) { + pr_warn("Local-64-bit-Ctr clocksource not detected"); + return -ENXIO; + } + + /* Local to CPU hence not usable in SMP */ + if (IS_ENABLED(CONFIG_SMP)) { + pr_warn("Local-64-bit-Ctr not usable in SMP"); + return -EINVAL; + } + + ret = 
arc_get_timer_clk(node); + if (ret) + return ret; + + write_aux_reg(AUX_RTC_CTRL, 1); + + return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq); +} +CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc); + +#endif + +/* + * 32bit TIMER1 to keep counting monotonically and wraparound + */ + +static u64 arc_read_timer1(struct clocksource *cs) +{ + return (u64) read_aux_reg(ARC_REG_TIMER1_CNT); +} + +static struct clocksource arc_counter_timer1 = { + .name = "ARC Timer1", + .rating = 300, + .read = arc_read_timer1, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init arc_cs_setup_timer1(struct device_node *node) +{ + int ret; + + /* Local to CPU hence not usable in SMP */ + if (IS_ENABLED(CONFIG_SMP)) + return -EINVAL; + + ret = arc_get_timer_clk(node); + if (ret) + return ret; + + write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX); + write_aux_reg(ARC_REG_TIMER1_CNT, 0); + write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH); + + return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq); +} + +/********** Clock Event Device *********/ + +static int arc_timer_irq; + +/* + * Arm the timer to interrupt after @cycles + * The distinction for oneshot/periodic is done in arc_event_timer_ack() below + */ +static void arc_timer_event_setup(unsigned int cycles) +{ + write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles); + write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */ + + write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH); +} + + +static int arc_clkevent_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + arc_timer_event_setup(delta); + return 0; +} + +static int arc_clkevent_set_periodic(struct clock_event_device *dev) +{ + /* + * At X Hz, 1 sec = 1000ms -> X cycles; + * 10ms -> X / 100 cycles + */ + arc_timer_event_setup(arc_timer_freq / HZ); + return 0; +} + +static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { + .name = "ARC Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .set_next_event = arc_clkevent_set_next_event, + .set_state_periodic = arc_clkevent_set_periodic, +}; + +static irqreturn_t timer_irq_handler(int irq, void *dev_id) +{ + /* + * Note that generic IRQ core could have passed @evt for @dev_id if + * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() + */ + struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); + int irq_reenable = clockevent_state_periodic(evt); + + /* + * Any write to CTRL reg ACks the interrupt, we rewrite the + * Count when [N]ot [H]alted bit. 
+ * And re-arm it if perioid by [I]nterrupt [E]nable bit + */ + write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + + +static int arc_timer_starting_cpu(unsigned int cpu) +{ + struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); + + evt->cpumask = cpumask_of(smp_processor_id()); + + clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX); + enable_percpu_irq(arc_timer_irq, 0); + return 0; +} + +static int arc_timer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(arc_timer_irq); + return 0; +} + +/* + * clockevent setup for boot CPU + */ +static int __init arc_clockevent_setup(struct device_node *node) +{ + struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); + int ret; + + arc_timer_irq = irq_of_parse_and_map(node, 0); + if (arc_timer_irq <= 0) { + pr_err("clockevent: missing irq"); + return -EINVAL; + } + + ret = arc_get_timer_clk(node); + if (ret) { + pr_err("clockevent: missing clk"); + return ret; + } + + /* Needs apriori irq_set_percpu_devid() done in intc map function */ + ret = request_percpu_irq(arc_timer_irq, timer_irq_handler, + "Timer0 (per-cpu-tick)", evt); + if (ret) { + pr_err("clockevent: unable to request irq\n"); + return ret; + } + + ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING, + "clockevents/arc/timer:starting", + arc_timer_starting_cpu, + arc_timer_dying_cpu); + if (ret) { + pr_err("Failed to setup hotplug state"); + return ret; + } + return 0; +} + +static int __init arc_of_timer_init(struct device_node *np) +{ + static int init_count = 0; + int ret; + + if (!init_count) { + init_count = 1; + ret = arc_clockevent_setup(np); + } else { + ret = arc_cs_setup_timer1(np); + } + + return ret; +} +CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init); diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 73c487d..7a8a411 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -24,6 +24,7 @@ #include <linux/of_address.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/acpi.h> @@ -81,6 +82,7 @@ static struct clock_event_device __percpu *arch_timer_evt; static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI; static bool arch_timer_c3stop; static bool arch_timer_mem_use_virtual; +static bool arch_counter_suspend_stop; static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); @@ -95,41 +97,107 @@ early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg); */ #ifdef CONFIG_FSL_ERRATUM_A008585 -DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled); -EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled); - -static int fsl_a008585_enable = -1; - -static int __init early_fsl_a008585_cfg(char *buf) +/* + * The number of retries is an arbitrary value well beyond the highest number + * of iterations the loop has been observed to take. 
+ */ +#define __fsl_a008585_read_reg(reg) ({ \ + u64 _old, _new; \ + int _retries = 200; \ + \ + do { \ + _old = read_sysreg(reg); \ + _new = read_sysreg(reg); \ + _retries--; \ + } while (unlikely(_old != _new) && _retries); \ + \ + WARN_ON_ONCE(!_retries); \ + _new; \ +}) + +static u32 notrace fsl_a008585_read_cntp_tval_el0(void) { - int ret; - bool val; + return __fsl_a008585_read_reg(cntp_tval_el0); +} - ret = strtobool(buf, &val); - if (ret) - return ret; +static u32 notrace fsl_a008585_read_cntv_tval_el0(void) +{ + return __fsl_a008585_read_reg(cntv_tval_el0); +} - fsl_a008585_enable = val; - return 0; +static u64 notrace fsl_a008585_read_cntvct_el0(void) +{ + return __fsl_a008585_read_reg(cntvct_el0); } -early_param("clocksource.arm_arch_timer.fsl-a008585", early_fsl_a008585_cfg); +#endif -u32 __fsl_a008585_read_cntp_tval_el0(void) +#ifdef CONFIG_HISILICON_ERRATUM_161010101 +/* + * Verify whether the value of the second read is larger than the first by + * less than 32 is the only way to confirm the value is correct, so clear the + * lower 5 bits to check whether the difference is greater than 32 or not. + * Theoretically the erratum should not occur more than twice in succession + * when reading the system counter, but it is possible that some interrupts + * may lead to more than twice read errors, triggering the warning, so setting + * the number of retries far beyond the number of iterations the loop has been + * observed to take. + */ +#define __hisi_161010101_read_reg(reg) ({ \ + u64 _old, _new; \ + int _retries = 50; \ + \ + do { \ + _old = read_sysreg(reg); \ + _new = read_sysreg(reg); \ + _retries--; \ + } while (unlikely((_new - _old) >> 5) && _retries); \ + \ + WARN_ON_ONCE(!_retries); \ + _new; \ +}) + +static u32 notrace hisi_161010101_read_cntp_tval_el0(void) { - return __fsl_a008585_read_reg(cntp_tval_el0); + return __hisi_161010101_read_reg(cntp_tval_el0); } -u32 __fsl_a008585_read_cntv_tval_el0(void) +static u32 notrace hisi_161010101_read_cntv_tval_el0(void) { - return __fsl_a008585_read_reg(cntv_tval_el0); + return __hisi_161010101_read_reg(cntv_tval_el0); } -u64 __fsl_a008585_read_cntvct_el0(void) +static u64 notrace hisi_161010101_read_cntvct_el0(void) { - return __fsl_a008585_read_reg(cntvct_el0); + return __hisi_161010101_read_reg(cntvct_el0); } -EXPORT_SYMBOL(__fsl_a008585_read_cntvct_el0); -#endif /* CONFIG_FSL_ERRATUM_A008585 */ +#endif + +#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND +const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL; +EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); + +DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled); +EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled); + +static const struct arch_timer_erratum_workaround ool_workarounds[] = { +#ifdef CONFIG_FSL_ERRATUM_A008585 + { + .id = "fsl,erratum-a008585", + .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0, + .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0, + .read_cntvct_el0 = fsl_a008585_read_cntvct_el0, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161010101 + { + .id = "hisilicon,erratum-161010101", + .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, + .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, + .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, + }, +#endif +}; +#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */ static __always_inline void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, @@ -280,8 +348,8 @@ static __always_inline void set_next_event(const int access, unsigned long evt, 
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); } -#ifdef CONFIG_FSL_ERRATUM_A008585 -static __always_inline void fsl_a008585_set_next_event(const int access, +#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND +static __always_inline void erratum_set_next_event_generic(const int access, unsigned long evt, struct clock_event_device *clk) { unsigned long ctrl; @@ -299,20 +367,20 @@ static __always_inline void fsl_a008585_set_next_event(const int access, arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); } -static int fsl_a008585_set_next_event_virt(unsigned long evt, +static int erratum_set_next_event_virt(unsigned long evt, struct clock_event_device *clk) { - fsl_a008585_set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); + erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk); return 0; } -static int fsl_a008585_set_next_event_phys(unsigned long evt, +static int erratum_set_next_event_phys(unsigned long evt, struct clock_event_device *clk) { - fsl_a008585_set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); + erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk); return 0; } -#endif /* CONFIG_FSL_ERRATUM_A008585 */ +#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */ static int arch_timer_set_next_event_virt(unsigned long evt, struct clock_event_device *clk) @@ -342,16 +410,16 @@ static int arch_timer_set_next_event_phys_mem(unsigned long evt, return 0; } -static void fsl_a008585_set_sne(struct clock_event_device *clk) +static void erratum_workaround_set_sne(struct clock_event_device *clk) { -#ifdef CONFIG_FSL_ERRATUM_A008585 +#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND if (!static_branch_unlikely(&arch_timer_read_ool_enabled)) return; if (arch_timer_uses_ppi == VIRT_PPI) - clk->set_next_event = fsl_a008585_set_next_event_virt; + clk->set_next_event = erratum_set_next_event_virt; else - clk->set_next_event = fsl_a008585_set_next_event_phys; + clk->set_next_event = erratum_set_next_event_phys; #endif } @@ -384,7 +452,7 @@ static void __arch_timer_setup(unsigned type, BUG(); } - fsl_a008585_set_sne(clk); + erratum_workaround_set_sne(clk); } else { clk->features |= CLOCK_EVT_FEAT_DYNIRQ; clk->name = "arch_mem_timer"; @@ -561,12 +629,12 @@ static u64 arch_counter_get_cntvct_mem(void) */ u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; -static cycle_t arch_counter_read(struct clocksource *cs) +static u64 arch_counter_read(struct clocksource *cs) { return arch_timer_read_counter(); } -static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) +static u64 arch_counter_read_cc(const struct cyclecounter *cc) { return arch_timer_read_counter(); } @@ -576,10 +644,10 @@ static struct clocksource clocksource_counter = { .rating = 400, .read = arch_counter_read, .mask = CLOCKSOURCE_MASK(56), - .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static struct cyclecounter cyclecounter = { +static struct cyclecounter cyclecounter __ro_after_init = { .read = arch_counter_read_cc, .mask = CLOCKSOURCE_MASK(56), }; @@ -604,7 +672,7 @@ static void __init arch_counter_register(unsigned type) clocksource_counter.archdata.vdso_direct = true; -#ifdef CONFIG_FSL_ERRATUM_A008585 +#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND /* * Don't use the vdso fastpath if errata require using * the out-of-line counter accessor. 
@@ -616,6 +684,8 @@ static void __init arch_counter_register(unsigned type) arch_timer_read_counter = arch_counter_get_cntvct_mem; } + if (!arch_counter_suspend_stop) + clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; start_count = arch_timer_read_counter(); clocksource_register_hz(&clocksource_counter, arch_timer_rate); cyclecounter.mult = clocksource_counter.mult; @@ -735,7 +805,7 @@ static int __init arch_timer_register(void) /* Register and immediately configure the timer on the boot CPU */ err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, - "AP_ARM_ARCH_TIMER_STARTING", + "clockevents/arm/arch_timer:starting", arch_timer_starting_cpu, arch_timer_dying_cpu); if (err) goto out_unreg_cpupm; @@ -890,12 +960,15 @@ static int __init arch_timer_of_init(struct device_node *np) arch_timer_c3stop = !of_property_read_bool(np, "always-on"); -#ifdef CONFIG_FSL_ERRATUM_A008585 - if (fsl_a008585_enable < 0) - fsl_a008585_enable = of_property_read_bool(np, "fsl,erratum-a008585"); - if (fsl_a008585_enable) { - static_branch_enable(&arch_timer_read_ool_enabled); - pr_info("Enabling workaround for FSL erratum A-008585\n"); +#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND + for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) { + if (of_property_read_bool(np, ool_workarounds[i].id)) { + timer_unstable_counter_workaround = &ool_workarounds[i]; + static_branch_enable(&arch_timer_read_ool_enabled); + pr_info("arch_timer: Enabling workaround for %s\n", + timer_unstable_counter_workaround->id); + break; + } } #endif @@ -907,6 +980,10 @@ static int __init arch_timer_of_init(struct device_node *np) of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) arch_timer_uses_ppi = PHYS_SECURE_PPI; + /* On some systems, the counter stops ticking when in suspend. 
*/ + arch_counter_suspend_stop = of_property_read_bool(np, + "arm,no-tick-in-suspend"); + return arch_timer_init(); } CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); @@ -964,8 +1041,9 @@ static int __init arch_timer_mem_init(struct device_node *np) } ret= -ENXIO; - base = arch_counter_base = of_iomap(best_frame, 0); - if (!base) { + base = arch_counter_base = of_io_request_and_map(best_frame, 0, + "arch_mem_timer"); + if (IS_ERR(base)) { pr_err("arch_timer: Can't map frame's registers\n"); goto out; } diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index 8da0329..123ed20 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -195,7 +195,7 @@ static int gt_dying_cpu(unsigned int cpu) return 0; } -static cycle_t gt_clocksource_read(struct clocksource *cs) +static u64 gt_clocksource_read(struct clocksource *cs) { return gt_counter_read(); } @@ -316,7 +316,7 @@ static int __init global_timer_of_register(struct device_node *np) goto out_irq; err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, - "AP_ARM_GLOBAL_TIMER_STARTING", + "clockevents/arm/global_timer:starting", gt_starting_cpu, gt_dying_cpu); if (err) goto out_irq; diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index e71acf2..f2f29d2 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -96,7 +96,7 @@ static int __init bcm2835_timer_init(struct device_node *node) ret = of_property_read_u32(node, "clock-frequency", &freq); if (ret) { pr_err("Can't read clock-frequency"); - return ret; + goto err_iounmap; } system_clock = base + REG_COUNTER_LO; @@ -108,13 +108,15 @@ static int __init bcm2835_timer_init(struct device_node *node) irq = irq_of_parse_and_map(node, DEFAULT_TIMER); if (irq <= 0) { pr_err("Can't parse IRQ"); - return -EINVAL; + ret = -EINVAL; + goto err_iounmap; } timer = kzalloc(sizeof(*timer), GFP_KERNEL); if (!timer) { pr_err("Can't allocate timer struct\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_iounmap; } timer->control = base + REG_CONTROL; @@ -133,7 +135,7 @@ static int __init bcm2835_timer_init(struct device_node *node) ret = setup_irq(irq, &timer->act); if (ret) { pr_err("Can't set up timer IRQ\n"); - return ret; + goto err_iounmap; } clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); @@ -141,6 +143,10 @@ static int __init bcm2835_timer_init(struct device_node *node) pr_info("bcm2835: system timer (irq = %d)\n", irq); return 0; + +err_iounmap: + iounmap(base); + return ret; } CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer", bcm2835_timer_init); diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index fbfbdec..44e5e95 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -158,11 +158,11 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id) * * returns: Current timer counter register value **/ -static cycle_t __ttc_clocksource_read(struct clocksource *cs) +static u64 __ttc_clocksource_read(struct clocksource *cs) { struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; - return (cycle_t)readl_relaxed(timer->base_addr + + return (u64)readl_relaxed(timer->base_addr + TTC_COUNT_VAL_OFFSET); } diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c new file mode 100644 index 0000000..eb89b50 --- /dev/null +++ b/drivers/clocksource/clkevt-probe.c 
@@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016, Linaro Ltd. All rights reserved. + * Daniel Lezcano <daniel.lezcano@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/init.h> +#include <linux/of.h> +#include <linux/clockchips.h> + +extern struct of_device_id __clkevt_of_table[]; + +static const struct of_device_id __clkevt_of_table_sentinel + __used __section(__clkevt_of_table_end); + +int __init clockevent_probe(void) +{ + struct device_node *np; + const struct of_device_id *match; + of_init_fn_1_ret init_func; + int ret, clockevents = 0; + + for_each_matching_node_and_match(np, __clkevt_of_table, &match) { + if (!of_device_is_available(np)) + continue; + + init_func = match->data; + + ret = init_func(np); + if (ret) { + pr_warn("Failed to initialize '%s' (%d)\n", + np->name, ret); + continue; + } + + clockevents++; + } + + if (!clockevents) { + pr_crit("%s: no matching clockevent found\n", __func__); + return -ENODEV; + } + + return 0; +} diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c index 77a365f..c69e277 100644 --- a/drivers/clocksource/clksrc-dbx500-prcmu.c +++ b/drivers/clocksource/clksrc-dbx500-prcmu.c @@ -30,7 +30,7 @@ static void __iomem *clksrc_dbx500_timer_base; -static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) +static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) { void __iomem *base = clksrc_dbx500_timer_base; u32 count, count2; diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c index 89f1c2e..01f3f5a 100644 --- a/drivers/clocksource/dummy_timer.c +++ b/drivers/clocksource/dummy_timer.c @@ -34,7 +34,7 @@ static int dummy_timer_starting_cpu(unsigned int cpu) static int __init dummy_timer_register(void) { return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING, - "AP_DUMMY_TIMER_STARTING", + "clockevents/dummy_timer:starting", dummy_timer_starting_cpu, NULL); } early_initcall(dummy_timer_register); diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index 797505a..63e4f55 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -348,7 +348,7 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs) dw_apb_clocksource_read(dw_cs); } -static cycle_t __apbt_read_clocksource(struct clocksource *cs) +static u64 __apbt_read_clocksource(struct clocksource *cs) { u32 current_count; struct dw_apb_clocksource *dw_cs = @@ -357,7 +357,7 @@ static cycle_t __apbt_read_clocksource(struct clocksource *cs) current_count = apbt_readl_relaxed(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); - return (cycle_t)~current_count; + return (u64)~current_count; } static void apbt_restart_clocksource(struct clocksource *cs) @@ -416,7 +416,7 @@ void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs) * * @dw_cs: The clocksource to read. 
*/ -cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) +u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) { - return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); + return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); } diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index 19bb179..aff87df 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -110,9 +110,9 @@ static void em_sti_disable(struct em_sti_priv *p) clk_disable_unprepare(p->clk); } -static cycle_t em_sti_count(struct em_sti_priv *p) +static u64 em_sti_count(struct em_sti_priv *p) { - cycle_t ticks; + u64 ticks; unsigned long flags; /* the STI hardware buffers the 48-bit count, but to @@ -121,14 +121,14 @@ static cycle_t em_sti_count(struct em_sti_priv *p) * Always read STI_COUNT_H before STI_COUNT_L. */ raw_spin_lock_irqsave(&p->lock, flags); - ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; + ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; ticks |= em_sti_read(p, STI_COUNT_L); raw_spin_unlock_irqrestore(&p->lock, flags); return ticks; } -static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) +static u64 em_sti_set_next(struct em_sti_priv *p, u64 next) { unsigned long flags; @@ -198,7 +198,7 @@ static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) return container_of(cs, struct em_sti_priv, cs); } -static cycle_t em_sti_clocksource_read(struct clocksource *cs) +static u64 em_sti_clocksource_read(struct clocksource *cs) { return em_sti_count(cs_to_em_sti(cs)); } @@ -271,7 +271,7 @@ static int em_sti_clock_event_next(unsigned long delta, struct clock_event_device *ced) { struct em_sti_priv *p = ced_to_em_sti(ced); - cycle_t next; + u64 next; int safe; next = em_sti_set_next(p, em_sti_count(p) + delta); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 8f3488b..670ff0f 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -183,7 +183,7 @@ static u64 exynos4_read_count_64(void) hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U); } while (hi != hi2); - return ((cycle_t)hi << 32) | lo; + return ((u64)hi << 32) | lo; } /** @@ -199,7 +199,7 @@ static u32 notrace exynos4_read_count_32(void) return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); } -static cycle_t exynos4_frc_read(struct clocksource *cs) +static u64 exynos4_frc_read(struct clocksource *cs) { return exynos4_read_count_32(); } @@ -266,7 +266,7 @@ static void exynos4_mct_comp0_stop(void) static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles) { unsigned int tcon; - cycle_t comp_cycle; + u64 comp_cycle; tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); @@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) if (mct_int_type == MCT_INT_SPI) { if (evt->irq != -1) disable_irq_nosync(evt->irq); + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); } else { disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); } @@ -552,7 +553,7 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * /* Install hotplug callbacks which configure the timer on this CPU */ err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, - "AP_EXYNOS4_MCT_TIMER_STARTING", + "clockevents/exynos4/mct_timer:starting", exynos4_mct_starting_cpu, exynos4_mct_dying_cpu); if (err) diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c index 07d9d5b..5b27fb9 100644 --- a/drivers/clocksource/h8300_timer16.c 
+++ b/drivers/clocksource/h8300_timer16.c @@ -72,7 +72,7 @@ static inline struct timer16_priv *cs_to_priv(struct clocksource *cs) return container_of(cs, struct timer16_priv, cs); } -static cycle_t timer16_clocksource_read(struct clocksource *cs) +static u64 timer16_clocksource_read(struct clocksource *cs) { struct timer16_priv *p = cs_to_priv(cs); unsigned long raw, value; diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c index 7bdf199..72e1cf2 100644 --- a/drivers/clocksource/h8300_tpu.c +++ b/drivers/clocksource/h8300_tpu.c @@ -64,7 +64,7 @@ static inline struct tpu_priv *cs_to_priv(struct clocksource *cs) return container_of(cs, struct tpu_priv, cs); } -static cycle_t tpu_clocksource_read(struct clocksource *cs) +static u64 tpu_clocksource_read(struct clocksource *cs) { struct tpu_priv *p = cs_to_priv(cs); unsigned long flags; diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index 0efd36e..64f6490 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -25,7 +25,7 @@ EXPORT_SYMBOL(i8253_lock); * to just read by itself. So use jiffies to emulate a free * running counter: */ -static cycle_t i8253_read(struct clocksource *cs) +static u64 i8253_read(struct clocksource *cs) { static int old_count; static u32 old_jifs; @@ -83,7 +83,7 @@ static cycle_t i8253_read(struct clocksource *cs) count = (PIT_LATCH - 1) - count; - return (cycle_t)(jifs * PIT_LATCH) + count; + return (u64)(jifs * PIT_LATCH) + count; } static struct clocksource i8253_cs = { diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c index 54e1665..7c61226 100644 --- a/drivers/clocksource/jcore-pit.c +++ b/drivers/clocksource/jcore-pit.c @@ -57,7 +57,7 @@ static notrace u64 jcore_sched_clock_read(void) return seclo * NSEC_PER_SEC + nsec; } -static cycle_t jcore_clocksource_read(struct clocksource *cs) +static u64 jcore_clocksource_read(struct clocksource *cs) { return jcore_sched_clock_read(); } @@ -240,7 +240,7 @@ static int __init jcore_pit_init(struct device_node *node) } cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING, - "AP_JCORE_TIMER_STARTING", + "clockevents/jcore:starting", jcore_pit_local_init, NULL); return 0; diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c index a80ab3e..6fcf965 100644 --- a/drivers/clocksource/metag_generic.c +++ b/drivers/clocksource/metag_generic.c @@ -56,7 +56,7 @@ static int metag_timer_set_next_event(unsigned long delta, return 0; } -static cycle_t metag_clocksource_read(struct clocksource *cs) +static u64 metag_clocksource_read(struct clocksource *cs) { return __core_reg_get(TXTIMER); } @@ -154,6 +154,6 @@ int __init metag_generic_timer_init(void) /* Hook cpu boot to configure the CPU's timers */ return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING, - "AP_METAG_TIMER_STARTING", + "clockevents/metag:starting", arch_timer_starting_cpu, NULL); } diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 7a960cd..d9ef7a6 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -120,12 +120,12 @@ static int gic_clockevent_init(void) } cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING, - "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu, - gic_dying_cpu); + "clockevents/mips/gic/timer:starting", + gic_starting_cpu, gic_dying_cpu); return 0; } -static cycle_t gic_hpt_read(struct clocksource *cs) +static u64 gic_hpt_read(struct clocksource *cs) { return gic_read_count(); } diff --git 
a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c index c4f7d7a..4c4df98 100644 --- a/drivers/clocksource/mmio.c +++ b/drivers/clocksource/mmio.c @@ -20,24 +20,24 @@ static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c) return container_of(c, struct clocksource_mmio, clksrc); } -cycle_t clocksource_mmio_readl_up(struct clocksource *c) +u64 clocksource_mmio_readl_up(struct clocksource *c) { - return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg); + return (u64)readl_relaxed(to_mmio_clksrc(c)->reg); } -cycle_t clocksource_mmio_readl_down(struct clocksource *c) +u64 clocksource_mmio_readl_down(struct clocksource *c) { - return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; + return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; } -cycle_t clocksource_mmio_readw_up(struct clocksource *c) +u64 clocksource_mmio_readw_up(struct clocksource *c) { - return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg); + return (u64)readw_relaxed(to_mmio_clksrc(c)->reg); } -cycle_t clocksource_mmio_readw_down(struct clocksource *c) +u64 clocksource_mmio_readw_down(struct clocksource *c) { - return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; + return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; } /** @@ -51,7 +51,7 @@ cycle_t clocksource_mmio_readw_down(struct clocksource *c) */ int __init clocksource_mmio_init(void __iomem *base, const char *name, unsigned long hz, int rating, unsigned bits, - cycle_t (*read)(struct clocksource *)) + u64 (*read)(struct clocksource *)) { struct clocksource_mmio *cs; diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c index 2a8f470..7f34306 100644 --- a/drivers/clocksource/moxart_timer.c +++ b/drivers/clocksource/moxart_timer.c @@ -161,19 +161,22 @@ static int __init moxart_timer_init(struct device_node *node) timer->base = of_iomap(node, 0); if (!timer->base) { pr_err("%s: of_iomap failed\n", node->full_name); - return -ENXIO; + ret = -ENXIO; + goto out_free; } irq = irq_of_parse_and_map(node, 0); if (irq <= 0) { pr_err("%s: irq_of_parse_and_map failed\n", node->full_name); - return -EINVAL; + ret = -EINVAL; + goto out_unmap; } clk = of_clk_get(node, 0); if (IS_ERR(clk)) { pr_err("%s: of_clk_get failed\n", node->full_name); - return PTR_ERR(clk); + ret = PTR_ERR(clk); + goto out_unmap; } pclk = clk_get_rate(clk); @@ -186,7 +189,8 @@ static int __init moxart_timer_init(struct device_node *node) timer->t1_disable_val = ASPEED_TIMER1_DISABLE; } else { pr_err("%s: unknown platform\n", node->full_name); - return -EINVAL; + ret = -EINVAL; + goto out_unmap; } timer->count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ); @@ -208,14 +212,14 @@ static int __init moxart_timer_init(struct device_node *node) clocksource_mmio_readl_down); if (ret) { pr_err("%s: clocksource_mmio_init failed\n", node->full_name); - return ret; + goto out_unmap; } ret = request_irq(irq, moxart_timer_interrupt, IRQF_TIMER, node->name, &timer->clkevt); if (ret) { pr_err("%s: setup_irq failed\n", node->full_name); - return ret; + goto out_unmap; } /* Clear match registers */ @@ -241,6 +245,12 @@ static int __init moxart_timer_init(struct device_node *node) clockevents_config_and_register(&timer->clkevt, pclk, 0x4, 0xfffffffe); return 0; + +out_unmap: + iounmap(timer->base); +out_free: + kfree(timer); + return ret; } CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init); CLOCKSOURCE_OF_DECLARE(aspeed, "aspeed,ast2400-timer", moxart_timer_init); diff --git a/drivers/clocksource/mxs_timer.c 
b/drivers/clocksource/mxs_timer.c index 0ba0a91..99b77af 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c @@ -97,7 +97,7 @@ static void timrot_irq_acknowledge(void) HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR); } -static cycle_t timrotv1_get_cycles(struct clocksource *cs) +static u64 timrotv1_get_cycles(struct clocksource *cs) { return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1)) & 0xffff0000) >> 16); diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index 3e1cb51..1c24de2 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c @@ -19,6 +19,7 @@ #include <linux/clockchips.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <clocksource/pxa.h> @@ -220,17 +221,16 @@ CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init); /* * Legacy timer init for non device-tree boards. */ -void __init pxa_timer_nodt_init(int irq, void __iomem *base, - unsigned long clock_tick_rate) +void __init pxa_timer_nodt_init(int irq, void __iomem *base) { struct clk *clk; timer_base = base; clk = clk_get(NULL, "OSTIMER0"); - if (clk && !IS_ERR(clk)) + if (clk && !IS_ERR(clk)) { clk_prepare_enable(clk); - else + pxa_timer_common_init(irq, clk_get_rate(clk)); + } else { pr_crit("%s: unable to get clk\n", __func__); - - pxa_timer_common_init(irq, clock_tick_rate); + } } diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c index 3283cfa..ee358cd 100644 --- a/drivers/clocksource/qcom-timer.c +++ b/drivers/clocksource/qcom-timer.c @@ -89,7 +89,7 @@ static struct clock_event_device __percpu *msm_evt; static void __iomem *source_base; -static notrace cycle_t msm_read_timer_count(struct clocksource *cs) +static notrace u64 msm_read_timer_count(struct clocksource *cs) { return readl_relaxed(source_base + TIMER_COUNT_VAL); } @@ -182,7 +182,7 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, } else { /* Install and invoke hotplug callbacks */ res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING, - "AP_QCOM_TIMER_STARTING", + "clockevents/qcom/timer:starting", msm_local_timer_starting_cpu, msm_local_timer_dying_cpu); if (res) { diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c new file mode 100644 index 0000000..c76f576 --- /dev/null +++ b/drivers/clocksource/renesas-ostm.c @@ -0,0 +1,265 @@ +/* + * Renesas Timer Support - OSTM + * + * Copyright (C) 2017 Renesas Electronics America, Inc. + * Copyright (C) 2017 Chris Brandt + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/sched_clock.h> +#include <linux/slab.h> + +/* + * The OSTM contains independent channels. + * The first OSTM channel probed will be set up as a free running + * clocksource. Additionally we will use this clocksource for the system + * schedule timer sched_clock(). 
+ * + * The second (or more) channel probed will be set up as an interrupt + * driven clock event. + */ + +struct ostm_device { + void __iomem *base; + unsigned long ticks_per_jiffy; + struct clock_event_device ced; +}; + +static void __iomem *system_clock; /* For sched_clock() */ + +/* OSTM REGISTERS */ +#define OSTM_CMP 0x000 /* RW,32 */ +#define OSTM_CNT 0x004 /* R,32 */ +#define OSTM_TE 0x010 /* R,8 */ +#define OSTM_TS 0x014 /* W,8 */ +#define OSTM_TT 0x018 /* W,8 */ +#define OSTM_CTL 0x020 /* RW,8 */ + +#define TE 0x01 +#define TS 0x01 +#define TT 0x01 +#define CTL_PERIODIC 0x00 +#define CTL_ONESHOT 0x02 +#define CTL_FREERUN 0x02 + +static struct ostm_device *ced_to_ostm(struct clock_event_device *ced) +{ + return container_of(ced, struct ostm_device, ced); +} + +static void ostm_timer_stop(struct ostm_device *ostm) +{ + if (readb(ostm->base + OSTM_TE) & TE) { + writeb(TT, ostm->base + OSTM_TT); + + /* + * Read back the register simply to confirm the write operation + * has completed since I/O writes can sometimes get queued by + * the bus architecture. + */ + while (readb(ostm->base + OSTM_TE) & TE) + ; + } +} + +static int __init ostm_init_clksrc(struct ostm_device *ostm, unsigned long rate) +{ + /* + * irq not used (clock sources don't use interrupts) + */ + + ostm_timer_stop(ostm); + + writel(0, ostm->base + OSTM_CMP); + writeb(CTL_FREERUN, ostm->base + OSTM_CTL); + writeb(TS, ostm->base + OSTM_TS); + + return clocksource_mmio_init(ostm->base + OSTM_CNT, + "ostm", rate, + 300, 32, clocksource_mmio_readl_up); +} + +static u64 notrace ostm_read_sched_clock(void) +{ + return readl(system_clock); +} + +static void __init ostm_init_sched_clock(struct ostm_device *ostm, + unsigned long rate) +{ + system_clock = ostm->base + OSTM_CNT; + sched_clock_register(ostm_read_sched_clock, 32, rate); +} + +static int ostm_clock_event_next(unsigned long delta, + struct clock_event_device *ced) +{ + struct ostm_device *ostm = ced_to_ostm(ced); + + ostm_timer_stop(ostm); + + writel(delta, ostm->base + OSTM_CMP); + writeb(CTL_ONESHOT, ostm->base + OSTM_CTL); + writeb(TS, ostm->base + OSTM_TS); + + return 0; +} + +static int ostm_shutdown(struct clock_event_device *ced) +{ + struct ostm_device *ostm = ced_to_ostm(ced); + + ostm_timer_stop(ostm); + + return 0; +} +static int ostm_set_periodic(struct clock_event_device *ced) +{ + struct ostm_device *ostm = ced_to_ostm(ced); + + if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced)) + ostm_timer_stop(ostm); + + writel(ostm->ticks_per_jiffy - 1, ostm->base + OSTM_CMP); + writeb(CTL_PERIODIC, ostm->base + OSTM_CTL); + writeb(TS, ostm->base + OSTM_TS); + + return 0; +} + +static int ostm_set_oneshot(struct clock_event_device *ced) +{ + struct ostm_device *ostm = ced_to_ostm(ced); + + ostm_timer_stop(ostm); + + return 0; +} + +static irqreturn_t ostm_timer_interrupt(int irq, void *dev_id) +{ + struct ostm_device *ostm = dev_id; + + if (clockevent_state_oneshot(&ostm->ced)) + ostm_timer_stop(ostm); + + /* notify clockevent layer */ + if (ostm->ced.event_handler) + ostm->ced.event_handler(&ostm->ced); + + return IRQ_HANDLED; +} + +static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq, + unsigned long rate) +{ + struct clock_event_device *ced = &ostm->ced; + int ret = -ENXIO; + + ret = request_irq(irq, ostm_timer_interrupt, + IRQF_TIMER | IRQF_IRQPOLL, + "ostm", ostm); + if (ret) { + pr_err("ostm: failed to request irq\n"); + return ret; + } + + ced->name = "ostm"; + ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC; + 
ced->set_state_shutdown = ostm_shutdown; + ced->set_state_periodic = ostm_set_periodic; + ced->set_state_oneshot = ostm_set_oneshot; + ced->set_next_event = ostm_clock_event_next; + ced->shift = 32; + ced->rating = 300; + ced->cpumask = cpumask_of(0); + clockevents_config_and_register(ced, rate, 0xf, 0xffffffff); + + return 0; +} + +static int __init ostm_init(struct device_node *np) +{ + struct ostm_device *ostm; + int ret = -EFAULT; + struct clk *ostm_clk = NULL; + int irq; + unsigned long rate; + + ostm = kzalloc(sizeof(*ostm), GFP_KERNEL); + if (!ostm) + return -ENOMEM; + + ostm->base = of_iomap(np, 0); + if (!ostm->base) { + pr_err("ostm: failed to remap I/O memory\n"); + goto err; + } + + irq = irq_of_parse_and_map(np, 0); + if (irq < 0) { + pr_err("ostm: Failed to get irq\n"); + goto err; + } + + ostm_clk = of_clk_get(np, 0); + if (IS_ERR(ostm_clk)) { + pr_err("ostm: Failed to get clock\n"); + ostm_clk = NULL; + goto err; + } + + ret = clk_prepare_enable(ostm_clk); + if (ret) { + pr_err("ostm: Failed to enable clock\n"); + goto err; + } + + rate = clk_get_rate(ostm_clk); + ostm->ticks_per_jiffy = (rate + HZ / 2) / HZ; + + /* + * First probed device will be used as system clocksource. Any + * additional devices will be used as clock events. + */ + if (!system_clock) { + ret = ostm_init_clksrc(ostm, rate); + + if (!ret) { + ostm_init_sched_clock(ostm, rate); + pr_info("ostm: used for clocksource\n"); + } + + } else { + ret = ostm_init_clkevt(ostm, irq, rate); + + if (!ret) + pr_info("ostm: used for clock events\n"); + } + +err: + if (ret) { + clk_disable_unprepare(ostm_clk); + iounmap(ostm->base); + kfree(ostm); + return ret; + } + + return 0; +} + +CLOCKSOURCE_OF_DECLARE(ostm, "renesas,ostm", ostm_init); diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c index 54565bd..0093ece 100644 --- a/drivers/clocksource/samsung_pwm_timer.c +++ b/drivers/clocksource/samsung_pwm_timer.c @@ -307,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs) samsung_time_start(pwm.source_id, true); } -static cycle_t notrace samsung_clocksource_read(struct clocksource *c) +static u64 notrace samsung_clocksource_read(struct clocksource *c) { return ~readl_relaxed(pwm.source_reg); } diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c index 64f9e82..a46660b 100644 --- a/drivers/clocksource/scx200_hrt.c +++ b/drivers/clocksource/scx200_hrt.c @@ -43,10 +43,10 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)"); /* The base timer frequency, * 27 if selected */ #define HRT_FREQ 1000000 -static cycle_t read_hrt(struct clocksource *cs) +static u64 read_hrt(struct clocksource *cs) { /* Read the timer value */ - return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET); + return (u64) inl(scx200_cb_base + SCx200_TIMER_OFFSET); } static struct clocksource cs_hrt = { diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 103c493..28757ed 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -612,7 +612,7 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs) return container_of(cs, struct sh_cmt_channel, cs); } -static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) +static u64 sh_cmt_clocksource_read(struct clocksource *cs) { struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); unsigned long flags, raw; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 469e776..1fbf2aa 100644 --- a/drivers/clocksource/sh_tmu.c +++ 
b/drivers/clocksource/sh_tmu.c @@ -255,7 +255,7 @@ static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs) return container_of(cs, struct sh_tmu_channel, cs); } -static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) +static u64 sh_tmu_clocksource_read(struct clocksource *cs) { struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 4da2af9..d4ca996 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -41,7 +41,7 @@ static void __iomem *tcaddr; -static cycle_t tc_get_cycles(struct clocksource *cs) +static u64 tc_get_cycles(struct clocksource *cs) { unsigned long flags; u32 lower, upper; @@ -56,7 +56,7 @@ static cycle_t tc_get_cycles(struct clocksource *cs) return (upper << 16) | lower; } -static cycle_t tc_get_cycles32(struct clocksource *cs) +static u64 tc_get_cycles32(struct clocksource *cs) { return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); } diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 3c39e6f..4440aef 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -320,7 +320,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np) } res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING, - "AP_ARMADA_TIMER_STARTING", + "clockevents/armada:starting", armada_370_xp_timer_starting_cpu, armada_370_xp_timer_dying_cpu); if (res) { diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c index a8e6c7d..3710e4d 100644 --- a/drivers/clocksource/time-pistachio.c +++ b/drivers/clocksource/time-pistachio.c @@ -67,7 +67,7 @@ static inline void gpt_writel(void __iomem *base, u32 value, u32 offset, writel(value, base + 0x20 * gpt_id + offset); } -static cycle_t notrace +static u64 notrace pistachio_clocksource_read_cycles(struct clocksource *cs) { struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); @@ -84,7 +84,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs) counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); raw_spin_unlock_irqrestore(&pcs->lock, flags); - return (cycle_t)~counter; + return (u64)~counter; } static u64 notrace pistachio_read_sched_clock(void) diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c index 4334e03..3d8a181 100644 --- a/drivers/clocksource/timer-atlas7.c +++ b/drivers/clocksource/timer-atlas7.c @@ -85,7 +85,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) } /* read 64-bit timer counter */ -static cycle_t sirfsoc_timer_read(struct clocksource *cs) +static u64 sirfsoc_timer_read(struct clocksource *cs) { u64 cycles; @@ -221,7 +221,7 @@ static int __init sirfsoc_clockevent_init(void) /* Install and invoke hotplug callbacks */ return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING, - "AP_MARCO_TIMER_STARTING", + "clockevents/marco:starting", sirfsoc_local_timer_starting_cpu, sirfsoc_local_timer_dying_cpu); } diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c index 6555821..c0b5df3 100644 --- a/drivers/clocksource/timer-atmel-pit.c +++ b/drivers/clocksource/timer-atmel-pit.c @@ -73,7 +73,7 @@ static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsign * Clocksource: just a monotonic counter of MCK/16 cycles. * We don't care whether or not PIT irqs are enabled. 
*/ -static cycle_t read_pit_clk(struct clocksource *cs) +static u64 read_pit_clk(struct clocksource *cs) { struct pit_data *data = clksrc_to_pit_data(cs); unsigned long flags; diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c index e90ab5b..be4ac76 100644 --- a/drivers/clocksource/timer-atmel-st.c +++ b/drivers/clocksource/timer-atmel-st.c @@ -92,7 +92,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id) return IRQ_NONE; } -static cycle_t read_clk32k(struct clocksource *cs) +static u64 read_clk32k(struct clocksource *cs) { return read_CRTR(); } diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c index 10318cc..e9f50d2 100644 --- a/drivers/clocksource/timer-digicolor.c +++ b/drivers/clocksource/timer-digicolor.c @@ -31,6 +31,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqreturn.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/of.h> #include <linux/of_address.h> diff --git a/drivers/clocksource/timer-gemini.c b/drivers/clocksource/timer-gemini.c new file mode 100644 index 0000000..dda27b7 --- /dev/null +++ b/drivers/clocksource/timer-gemini.c @@ -0,0 +1,277 @@ +/* + * Gemini timer driver + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * + * Based on a rewrite of arch/arm/mach-gemini/timer.c: + * Copyright (C) 2001-2006 Storlink, Corp. + * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> + */ +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/sched_clock.h> + +/* + * Relevant registers in the global syscon + */ +#define GLOBAL_STATUS 0x04 +#define CPU_AHB_RATIO_MASK (0x3 << 18) +#define CPU_AHB_1_1 (0x0 << 18) +#define CPU_AHB_3_2 (0x1 << 18) +#define CPU_AHB_24_13 (0x2 << 18) +#define CPU_AHB_2_1 (0x3 << 18) +#define REG_TO_AHB_SPEED(reg) ((((reg) >> 15) & 0x7) * 10 + 130) + +/* + * Register definitions for the timers + */ +#define TIMER1_COUNT (0x00) +#define TIMER1_LOAD (0x04) +#define TIMER1_MATCH1 (0x08) +#define TIMER1_MATCH2 (0x0c) +#define TIMER2_COUNT (0x10) +#define TIMER2_LOAD (0x14) +#define TIMER2_MATCH1 (0x18) +#define TIMER2_MATCH2 (0x1c) +#define TIMER3_COUNT (0x20) +#define TIMER3_LOAD (0x24) +#define TIMER3_MATCH1 (0x28) +#define TIMER3_MATCH2 (0x2c) +#define TIMER_CR (0x30) +#define TIMER_INTR_STATE (0x34) +#define TIMER_INTR_MASK (0x38) + +#define TIMER_1_CR_ENABLE (1 << 0) +#define TIMER_1_CR_CLOCK (1 << 1) +#define TIMER_1_CR_INT (1 << 2) +#define TIMER_2_CR_ENABLE (1 << 3) +#define TIMER_2_CR_CLOCK (1 << 4) +#define TIMER_2_CR_INT (1 << 5) +#define TIMER_3_CR_ENABLE (1 << 6) +#define TIMER_3_CR_CLOCK (1 << 7) +#define TIMER_3_CR_INT (1 << 8) +#define TIMER_1_CR_UPDOWN (1 << 9) +#define TIMER_2_CR_UPDOWN (1 << 10) +#define TIMER_3_CR_UPDOWN (1 << 11) +#define TIMER_DEFAULT_FLAGS (TIMER_1_CR_UPDOWN | \ + TIMER_3_CR_ENABLE | \ + TIMER_3_CR_UPDOWN) + +#define TIMER_1_INT_MATCH1 (1 << 0) +#define TIMER_1_INT_MATCH2 (1 << 1) +#define TIMER_1_INT_OVERFLOW (1 << 2) +#define TIMER_2_INT_MATCH1 (1 << 3) +#define TIMER_2_INT_MATCH2 (1 << 4) +#define TIMER_2_INT_OVERFLOW (1 << 5) +#define TIMER_3_INT_MATCH1 (1 << 6) +#define TIMER_3_INT_MATCH2 (1 << 7) +#define TIMER_3_INT_OVERFLOW (1 << 8) +#define TIMER_INT_ALL_MASK 0x1ff + +static unsigned int tick_rate; +static 
void __iomem *base; + +static u64 notrace gemini_read_sched_clock(void) +{ + return readl(base + TIMER3_COUNT); +} + +static int gemini_timer_set_next_event(unsigned long cycles, + struct clock_event_device *evt) +{ + u32 cr; + + /* Setup the match register */ + cr = readl(base + TIMER1_COUNT); + writel(cr + cycles, base + TIMER1_MATCH1); + if (readl(base + TIMER1_COUNT) - cr > cycles) + return -ETIME; + + return 0; +} + +static int gemini_timer_shutdown(struct clock_event_device *evt) +{ + u32 cr; + + /* + * Disable also for oneshot: the set_next() call will arm the timer + * instead. + */ + /* Stop timer and interrupt. */ + cr = readl(base + TIMER_CR); + cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT); + writel(cr, base + TIMER_CR); + + /* Setup counter start from 0 */ + writel(0, base + TIMER1_COUNT); + writel(0, base + TIMER1_LOAD); + + /* enable interrupt */ + cr = readl(base + TIMER_INTR_MASK); + cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2); + cr |= TIMER_1_INT_MATCH1; + writel(cr, base + TIMER_INTR_MASK); + + /* start the timer */ + cr = readl(base + TIMER_CR); + cr |= TIMER_1_CR_ENABLE; + writel(cr, base + TIMER_CR); + + return 0; +} + +static int gemini_timer_set_periodic(struct clock_event_device *evt) +{ + u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ); + u32 cr; + + /* Stop timer and interrupt */ + cr = readl(base + TIMER_CR); + cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT); + writel(cr, base + TIMER_CR); + + /* Setup timer to fire at 1/HT intervals. */ + cr = 0xffffffff - (period - 1); + writel(cr, base + TIMER1_COUNT); + writel(cr, base + TIMER1_LOAD); + + /* enable interrupt on overflow */ + cr = readl(base + TIMER_INTR_MASK); + cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2); + cr |= TIMER_1_INT_OVERFLOW; + writel(cr, base + TIMER_INTR_MASK); + + /* Start the timer */ + cr = readl(base + TIMER_CR); + cr |= TIMER_1_CR_ENABLE; + cr |= TIMER_1_CR_INT; + writel(cr, base + TIMER_CR); + + return 0; +} + +/* Use TIMER1 as clock event */ +static struct clock_event_device gemini_clockevent = { + .name = "TIMER1", + /* Reasonably fast and accurate clock event */ + .rating = 300, + .shift = 32, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = gemini_timer_set_next_event, + .set_state_shutdown = gemini_timer_shutdown, + .set_state_periodic = gemini_timer_set_periodic, + .set_state_oneshot = gemini_timer_shutdown, + .tick_resume = gemini_timer_shutdown, +}; + +/* + * IRQ handler for the timer + */ +static irqreturn_t gemini_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = &gemini_clockevent; + + evt->event_handler(evt); + return IRQ_HANDLED; +} + +static struct irqaction gemini_timer_irq = { + .name = "Gemini Timer Tick", + .flags = IRQF_TIMER, + .handler = gemini_timer_interrupt, +}; + +static int __init gemini_timer_of_init(struct device_node *np) +{ + static struct regmap *map; + int irq; + int ret; + u32 val; + + map = syscon_regmap_lookup_by_phandle(np, "syscon"); + if (IS_ERR(map)) { + pr_err("Can't get regmap for syscon handle"); + return -ENODEV; + } + ret = regmap_read(map, GLOBAL_STATUS, &val); + if (ret) { + pr_err("Can't read syscon status register"); + return -ENXIO; + } + + base = of_iomap(np, 0); + if (!base) { + pr_err("Can't remap registers"); + return -ENXIO; + } + /* IRQ for timer 1 */ + irq = irq_of_parse_and_map(np, 0); + if (irq <= 0) { + pr_err("Can't parse IRQ"); + return -EINVAL; + } + + tick_rate = REG_TO_AHB_SPEED(val) * 1000000; + printk(KERN_INFO "Bus: %dMHz", tick_rate / 1000000); + + 
tick_rate /= 6; /* APB bus run AHB*(1/6) */ + + switch (val & CPU_AHB_RATIO_MASK) { + case CPU_AHB_1_1: + printk(KERN_CONT "(1/1)\n"); + break; + case CPU_AHB_3_2: + printk(KERN_CONT "(3/2)\n"); + break; + case CPU_AHB_24_13: + printk(KERN_CONT "(24/13)\n"); + break; + case CPU_AHB_2_1: + printk(KERN_CONT "(2/1)\n"); + break; + } + + /* + * Reset the interrupt mask and status + */ + writel(TIMER_INT_ALL_MASK, base + TIMER_INTR_MASK); + writel(0, base + TIMER_INTR_STATE); + writel(TIMER_DEFAULT_FLAGS, base + TIMER_CR); + + /* + * Setup free-running clocksource timer (interrupts + * disabled.) + */ + writel(0, base + TIMER3_COUNT); + writel(0, base + TIMER3_LOAD); + writel(0, base + TIMER3_MATCH1); + writel(0, base + TIMER3_MATCH2); + clocksource_mmio_init(base + TIMER3_COUNT, + "gemini_clocksource", tick_rate, + 300, 32, clocksource_mmio_readl_up); + sched_clock_register(gemini_read_sched_clock, 32, tick_rate); + + /* + * Setup clockevent timer (interrupt-driven.) + */ + writel(0, base + TIMER1_COUNT); + writel(0, base + TIMER1_LOAD); + writel(0, base + TIMER1_MATCH1); + writel(0, base + TIMER1_MATCH2); + setup_irq(irq, &gemini_timer_irq); + gemini_clockevent.cpumask = cpumask_of(0); + clockevents_config_and_register(&gemini_clockevent, tick_rate, + 1, 0xffffffff); + + return 0; +} +CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "cortina,gemini-timer", + gemini_timer_of_init); diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c index 70c149a..da1f798 100644 --- a/drivers/clocksource/timer-nps.c +++ b/drivers/clocksource/timer-nps.c @@ -46,35 +46,62 @@ /* This array is per cluster of CPUs (Each NPS400 cluster got 256 CPUs) */ static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly; -static unsigned long nps_timer_rate; +static int __init nps_get_timer_clk(struct device_node *node, + unsigned long *timer_freq, + struct clk **clk) +{ + int ret; + + *clk = of_clk_get(node, 0); + ret = PTR_ERR_OR_ZERO(*clk); + if (ret) { + pr_err("timer missing clk"); + return ret; + } + + ret = clk_prepare_enable(*clk); + if (ret) { + pr_err("Couldn't enable parent clk\n"); + clk_put(*clk); + return ret; + } + + *timer_freq = clk_get_rate(*clk); + if (!(*timer_freq)) { + pr_err("Couldn't get clk rate\n"); + clk_disable_unprepare(*clk); + clk_put(*clk); + return -EINVAL; + } + + return 0; +} -static cycle_t nps_clksrc_read(struct clocksource *clksrc) +static u64 nps_clksrc_read(struct clocksource *clksrc) { int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET; - return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]); + return (u64)ioread32be(nps_msu_reg_low_addr[cluster]); } -static int __init nps_setup_clocksource(struct device_node *node, - struct clk *clk) +static int __init nps_setup_clocksource(struct device_node *node) { int ret, cluster; + struct clk *clk; + unsigned long nps_timer1_freq; + for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++) nps_msu_reg_low_addr[cluster] = nps_host_reg((cluster << NPS_CLUSTER_OFFSET), - NPS_MSU_BLKID, NPS_MSU_TICK_LOW); + NPS_MSU_BLKID, NPS_MSU_TICK_LOW); - ret = clk_prepare_enable(clk); - if (ret) { - pr_err("Couldn't enable parent clock\n"); + ret = nps_get_timer_clk(node, &nps_timer1_freq, &clk); + if (ret) return ret; - } - - nps_timer_rate = clk_get_rate(clk); - ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick", - nps_timer_rate, 301, 32, nps_clksrc_read); + ret = clocksource_mmio_init(nps_msu_reg_low_addr, "nps-tick", + nps_timer1_freq, 300, 32, nps_clksrc_read); if (ret) { pr_err("Couldn't register clock 
source.\n"); clk_disable_unprepare(clk); @@ -83,18 +110,175 @@ static int __init nps_setup_clocksource(struct device_node *node, return ret; } -static int __init nps_timer_init(struct device_node *node) +CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer", + nps_setup_clocksource); +CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1", + nps_setup_clocksource); + +#ifdef CONFIG_EZNPS_MTM_EXT +#include <soc/nps/mtm.h> + +/* Timer related Aux registers */ +#define NPS_REG_TIMER0_TSI 0xFFFFF850 +#define NPS_REG_TIMER0_LIMIT 0x23 +#define NPS_REG_TIMER0_CTRL 0x22 +#define NPS_REG_TIMER0_CNT 0x21 + +/* + * Interrupt Enabled (IE) - re-arm the timer + * Not Halted (NH) - is cleared when working with JTAG (for debug) + */ +#define TIMER0_CTRL_IE BIT(0) +#define TIMER0_CTRL_NH BIT(1) + +static unsigned long nps_timer0_freq; +static unsigned long nps_timer0_irq; + +static void nps_clkevent_rm_thread(void) +{ + int thread; + unsigned int cflags, enabled_threads; + + hw_schd_save(&cflags); + + enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI); + + /* remove thread from TSI1 */ + thread = read_aux_reg(CTOP_AUX_THREAD_ID); + enabled_threads &= ~(1 << thread); + write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads); + + /* Acknowledge and if needed re-arm the timer */ + if (!enabled_threads) + write_aux_reg(NPS_REG_TIMER0_CTRL, TIMER0_CTRL_NH); + else + write_aux_reg(NPS_REG_TIMER0_CTRL, + TIMER0_CTRL_IE | TIMER0_CTRL_NH); + + hw_schd_restore(cflags); +} + +static void nps_clkevent_add_thread(unsigned long delta) +{ + int thread; + unsigned int cflags, enabled_threads; + + hw_schd_save(&cflags); + + /* add thread to TSI1 */ + thread = read_aux_reg(CTOP_AUX_THREAD_ID); + enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI); + enabled_threads |= (1 << thread); + write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads); + + /* set next timer event */ + write_aux_reg(NPS_REG_TIMER0_LIMIT, delta); + write_aux_reg(NPS_REG_TIMER0_CNT, 0); + write_aux_reg(NPS_REG_TIMER0_CTRL, + TIMER0_CTRL_IE | TIMER0_CTRL_NH); + + hw_schd_restore(cflags); +} + +/* + * Whenever anyone tries to change modes, we just mask interrupts + * and wait for the next event to get set. 
+ */ +static int nps_clkevent_set_state(struct clock_event_device *dev) +{ + nps_clkevent_rm_thread(); + disable_percpu_irq(nps_timer0_irq); + + return 0; +} + +static int nps_clkevent_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + nps_clkevent_add_thread(delta); + enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE); + + return 0; +} + +static DEFINE_PER_CPU(struct clock_event_device, nps_clockevent_device) = { + .name = "NPS Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = nps_clkevent_set_next_event, + .set_state_oneshot = nps_clkevent_set_state, + .set_state_oneshot_stopped = nps_clkevent_set_state, + .set_state_shutdown = nps_clkevent_set_state, + .tick_resume = nps_clkevent_set_state, +}; + +static irqreturn_t timer_irq_handler(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + nps_clkevent_rm_thread(); + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static int nps_timer_starting_cpu(unsigned int cpu) +{ + struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device); + + evt->cpumask = cpumask_of(smp_processor_id()); + + clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX); + enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE); + + return 0; +} + +static int nps_timer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(nps_timer0_irq); + return 0; +} + +static int __init nps_setup_clockevent(struct device_node *node) { struct clk *clk; + int ret; - clk = of_clk_get(node, 0); - if (IS_ERR(clk)) { - pr_err("Can't get timer clock.\n"); - return PTR_ERR(clk); + nps_timer0_irq = irq_of_parse_and_map(node, 0); + if (nps_timer0_irq <= 0) { + pr_err("clockevent: missing irq"); + return -EINVAL; } - return nps_setup_clocksource(node, clk); + ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk); + if (ret) + return ret; + + /* Needs apriori irq_set_percpu_devid() done in intc map function */ + ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler, + "Timer0 (per-cpu-tick)", + &nps_clockevent_device); + if (ret) { + pr_err("Couldn't request irq\n"); + clk_disable_unprepare(clk); + return ret; + } + + ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING, + "clockevents/nps:starting", + nps_timer_starting_cpu, + nps_timer_dying_cpu); + if (ret) { + pr_err("Failed to setup hotplug state"); + clk_disable_unprepare(clk); + free_percpu_irq(nps_timer0_irq, &nps_clockevent_device); + return ret; + } + + return 0; } -CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer", - nps_timer_init); +CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0", + nps_setup_clockevent); +#endif /* CONFIG_EZNPS_MTM_EXT */ diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c index c32148e..bfa981a 100644 --- a/drivers/clocksource/timer-prima2.c +++ b/drivers/clocksource/timer-prima2.c @@ -72,7 +72,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) } /* read 64-bit timer counter */ -static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs) +static u64 notrace sirfsoc_timer_read(struct clocksource *cs) { u64 cycles; diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 4f87f3e..a3e662b 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -152,7 +152,7 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static cycle_t sun5i_clksrc_read(struct clocksource *clksrc) +static u64 sun5i_clksrc_read(struct clocksource 
*clksrc) { struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc); diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c index cf5b14e..6240677 100644 --- a/drivers/clocksource/timer-ti-32k.c +++ b/drivers/clocksource/timer-ti-32k.c @@ -65,11 +65,11 @@ static inline struct ti_32k *to_ti_32k(struct clocksource *cs) return container_of(cs, struct ti_32k, cs); } -static cycle_t notrace ti_32k_read_cycles(struct clocksource *cs) +static u64 notrace ti_32k_read_cycles(struct clocksource *cs) { struct ti_32k *ti = to_ti_32k(cs); - return (cycle_t)readl_relaxed(ti->counter); + return (u64)readl_relaxed(ti->counter); } static struct ti_32k ti_32k_timer = { diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c index b150694..d02b510 100644 --- a/drivers/clocksource/vt8500_timer.c +++ b/drivers/clocksource/vt8500_timer.c @@ -53,7 +53,7 @@ static void __iomem *regbase; -static cycle_t vt8500_timer_read(struct clocksource *cs) +static u64 vt8500_timer_read(struct clocksource *cs) { int loops = msecs_to_loops(10); writel(3, regbase + TIMER_CTRL_VAL); @@ -75,7 +75,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles, struct clock_event_device *evt) { int loops = msecs_to_loops(10); - cycle_t alarm = clocksource.read(&clocksource) + cycles; + u64 alarm = clocksource.read(&clocksource) + cycles; while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE) && --loops) cpu_relax(); |
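
A few illustrative sketches follow; none of them are part of the patch above, and all ex_* names, offsets and values are assumptions made for the example.

First, the new Gemini clockevent programs one-shot events by adding the requested delta to the free-running TIMER1 count, writing the sum into MATCH1, and returning -ETIME if the counter has already run past the deadline by the time the write lands. A minimal sketch of that pattern, assuming a 32-bit up-counter with one match/comparator register:

#include <linux/clockchips.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EX_TIMER_COUNT	0x00	/* free-running up-counter (hypothetical offset) */
#define EX_TIMER_MATCH1	0x08	/* comparator that raises the tick interrupt */

static void __iomem *ex_base;	/* hypothetical timer block, mapped at init */

static int ex_timer_set_next_event(unsigned long cycles,
				   struct clock_event_device *evt)
{
	u32 now = readl(ex_base + EX_TIMER_COUNT);

	/* Arm the comparator relative to the current count */
	writel(now + cycles, ex_base + EX_TIMER_MATCH1);

	/*
	 * If the counter has meanwhile advanced by more than the requested
	 * delta, the match value lies in the past and the interrupt will
	 * never fire; -ETIME tells the clockevents core to retry with a
	 * larger delta.
	 */
	if (readl(ex_base + EX_TIMER_COUNT) - now > cycles)
		return -ETIME;

	return 0;
}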
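
Likewise illustrative rather than from the patch: the new NPS clockevent path follows the common per-CPU tick recipe, a DEFINE_PER_CPU clock_event_device plus a percpu IRQ, with CPU-hotplug callbacks configuring and enabling the device on each CPU as it comes online. Condensed to its skeleton (the hotplug state constant and callback shape mirror the code above; the ex_* names, rate and IRQ handling are assumed, and the set_state/set_next_event callbacks are omitted for brevity):

#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct clock_event_device, ex_clockevent) = {
	.name		= "ex-percpu-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	/* .set_next_event / .set_state_* callbacks omitted in this sketch */
};

static unsigned long ex_timer_freq;	/* obtained from the clk at init time */
static int ex_timer_irq;		/* percpu IRQ, already requested */

static int ex_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&ex_clockevent);

	evt->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(evt, ex_timer_freq, 0, ULONG_MAX);
	enable_percpu_irq(ex_timer_irq, IRQ_TYPE_NONE);
	return 0;
}

static int ex_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(ex_timer_irq);
	return 0;
}

/* Called once from the init path, after request_percpu_irq() succeeded */
static int __init ex_timer_register_hotplug(void)
{
	return cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				 "clockevents/ex:starting",
				 ex_timer_starting_cpu, ex_timer_dying_cpu);
}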
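
Finally, the change that recurs in the read_pit_clk, read_clk32k, sirfsoc_timer_read, nps_clksrc_read, sun5i_clksrc_read, ti_32k_read_cycles and vt8500_timer_read hunks above is mechanical: the clocksource ->read() hook now returns u64 instead of the retired cycle_t typedef, with the body left as-is. A minimal sketch of the resulting shape, using hypothetical ex_* names rather than any one of the converted drivers:

#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *ex_counter_base;	/* hypothetical free-running MMIO counter */

/* Only the return type changes from cycle_t to u64; the read itself does not */
static u64 ex_clksrc_read(struct clocksource *cs)
{
	return (u64)readl_relaxed(ex_counter_base);
}

static struct clocksource ex_clksrc = {
	.name	= "ex-counter",
	.rating	= 300,
	.read	= ex_clksrc_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Registration is unaffected, e.g. clocksource_register_hz(&ex_clksrc, rate); */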