-rw-r--r-- Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt | 17
-rw-r--r-- Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt (renamed from Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt) | 6
-rw-r--r-- Documentation/kernel-parameters.txt | 8
-rw-r--r-- arch/arc/kernel/time.c | 63
-rw-r--r-- arch/arm/Kconfig | 2
-rw-r--r-- arch/arm/kernel/smp_twd.c | 3
-rw-r--r-- arch/arm/mach-bcm/Kconfig | 2
-rw-r--r-- arch/arm/mach-integrator/Kconfig | 2
-rw-r--r-- arch/arm/mach-keystone/Kconfig | 2
-rw-r--r-- arch/arm/mach-moxart/Kconfig | 2
-rw-r--r-- arch/arm/mach-mxs/Kconfig | 2
-rw-r--r-- arch/arm/mach-nspire/Kconfig | 1
-rw-r--r-- arch/arm/mach-prima2/Kconfig | 2
-rw-r--r-- arch/arm/mach-u300/Kconfig | 2
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399.dtsi | 8
-rw-r--r-- arch/microblaze/kernel/timer.c | 49
-rw-r--r-- arch/mips/ralink/cevt-rt3352.c | 17
-rw-r--r-- arch/nios2/kernel/time.c | 63
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 4
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 4
-rw-r--r-- block/genhd.c | 5
-rw-r--r-- drivers/clocksource/Kconfig | 116
-rw-r--r-- drivers/clocksource/Makefile | 23
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 56
-rw-r--r-- drivers/clocksource/arm_global_timer.c | 26
-rw-r--r-- drivers/clocksource/armv7m_systick.c | 17
-rw-r--r-- drivers/clocksource/asm9260_timer.c | 22
-rw-r--r-- drivers/clocksource/bcm2835_timer.c | 38
-rw-r--r-- drivers/clocksource/bcm_kona_timer.c | 12
-rw-r--r-- drivers/clocksource/cadence_ttc_timer.c | 74
-rw-r--r-- drivers/clocksource/clksrc-dbx500-prcmu.c | 4
-rw-r--r-- drivers/clocksource/clksrc-probe.c | 14
-rw-r--r-- drivers/clocksource/clksrc_st_lpc.c | 20
-rw-r--r-- drivers/clocksource/clps711x-timer.c | 10
-rw-r--r-- drivers/clocksource/dw_apb_timer_of.c | 4
-rw-r--r-- drivers/clocksource/exynos_mct.c | 32
-rw-r--r-- drivers/clocksource/fsl_ftm_timer.c | 20
-rw-r--r-- drivers/clocksource/h8300_timer16.c | 12
-rw-r--r-- drivers/clocksource/h8300_timer8.c | 11
-rw-r--r-- drivers/clocksource/h8300_tpu.c | 10
-rw-r--r-- drivers/clocksource/meson6_timer.c | 19
-rw-r--r-- drivers/clocksource/mips-gic-timer.c | 24
-rw-r--r-- drivers/clocksource/moxart_timer.c | 39
-rw-r--r-- drivers/clocksource/mps2-timer.c | 8
-rw-r--r-- drivers/clocksource/mtk_timer.c | 8
-rw-r--r-- drivers/clocksource/mxs_timer.c | 26
-rw-r--r-- drivers/clocksource/nomadik-mtu.c | 43
-rw-r--r-- drivers/clocksource/pxa_timer.c | 44
-rw-r--r-- drivers/clocksource/qcom-timer.c | 23
-rw-r--r-- drivers/clocksource/rockchip_timer.c | 53
-rw-r--r-- drivers/clocksource/samsung_pwm_timer.c | 70
-rw-r--r-- drivers/clocksource/sun4i_timer.c | 43
-rw-r--r-- drivers/clocksource/tango_xtal.c | 10
-rw-r--r-- drivers/clocksource/tegra20_timer.c | 24
-rw-r--r-- drivers/clocksource/time-armada-370-xp.c | 98
-rw-r--r-- drivers/clocksource/time-efm32.c | 17
-rw-r--r-- drivers/clocksource/time-lpc32xx.c | 10
-rw-r--r-- drivers/clocksource/time-orion.c | 50
-rw-r--r-- drivers/clocksource/time-pistachio.c | 18
-rw-r--r-- drivers/clocksource/timer-atlas7.c | 30
-rw-r--r-- drivers/clocksource/timer-atmel-pit.c | 41
-rw-r--r-- drivers/clocksource/timer-atmel-st.c | 42
-rw-r--r-- drivers/clocksource/timer-digicolor.c | 16
-rw-r--r-- drivers/clocksource/timer-imx-gpt.c | 51
-rw-r--r-- drivers/clocksource/timer-integrator-ap.c | 57
-rw-r--r-- drivers/clocksource/timer-keystone.c | 13
-rw-r--r-- drivers/clocksource/timer-nps.c | 14
-rw-r--r-- drivers/clocksource/timer-oxnas-rps.c | 297
-rw-r--r-- drivers/clocksource/timer-prima2.c | 42
-rw-r--r-- drivers/clocksource/timer-sp804.c | 86
-rw-r--r-- drivers/clocksource/timer-stm32.c | 8
-rw-r--r-- drivers/clocksource/timer-sun5i.c | 33
-rw-r--r-- drivers/clocksource/timer-ti-32k.c | 8
-rw-r--r-- drivers/clocksource/timer-u300.c | 36
-rw-r--r-- drivers/clocksource/versatile.c | 6
-rw-r--r-- drivers/clocksource/vf_pit_timer.c | 25
-rw-r--r-- drivers/clocksource/vt8500_timer.c | 24
-rw-r--r-- drivers/clocksource/zevio-timer.c | 4
-rw-r--r-- drivers/cpufreq/powernv-cpufreq.c | 5
-rw-r--r-- drivers/mmc/host/jz4740_mmc.c | 2
-rw-r--r-- drivers/net/ethernet/tile/tilepro.c | 4
-rw-r--r-- drivers/power/bq27xxx_battery.c | 5
-rw-r--r-- drivers/tty/metag_da.c | 4
-rw-r--r-- drivers/tty/mips_ejtag_fdc.c | 4
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 1
-rw-r--r-- drivers/usb/host/xhci.c | 2
-rw-r--r-- fs/timerfd.c | 10
-rw-r--r-- include/clocksource/timer-sp804.h | 8
-rw-r--r-- include/linux/alarmtimer.h | 6
-rw-r--r-- include/linux/clk.h | 4
-rw-r--r-- include/linux/clocksource.h | 2
-rw-r--r-- include/linux/list.h | 10
-rw-r--r-- include/linux/of.h | 3
-rw-r--r-- include/linux/time.h | 15
-rw-r--r-- include/linux/timer.h | 34
-rw-r--r-- kernel/signal.c | 24
-rw-r--r-- kernel/time/alarmtimer.c | 1
-rw-r--r-- kernel/time/clockevents.c | 2
-rw-r--r-- kernel/time/clocksource.c | 8
-rw-r--r-- kernel/time/hrtimer.c | 2
-rw-r--r-- kernel/time/test_udelay.c | 16
-rw-r--r-- kernel/time/tick-broadcast-hrtimer.c | 1
-rw-r--r-- kernel/time/tick-internal.h | 1
-rw-r--r-- kernel/time/tick-sched.c | 98
-rw-r--r-- kernel/time/timeconv.c | 11
-rw-r--r-- kernel/time/timekeeping.c | 10
-rw-r--r-- kernel/time/timer.c | 1111
-rw-r--r-- kernel/time/timer_stats.c | 6
-rw-r--r-- lib/random32.c | 1
-rw-r--r-- net/ipv4/inet_connection_sock.c | 7
-rw-r--r-- net/ipv4/inet_timewait_sock.c | 5
111 files changed, 2460 insertions(+), 1208 deletions(-)
diff --git a/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt b/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt
new file mode 100644
index 0000000..3ca89cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt
@@ -0,0 +1,17 @@
+Oxford Semiconductor OXNAS SoCs Family RPS Timer
+================================================
+
+Required properties:
+- compatible: Should be "oxsemi,ox810se-rps-timer"
+- reg : Specifies base physical address and size of the registers.
+- interrupts : The interrupts of the two timers
+- clocks : The phandle of the timer clock source
+
+example:
+
+timer0: timer@200 {
+ compatible = "oxsemi,ox810se-rps-timer";
+ reg = <0x200 0x40>;
+ clocks = <&rpsclk>;
+ interrupts = <4 5>;
+};
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
index 87f0b00..a41b184 100644
--- a/Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt
+++ b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
@@ -1,7 +1,9 @@
-Rockchip rk3288 timer
+Rockchip rk timer
Required properties:
-- compatible: shall be "rockchip,rk3288-timer"
+- compatible: shall be one of:
+ "rockchip,rk3288-timer" - for rk3066, rk3036, rk3188, rk322x, rk3288, rk3368
+ "rockchip,rk3399-timer" - for rk3399
- reg: base address of the timer register starting with TIMERS CONTROL register
- interrupts: should contain the interrupts for Timer0
- clocks : must contain an entry for each entry in clock-names
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fa8c6d4..17e33db 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -687,6 +687,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
[SPARC64] tick
[X86-64] hpet,tsc
+ clocksource.arm_arch_timer.evtstrm=
+ [ARM,ARM64]
+ Format: <bool>
+ Enable/disable the eventstream feature of the ARM
+ architected timer so that code using WFE-based polling
+ loops can be debugged more effectively on production
+ systems.
+
clearcpuid=BITNUM [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit
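The new option is parsed with strtobool(), so it takes the usual boolean spellings ("0"/"1", "n"/"y"). A hypothetical boot line disabling the event stream on a production system could look like this (everything except the new parameter is illustrative):

    root=/dev/mmcblk0p2 console=ttyS0,115200 clocksource.arm_arch_timer.evtstrm=0

The early_param() hook that consumes this is added in drivers/clocksource/arm_arch_timer.c further down.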
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 4549ab2..98f22d2 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -116,19 +116,19 @@ static struct clocksource arc_counter_gfrc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init arc_cs_setup_gfrc(struct device_node *node)
+static int __init arc_cs_setup_gfrc(struct device_node *node)
{
int exists = cpuinfo_arc700[0].extn.gfrc;
int ret;
if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
- return;
+ return -ENXIO;
ret = arc_get_timer_clk(node);
if (ret)
- return;
+ return ret;
- clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+ return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
@@ -172,25 +172,25 @@ static struct clocksource arc_counter_rtc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init arc_cs_setup_rtc(struct device_node *node)
+static int __init arc_cs_setup_rtc(struct device_node *node)
{
int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
int ret;
if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
- return;
+ return -ENXIO;
/* Local to CPU hence not usable in SMP */
if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
- return;
+ return -EINVAL;
ret = arc_get_timer_clk(node);
if (ret)
- return;
+ return ret;
write_aux_reg(AUX_RTC_CTRL, 1);
- clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+ return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
@@ -213,23 +213,23 @@ static struct clocksource arc_counter_timer1 = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init arc_cs_setup_timer1(struct device_node *node)
+static int __init arc_cs_setup_timer1(struct device_node *node)
{
int ret;
/* Local to CPU hence not usable in SMP */
if (IS_ENABLED(CONFIG_SMP))
- return;
+ return -EINVAL;
ret = arc_get_timer_clk(node);
if (ret)
- return;
+ return ret;
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
- clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+ return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}
/********** Clock Event Device *********/
@@ -324,20 +324,28 @@ static struct notifier_block arc_timer_cpu_nb = {
/*
* clockevent setup for boot CPU
*/
-static void __init arc_clockevent_setup(struct device_node *node)
+static int __init arc_clockevent_setup(struct device_node *node)
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int ret;
- register_cpu_notifier(&arc_timer_cpu_nb);
+ ret = register_cpu_notifier(&arc_timer_cpu_nb);
+ if (ret) {
+ pr_err("Failed to register cpu notifier");
+ return ret;
+ }
arc_timer_irq = irq_of_parse_and_map(node, 0);
- if (arc_timer_irq <= 0)
- panic("clockevent: missing irq");
+ if (arc_timer_irq <= 0) {
+ pr_err("clockevent: missing irq");
+ return -EINVAL;
+ }
ret = arc_get_timer_clk(node);
- if (ret)
- panic("clockevent: missing clk");
+ if (ret) {
+ pr_err("clockevent: missing clk");
+ return ret;
+ }
evt->irq = arc_timer_irq;
evt->cpumask = cpumask_of(smp_processor_id());
@@ -347,22 +355,29 @@ static void __init arc_clockevent_setup(struct device_node *node)
/* Needs apriori irq_set_percpu_devid() done in intc map function */
ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
"Timer0 (per-cpu-tick)", evt);
- if (ret)
- panic("clockevent: unable to request irq\n");
+ if (ret) {
+ pr_err("clockevent: unable to request irq\n");
+ return ret;
+ }
enable_percpu_irq(arc_timer_irq, 0);
+
+ return 0;
}
-static void __init arc_of_timer_init(struct device_node *np)
+static int __init arc_of_timer_init(struct device_node *np)
{
static int init_count = 0;
+ int ret;
if (!init_count) {
init_count = 1;
- arc_clockevent_setup(np);
+ ret = arc_clockevent_setup(np);
} else {
- arc_cs_setup_timer1(np);
+ ret = arc_cs_setup_timer1(np);
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
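Taken together, the ARC hunks illustrate the contract that the rest of this series rolls out: a CLOCKSOURCE_OF_DECLARE init function now returns 0 on success or a negative errno instead of returning void and panicking. A minimal sketch of the converted shape, using a made-up device and FOO_FREQ as a placeholder rate rather than any real driver:

static int __init foo_timer_init(struct device_node *np)
{
	void __iomem *base;
	int ret;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%s: unable to map registers\n", np->full_name);
		return -ENXIO;			/* report instead of panicking */
	}

	ret = clocksource_mmio_init(base, np->name, FOO_FREQ, 300, 32,
				    clocksource_mmio_readl_up);
	if (ret)
		iounmap(base);			/* unwind before propagating */

	return ret;
}
CLOCKSOURCE_OF_DECLARE(foo, "vendor,foo-timer", foo_timer_init);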
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 90542db..f0636ec9 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -358,10 +358,10 @@ config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
select ARCH_REQUIRE_GPIOLIB
select AUTO_ZRELADDR
- select CLKSRC_MMIO
select COMMON_CLK
select CPU_ARM720T
select GENERIC_CLOCKEVENTS
+ select CLPS711X_TIMER
select MFD_SYSCON
select SOC_BUS
help
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 1bfa7a7..b6ec65e 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -390,7 +390,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
}
#ifdef CONFIG_OF
-static void __init twd_local_timer_of_register(struct device_node *np)
+static int __init twd_local_timer_of_register(struct device_node *np)
{
int err;
@@ -410,6 +410,7 @@ static void __init twd_local_timer_of_register(struct device_node *np)
out:
WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
+ return err;
}
CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 68ab641..4f1709b 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -89,6 +89,7 @@ config ARCH_BCM_MOBILE
select HAVE_ARM_ARCH_TIMER
select PINCTRL
select ARCH_BCM_MOBILE_SMP if SMP
+ select BCM_KONA_TIMER
help
This enables support for systems based on Broadcom mobile SoCs.
@@ -143,6 +144,7 @@ config ARCH_BCM2835
select ARM_TIMER_SP804
select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
select CLKSRC_OF
+ select BCM2835_TIMER
select PINCTRL
select PINCTRL_BCM2835
help
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index b2a85ba..291262e 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -20,7 +20,7 @@ if ARCH_INTEGRATOR
config ARCH_INTEGRATOR_AP
bool "Support Integrator/AP and Integrator/PP2 platforms"
- select CLKSRC_MMIO
+ select INTEGRATOR_AP_TIMER
select MIGHT_HAVE_PCI
select SERIAL_AMBA_PL010 if TTY
select SERIAL_AMBA_PL010_CONSOLE if TTY
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index ea955f6db..bac577b 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -4,7 +4,7 @@ config ARCH_KEYSTONE
depends on ARM_PATCH_PHYS_VIRT
select ARM_GIC
select HAVE_ARM_ARCH_TIMER
- select CLKSRC_MMIO
+ select KEYSTONE_TIMER
select ARM_ERRATA_798181 if SMP
select COMMON_CLK_KEYSTONE
select ARCH_SUPPORTS_BIG_ENDIAN
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index 180d9d2..ddc79ce 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -3,7 +3,7 @@ menuconfig ARCH_MOXART
depends on ARCH_MULTI_V4
select CPU_FA526
select ARM_DMA_MEM_BUFFERABLE
- select CLKSRC_MMIO
+ select MOXART_TIMER
select GENERIC_IRQ_CHIP
select ARCH_REQUIRE_GPIOLIB
select PHYLIB if NETDEVICES
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 8479413..68a3a9e 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -16,7 +16,7 @@ config ARCH_MXS
bool "Freescale MXS (i.MX23, i.MX28) support"
depends on ARCH_MULTI_V5
select ARCH_REQUIRE_GPIOLIB
- select CLKSRC_MMIO
+ select MXS_TIMER
select PINCTRL
select SOC_BUS
select SOC_IMX23
diff --git a/arch/arm/mach-nspire/Kconfig b/arch/arm/mach-nspire/Kconfig
index bc41f26..d498530 100644
--- a/arch/arm/mach-nspire/Kconfig
+++ b/arch/arm/mach-nspire/Kconfig
@@ -7,5 +7,6 @@ config ARCH_NSPIRE
select ARM_AMBA
select ARM_VIC
select ARM_TIMER_SP804
+ select NSPIRE_TIMER
help
This enables support for systems using the TI-NSPIRE CPU
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 0cf4426..9e938f2 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -28,6 +28,7 @@ config ARCH_ATLAS7
default y
select ARM_GIC
select CPU_V7
+ select ATLAS7_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_SMP
help
@@ -38,6 +39,7 @@ config ARCH_PRIMA2
default y
select SIRF_IRQ
select ZONE_DMA
+ select PRIMA2_TIMER
help
Support for CSR SiRFSoC ARM Cortex A9 Platform
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 301a984..4fdc342 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -4,7 +4,7 @@ menuconfig ARCH_U300
select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_VIC
- select CLKSRC_MMIO
+ select U300_TIMER
select CPU_ARM926T
select HAVE_TCM
select PINCTRL
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index d7f8e06..188bbea 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -492,6 +492,14 @@
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
};
+ rktimer: rktimer@ff850000 {
+ compatible = "rockchip,rk3399-timer";
+ reg = <0x0 0xff850000 0x0 0x1000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
+ clock-names = "pclk", "timer";
+ };
+
spdif: spdif@ff870000 {
compatible = "rockchip,rk3399-spdif";
reg = <0x0 0xff870000 0x0 0x1000>;
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 67e2ef4..5bbf38b 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -170,7 +170,7 @@ static struct irqaction timer_irqaction = {
.dev_id = &clockevent_xilinx_timer,
};
-static __init void xilinx_clockevent_init(void)
+static __init int xilinx_clockevent_init(void)
{
clockevent_xilinx_timer.mult =
div_sc(timer_clock_freq, NSEC_PER_SEC,
@@ -181,6 +181,8 @@ static __init void xilinx_clockevent_init(void)
clockevent_delta2ns(1, &clockevent_xilinx_timer);
clockevent_xilinx_timer.cpumask = cpumask_of(0);
clockevents_register_device(&clockevent_xilinx_timer);
+
+ return 0;
}
static u64 xilinx_clock_read(void)
@@ -229,8 +231,14 @@ static struct clocksource clocksource_microblaze = {
static int __init xilinx_clocksource_init(void)
{
- if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq))
- panic("failed to register clocksource");
+ int ret;
+
+ ret = clocksource_register_hz(&clocksource_microblaze,
+ timer_clock_freq);
+ if (ret) {
+ pr_err("failed to register clocksource");
+ return ret;
+ }
/* stop timer1 */
write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
@@ -239,16 +247,16 @@ static int __init xilinx_clocksource_init(void)
write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
/* register timecounter - for ftrace support */
- init_xilinx_timecounter();
- return 0;
+ return init_xilinx_timecounter();
}
-static void __init xilinx_timer_init(struct device_node *timer)
+static int __init xilinx_timer_init(struct device_node *timer)
{
struct clk *clk;
static int initialized;
u32 irq;
u32 timer_num = 1;
+ int ret;
if (initialized)
return;
@@ -258,7 +266,7 @@ static void __init xilinx_timer_init(struct device_node *timer)
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
pr_err("ERROR: invalid timer base address\n");
- BUG();
+ return -ENXIO;
}
write_fn = timer_write32;
@@ -271,11 +279,15 @@ static void __init xilinx_timer_init(struct device_node *timer)
}
irq = irq_of_parse_and_map(timer, 0);
+ if (irq <= 0) {
+ pr_err("Failed to parse and map irq");
+ return -EINVAL;
+ }
of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
if (timer_num) {
- pr_emerg("Please enable two timers in HW\n");
- BUG();
+ pr_err("Please enable two timers in HW\n");
+ return -EINVAL;
}
pr_info("%s: irq=%d\n", timer->full_name, irq);
@@ -297,14 +309,27 @@ static void __init xilinx_timer_init(struct device_node *timer)
freq_div_hz = timer_clock_freq / HZ;
- setup_irq(irq, &timer_irqaction);
+ ret = setup_irq(irq, &timer_irqaction);
+ if (ret) {
+ pr_err("Failed to setup IRQ");
+ return ret;
+ }
+
#ifdef CONFIG_HEART_BEAT
microblaze_setup_heartbeat();
#endif
- xilinx_clocksource_init();
- xilinx_clockevent_init();
+
+ ret = xilinx_clocksource_init();
+ if (ret)
+ return ret;
+
+ ret = xilinx_clockevent_init();
+ if (ret)
+ return ret;
sched_clock_register(xilinx_clock_read, 32, timer_clock_freq);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index 3ad0b07..f24eee0 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -117,11 +117,13 @@ static int systick_set_oneshot(struct clock_event_device *evt)
return 0;
}
-static void __init ralink_systick_init(struct device_node *np)
+static int __init ralink_systick_init(struct device_node *np)
{
+ int ret;
+
systick.membase = of_iomap(np, 0);
if (!systick.membase)
- return;
+ return -ENXIO;
systick_irqaction.name = np->name;
systick.dev.name = np->name;
@@ -131,16 +133,21 @@ static void __init ralink_systick_init(struct device_node *np)
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%s: request_irq failed", np->name);
- return;
+ return -EINVAL;
}
- clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
- SYSTICK_FREQ, 301, 16, clocksource_mmio_readl_up);
+ ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+ SYSTICK_FREQ, 301, 16,
+ clocksource_mmio_readl_up);
+ if (ret)
+ return ret;
clockevents_register_device(&systick.dev);
pr_info("%s: running - mult: %d, shift: %d\n",
np->name, systick.dev.mult, systick.dev.shift);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index e835dda..d9563dd 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -206,15 +206,21 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __init nios2_timer_get_base_and_freq(struct device_node *np,
+static int __init nios2_timer_get_base_and_freq(struct device_node *np,
void __iomem **base, u32 *freq)
{
*base = of_iomap(np, 0);
- if (!*base)
- panic("Unable to map reg for %s\n", np->name);
+ if (!*base) {
+ pr_crit("Unable to map reg for %s\n", np->name);
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "clock-frequency", freq)) {
+ pr_crit("Unable to get %s clock frequency\n", np->name);
+ return -EINVAL;
+ }
- if (of_property_read_u32(np, "clock-frequency", freq))
- panic("Unable to get %s clock frequency\n", np->name);
+ return 0;
}
static struct nios2_clockevent_dev nios2_ce = {
@@ -231,17 +237,21 @@ static struct nios2_clockevent_dev nios2_ce = {
},
};
-static __init void nios2_clockevent_init(struct device_node *timer)
+static __init int nios2_clockevent_init(struct device_node *timer)
{
void __iomem *iobase;
u32 freq;
- int irq;
+ int irq, ret;
- nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ if (ret)
+ return ret;
irq = irq_of_parse_and_map(timer, 0);
- if (!irq)
- panic("Unable to parse timer irq\n");
+ if (!irq) {
+ pr_crit("Unable to parse timer irq\n");
+ return -EINVAL;
+ }
nios2_ce.timer.base = iobase;
nios2_ce.timer.freq = freq;
@@ -253,25 +263,35 @@ static __init void nios2_clockevent_init(struct device_node *timer)
/* clear pending interrupt */
timer_writew(&nios2_ce.timer, 0, ALTERA_TIMER_STATUS_REG);
- if (request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
- &nios2_ce.ced))
- panic("Unable to setup timer irq\n");
+ ret = request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
+ &nios2_ce.ced);
+ if (ret) {
+ pr_crit("Unable to setup timer irq\n");
+ return ret;
+ }
clockevents_config_and_register(&nios2_ce.ced, freq, 1, ULONG_MAX);
+
+ return 0;
}
-static __init void nios2_clocksource_init(struct device_node *timer)
+static __init int nios2_clocksource_init(struct device_node *timer)
{
unsigned int ctrl;
void __iomem *iobase;
u32 freq;
+ int ret;
- nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ if (ret)
+ return ret;
nios2_cs.timer.base = iobase;
nios2_cs.timer.freq = freq;
- clocksource_register_hz(&nios2_cs.cs, freq);
+ ret = clocksource_register_hz(&nios2_cs.cs, freq);
+ if (ret)
+ return ret;
timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODL_REG);
timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODH_REG);
@@ -282,6 +302,8 @@ static __init void nios2_clocksource_init(struct device_node *timer)
/* Calibrate the delay loop directly */
lpj_fine = freq / HZ;
+
+ return 0;
}
/*
@@ -289,22 +311,25 @@ static __init void nios2_clocksource_init(struct device_node *timer)
* more instances, the second one gets used as clocksource and all
* others are unused.
*/
-static void __init nios2_time_init(struct device_node *timer)
+static int __init nios2_time_init(struct device_node *timer)
{
static int num_called;
+ int ret = 0;
switch (num_called) {
case 0:
- nios2_clockevent_init(timer);
+ ret = nios2_clockevent_init(timer);
break;
case 1:
- nios2_clocksource_init(timer);
+ ret = nios2_clocksource_init(timer);
break;
default:
break;
}
num_called++;
+
+ return ret;
}
void read_persistent_clock(struct timespec *ts)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 5a58c91..64dd38f 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -918,7 +918,7 @@ static void uv_heartbeat(unsigned long ignored)
uv_set_scir_bits(bits);
/* enable next timer period */
- mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+ mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
static void uv_heartbeat_enable(int cpu)
@@ -927,7 +927,7 @@ static void uv_heartbeat_enable(int cpu)
struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
- setup_timer(timer, uv_heartbeat, cpu);
+ setup_pinned_timer(timer, uv_heartbeat, cpu);
timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
add_timer_on(timer, cpu);
uv_cpu_scir_info(cpu)->enabled = 1;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 58af630..79d8ec8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1309,7 +1309,7 @@ static void __restart_timer(struct timer_list *t, unsigned long interval)
if (timer_pending(t)) {
if (time_before(when, t->expires))
- mod_timer_pinned(t, when);
+ mod_timer(t, when);
} else {
t->expires = round_jiffies(when);
add_timer_on(t, smp_processor_id());
@@ -1735,7 +1735,7 @@ static void __mcheck_cpu_init_timer(void)
struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned int cpu = smp_processor_id();
- setup_timer(t, mce_timer_fn, cpu);
+ setup_pinned_timer(t, mce_timer_fn, cpu);
mce_start_timer(cpu, t);
}
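These two x86 conversions show the pattern behind the include/linux/timer.h changes in the diffstat: instead of requesting pinning on every rearm with mod_timer_pinned(), the timer is marked pinned once at setup time, and a plain mod_timer() then keeps it on the CPU that add_timer_on() armed it on. A hedged sketch modeled on the uv_heartbeat() hunk above; all names are illustrative:

static struct timer_list hb_timer;		/* hypothetical heartbeat timer */

static void hb_fn(unsigned long data)
{
	/* ... periodic work ... */
	/* was: mod_timer_pinned(&hb_timer, jiffies + HZ); */
	mod_timer(&hb_timer, jiffies + HZ);	/* pinning is preserved */
}

static void hb_start(int cpu)
{
	/* was: setup_timer(&hb_timer, hb_fn, 0); */
	setup_pinned_timer(&hb_timer, hb_fn, 0);
	hb_timer.expires = jiffies + HZ;
	add_timer_on(&hb_timer, cpu);		/* arm on the target CPU */
}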
diff --git a/block/genhd.c b/block/genhd.c
index 9f42526..f06d7f3 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1523,12 +1523,7 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
if (--ev->block)
goto out_unlock;
- /*
- * Not exactly a latency critical operation, set poll timer
- * slack to 25% and kick event check.
- */
intv = disk_events_poll_jiffies(disk);
- set_timer_slack(&ev->dwork.timer, intv / 4);
if (check_now)
queue_delayed_work(system_freezable_power_efficient_wq,
&ev->dwork, 0);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 47352d2..5677886 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -27,6 +27,20 @@ config CLKBLD_I8253
config CLKSRC_MMIO
bool
+config BCM2835_TIMER
+ bool "BCM2835 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables the support for the BCM2835 timer driver.
+
+config BCM_KONA_TIMER
+ bool "BCM mobile timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables the support for the BCM Kona mobile timer driver.
+
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
@@ -141,6 +155,72 @@ config CLKSRC_DBX500_PRCMU
help
Use the always on PRCMU Timer as clocksource
+config CLPS711X_TIMER
+ bool "Cirrus logic timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Cirrus Logic PS711 timer.
+
+config ATLAS7_TIMER
+ bool "Atlas7 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Atlas7 timer.
+
+config MOXART_TIMER
+ bool "Moxart timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Moxart timer.
+
+config MXS_TIMER
+ bool "Mxs timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ select STMP_DEVICE
+ help
+ Enables support for the Mxs timer.
+
+config PRIMA2_TIMER
+ bool "Prima2 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Prima2 timer.
+
+config U300_TIMER
+ bool "U300 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on ARM
+ select CLKSRC_MMIO
+ help
+ Enables support for the U300 timer.
+
+config NSPIRE_TIMER
+ bool "NSpire timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Nspire timer.
+
+config KEYSTONE_TIMER
+ bool "Keystone timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on ARM || ARM64
+ select CLKSRC_MMIO
+ help
+ Enables support for the Keystone timer.
+
+config INTEGRATOR_AP_TIMER
+ bool "Integrator-ap timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Integrator-ap timer.
+
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
bool "Clocksource PRCMU Timer sched_clock"
depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
@@ -208,14 +288,16 @@ config ARM_ARCH_TIMER
select CLKSRC_ACPI if ACPI
config ARM_ARCH_TIMER_EVTSTREAM
- bool "Support for ARM architected timer event stream generation"
+ bool "Enable ARM architected timer event stream generation by default"
default y if ARM_ARCH_TIMER
depends on ARM_ARCH_TIMER
help
- This option enables support for event stream generation based on
- the ARM architected timer. It is used for waking up CPUs executing
- the wfe instruction at a frequency represented as a power-of-2
- divisor of the clock rate.
+ This option enables support by default for event stream generation
+ based on the ARM architected timer. It is used for waking up CPUs
+ executing the wfe instruction at a frequency represented as a
+ power-of-2 divisor of the clock rate. The behaviour can also be
+ overridden on the command line using the
+ clocksource.arm_arch_timer.evtstrm parameter.
The main use of the event stream is wfe-based timeouts of userspace
locking implementations. It might also be useful for imposing timeout
on wfe to safeguard against any programming errors in case an expected
@@ -224,8 +306,9 @@ config ARM_ARCH_TIMER_EVTSTREAM
hardware anomalies of missing events.
config ARM_GLOBAL_TIMER
- bool
+ bool "Support for the ARM global timer" if COMPILE_TEST
select CLKSRC_OF if OF
+ depends on ARM
help
This options enables support for the ARM global timer unit
@@ -243,7 +326,7 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
Use ARM global timer clock source as sched_clock
config ARMV7M_SYSTICK
- bool
+ bool "Support for the ARMv7M system time" if COMPILE_TEST
select CLKSRC_OF if OF
select CLKSRC_MMIO
help
@@ -254,9 +337,12 @@ config ATMEL_PIT
def_bool SOC_AT91SAM9 || SOC_SAMA5
config ATMEL_ST
- bool
+ bool "Atmel ST timer support" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
select CLKSRC_OF
select MFD_SYSCON
+ help
+ Support for the Atmel ST timer.
config CLKSRC_METAG_GENERIC
def_bool y if METAG
@@ -270,7 +356,7 @@ config CLKSRC_EXYNOS_MCT
Support for Multi Core Timer controller on Exynos SoCs.
config CLKSRC_SAMSUNG_PWM
- bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
+ bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
@@ -293,6 +379,14 @@ config VF_PIT_TIMER
help
Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+config OXNAS_RPS_TIMER
+ bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_OF
+ select CLKSRC_MMIO
+ help
+ This enables support for the Oxford Semiconductor OXNAS RPS timers.
+
config SYS_SUPPORTS_SH_CMT
bool
@@ -361,8 +455,8 @@ config CLKSRC_QCOM
Qualcomm SoCs.
config CLKSRC_VERSATILE
- bool "ARM Versatile (Express) reference platforms clock source"
- depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+ bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
select CLKSRC_OF
default y if MFD_VEXPRESS_SYSREG
help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 473974f..fd9d6df 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -19,21 +19,21 @@ obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += time-orion.o
-obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
-obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o
-obj-$(CONFIG_ARCH_ATLAS7) += timer-atlas7.o
-obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
-obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
+obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
+obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
+obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
+obj-$(CONFIG_MOXART_TIMER) += moxart_timer.o
+obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o
-obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
-obj-$(CONFIG_ARCH_U300) += timer-u300.o
+obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
+obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o
obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
-obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o
+obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o
+obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
+obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
@@ -55,8 +56,8 @@ obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
-obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
-obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
+obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
+obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4814446..5effd30 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -79,6 +79,14 @@ static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
+static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+
+static int __init early_evtstrm_cfg(char *buf)
+{
+ return strtobool(buf, &evtstrm_enable);
+}
+early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
+
/*
* Architected system timer support.
*/
@@ -372,7 +380,7 @@ static int arch_timer_setup(struct clock_event_device *clk)
enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
arch_counter_set_user_access();
- if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+ if (evtstrm_enable)
arch_timer_configure_evtstream();
return 0;
@@ -693,25 +701,26 @@ arch_timer_needs_probing(int type, const struct of_device_id *matches)
return needs_probing;
}
-static void __init arch_timer_common_init(void)
+static int __init arch_timer_common_init(void)
{
unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
/* Wait until both nodes are probed if we have two timers */
if ((arch_timers_present & mask) != mask) {
if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
- return;
+ return 0;
if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
- return;
+ return 0;
}
arch_timer_banner(arch_timers_present);
arch_counter_register(arch_timers_present);
- arch_timer_arch_init();
+ return arch_timer_arch_init();
}
-static void __init arch_timer_init(void)
+static int __init arch_timer_init(void)
{
+ int ret;
/*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
@@ -739,23 +748,30 @@ static void __init arch_timer_init(void)
if (!has_ppi) {
pr_warn("arch_timer: No interrupt available, giving up\n");
- return;
+ return -EINVAL;
}
}
- arch_timer_register();
- arch_timer_common_init();
+ ret = arch_timer_register();
+ if (ret)
+ return ret;
+
+ ret = arch_timer_common_init();
+ if (ret)
+ return ret;
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
+
+ return 0;
}
-static void __init arch_timer_of_init(struct device_node *np)
+static int __init arch_timer_of_init(struct device_node *np)
{
int i;
if (arch_timers_present & ARCH_CP15_TIMER) {
pr_warn("arch_timer: multiple nodes in dt, skipping\n");
- return;
+ return 0;
}
arch_timers_present |= ARCH_CP15_TIMER;
@@ -774,23 +790,23 @@ static void __init arch_timer_of_init(struct device_node *np)
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = PHYS_SECURE_PPI;
- arch_timer_init();
+ return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
-static void __init arch_timer_mem_init(struct device_node *np)
+static int __init arch_timer_mem_init(struct device_node *np)
{
struct device_node *frame, *best_frame = NULL;
void __iomem *cntctlbase, *base;
- unsigned int irq;
+ int irq, ret = -EINVAL;
u32 cnttidr;
arch_timers_present |= ARCH_MEM_TIMER;
cntctlbase = of_iomap(np, 0);
if (!cntctlbase) {
pr_err("arch_timer: Can't find CNTCTLBase\n");
- return;
+ return -ENXIO;
}
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
@@ -830,6 +846,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
best_frame = of_node_get(frame);
}
+ ret = -ENXIO;
base = arch_counter_base = of_iomap(best_frame, 0);
if (!base) {
pr_err("arch_timer: Can't map frame's registers\n");
@@ -841,6 +858,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
else
irq = irq_of_parse_and_map(best_frame, 0);
+ ret = -EINVAL;
if (!irq) {
pr_err("arch_timer: Frame missing %s irq",
arch_timer_mem_use_virtual ? "virt" : "phys");
@@ -848,11 +866,15 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
arch_timer_detect_rate(base, np);
- arch_timer_mem_register(base, irq);
- arch_timer_common_init();
+ ret = arch_timer_mem_register(base, irq);
+ if (ret)
+ goto out;
+
+ return arch_timer_common_init();
out:
iounmap(cntctlbase);
of_node_put(best_frame);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 9df0d16..2a9ceb6 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -238,7 +238,7 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
-static void __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(void)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
@@ -249,7 +249,7 @@ static void __init gt_clocksource_init(void)
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
#endif
- clocksource_register_hz(&gt_clocksource, gt_clk_rate);
+ return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}
static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
@@ -270,7 +270,7 @@ static struct notifier_block gt_cpu_nb = {
.notifier_call = gt_cpu_notify,
};
-static void __init global_timer_of_register(struct device_node *np)
+static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
int err = 0;
@@ -283,19 +283,19 @@ static void __init global_timer_of_register(struct device_node *np)
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
&& (read_cpuid_id() & 0xf0000f) < 0x200000) {
pr_warn("global-timer: non support for this cpu version.\n");
- return;
+ return -ENOSYS;
}
gt_ppi = irq_of_parse_and_map(np, 0);
if (!gt_ppi) {
pr_warn("global-timer: unable to parse irq\n");
- return;
+ return -EINVAL;
}
gt_base = of_iomap(np, 0);
if (!gt_base) {
pr_warn("global-timer: invalid base address\n");
- return;
+ return -ENXIO;
}
gt_clk = of_clk_get(np, 0);
@@ -332,11 +332,17 @@ static void __init global_timer_of_register(struct device_node *np)
}
/* Immediately configure the timer on the boot CPU */
- gt_clocksource_init();
- gt_clockevents_init(this_cpu_ptr(gt_evt));
+ err = gt_clocksource_init();
+ if (err)
+ goto out_irq;
+
+ err = gt_clockevents_init(this_cpu_ptr(gt_evt));
+ if (err)
+ goto out_irq;
+
gt_delay_timer_init();
- return;
+ return 0;
out_irq:
free_percpu_irq(gt_ppi, gt_evt);
@@ -347,6 +353,8 @@ out_clk:
out_unmap:
iounmap(gt_base);
WARN(err, "ARM Global timer register failed (%d)\n", err);
+
+ return err;
}
/* Only tested on r2p2 and r3p0 */
diff --git a/drivers/clocksource/armv7m_systick.c b/drivers/clocksource/armv7m_systick.c
index addfd2c..a315491 100644
--- a/drivers/clocksource/armv7m_systick.c
+++ b/drivers/clocksource/armv7m_systick.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
@@ -21,7 +22,7 @@
#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF
-static void __init system_timer_of_register(struct device_node *np)
+static int __init system_timer_of_register(struct device_node *np)
{
struct clk *clk = NULL;
void __iomem *base;
@@ -31,22 +32,26 @@ static void __init system_timer_of_register(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_warn("system-timer: invalid base address\n");
- return;
+ return -ENXIO;
}
ret = of_property_read_u32(np, "clock-frequency", &rate);
if (ret) {
clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
goto out_unmap;
+ }
ret = clk_prepare_enable(clk);
if (ret)
goto out_clk_put;
rate = clk_get_rate(clk);
- if (!rate)
+ if (!rate) {
+ ret = -EINVAL;
goto out_clk_disable;
+ }
}
writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR);
@@ -64,7 +69,7 @@ static void __init system_timer_of_register(struct device_node *np)
pr_info("ARM System timer initialized as clocksource\n");
- return;
+ return 0;
out_clk_disable:
clk_disable_unprepare(clk);
@@ -73,6 +78,8 @@ out_clk_put:
out_unmap:
iounmap(base);
pr_warn("ARM System timer register failed (%d)\n", ret);
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 217438d..1ba871b 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -184,7 +184,7 @@ static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
* Timer initialization
* ---------------------------------------------------------------------------
*/
-static void __init asm9260_timer_init(struct device_node *np)
+static int __init asm9260_timer_init(struct device_node *np)
{
int irq;
struct clk *clk;
@@ -192,20 +192,26 @@ static void __init asm9260_timer_init(struct device_node *np)
unsigned long rate;
priv.base = of_io_request_and_map(np, 0, np->name);
- if (IS_ERR(priv.base))
- panic("%s: unable to map resource", np->name);
+ if (IS_ERR(priv.base)) {
+ pr_err("%s: unable to map resource", np->name);
+ return PTR_ERR(priv.base);
+ }
clk = of_clk_get(np, 0);
ret = clk_prepare_enable(clk);
- if (ret)
- panic("Failed to enable clk!\n");
+ if (ret) {
+ pr_err("Failed to enable clk!\n");
+ return ret;
+ }
irq = irq_of_parse_and_map(np, 0);
ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER,
DRIVER_NAME, &event_dev);
- if (ret)
- panic("Failed to setup irq!\n");
+ if (ret) {
+ pr_err("Failed to setup irq!\n");
+ return ret;
+ }
/* set all timers for count-up */
writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR);
@@ -229,6 +235,8 @@ static void __init asm9260_timer_init(struct device_node *np)
priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
event_dev.cpumask = cpumask_of(0);
clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
asm9260_timer_init);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 6f28229..e71acf2 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -80,19 +80,24 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
}
}
-static void __init bcm2835_timer_init(struct device_node *node)
+static int __init bcm2835_timer_init(struct device_node *node)
{
void __iomem *base;
u32 freq;
- int irq;
+ int irq, ret;
struct bcm2835_timer *timer;
base = of_iomap(node, 0);
- if (!base)
- panic("Can't remap registers");
+ if (!base) {
+ pr_err("Can't remap registers");
+ return -ENXIO;
+ }
- if (of_property_read_u32(node, "clock-frequency", &freq))
- panic("Can't read clock-frequency");
+ ret = of_property_read_u32(node, "clock-frequency", &freq);
+ if (ret) {
+ pr_err("Can't read clock-frequency");
+ return ret;
+ }
system_clock = base + REG_COUNTER_LO;
sched_clock_register(bcm2835_sched_read, 32, freq);
@@ -101,12 +106,16 @@ static void __init bcm2835_timer_init(struct device_node *node)
freq, 300, 32, clocksource_mmio_readl_up);
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
- if (!timer)
- panic("Can't allocate timer struct\n");
+ if (!timer) {
+ pr_err("Can't allocate timer struct\n");
+ return -ENOMEM;
+ }
timer->control = base + REG_CONTROL;
timer->compare = base + REG_COMPARE(DEFAULT_TIMER);
@@ -121,12 +130,17 @@ static void __init bcm2835_timer_init(struct device_node *node)
timer->act.dev_id = timer;
timer->act.handler = bcm2835_time_interrupt;
- if (setup_irq(irq, &timer->act))
- panic("Can't set up timer IRQ\n");
+ ret = setup_irq(irq, &timer->act);
+ if (ret) {
+ pr_err("Can't set up timer IRQ\n");
+ return ret;
+ }
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
pr_info("bcm2835: system timer (irq = %d)\n", irq);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
bcm2835_timer_init);
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index e717e87..7e3fd37 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -20,7 +20,6 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <asm/mach/time.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -163,16 +162,11 @@ static struct irqaction kona_timer_irq = {
.handler = kona_timer_interrupt,
};
-static void __init kona_timer_init(struct device_node *node)
+static int __init kona_timer_init(struct device_node *node)
{
u32 freq;
struct clk *external_clk;
- if (!of_device_is_available(node)) {
- pr_info("Kona Timer v1 marked as disabled in device tree\n");
- return;
- }
-
external_clk = of_clk_get_by_name(node, NULL);
if (!IS_ERR(external_clk)) {
@@ -182,7 +176,7 @@ static void __init kona_timer_init(struct device_node *node)
arch_timer_rate = freq;
} else {
pr_err("Kona Timer v1 unable to determine clock-frequency");
- return;
+ return -EINVAL;
}
/* Setup IRQ numbers */
@@ -196,6 +190,8 @@ static void __init kona_timer_init(struct device_node *node)
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 9be6018..fbfbdec 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -322,22 +322,22 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
+static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
u32 timer_width)
{
struct ttc_timer_clocksource *ttccs;
int err;
ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
- if (WARN_ON(!ttccs))
- return;
+ if (!ttccs)
+ return -ENOMEM;
ttccs->ttc.clk = clk;
err = clk_prepare_enable(ttccs->ttc.clk);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttccs);
- return;
+ return err;
}
ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
@@ -345,8 +345,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
ttccs->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clocksource_cb;
ttccs->ttc.clk_rate_change_nb.next = NULL;
- if (clk_notifier_register(ttccs->ttc.clk,
- &ttccs->ttc.clk_rate_change_nb))
+
+ err = clk_notifier_register(ttccs->ttc.clk,
+ &ttccs->ttc.clk_rate_change_nb);
+ if (err)
pr_warn("Unable to register clock notifier.\n");
ttccs->ttc.base_addr = base;
@@ -368,14 +370,16 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttccs);
- return;
+ return err;
}
ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
sched_clock_register(ttc_sched_clock_read, timer_width,
ttccs->ttc.freq / PRESCALE);
+
+ return 0;
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -401,30 +405,35 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
}
}
-static void __init ttc_setup_clockevent(struct clk *clk,
- void __iomem *base, u32 irq)
+static int __init ttc_setup_clockevent(struct clk *clk,
+ void __iomem *base, u32 irq)
{
struct ttc_timer_clockevent *ttcce;
int err;
ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
- if (WARN_ON(!ttcce))
- return;
+ if (!ttcce)
+ return -ENOMEM;
ttcce->ttc.clk = clk;
err = clk_prepare_enable(ttcce->ttc.clk);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttcce);
- return;
+ return err;
}
ttcce->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clockevent_cb;
ttcce->ttc.clk_rate_change_nb.next = NULL;
- if (clk_notifier_register(ttcce->ttc.clk,
- &ttcce->ttc.clk_rate_change_nb))
+
+ err = clk_notifier_register(ttcce->ttc.clk,
+ &ttcce->ttc.clk_rate_change_nb);
+ if (err) {
pr_warn("Unable to register clock notifier.\n");
+ return err;
+ }
+
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
ttcce->ttc.base_addr = base;
@@ -451,13 +460,15 @@ static void __init ttc_setup_clockevent(struct clk *clk,
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttcce);
- return;
+ return err;
}
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
+
+ return 0;
}
/**
@@ -466,17 +477,17 @@ static void __init ttc_setup_clockevent(struct clk *clk,
* Initializes the timer hardware and register the clock source and clock event
* timers with Linux kernal timer framework
*/
-static void __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_init(struct device_node *timer)
{
unsigned int irq;
void __iomem *timer_baseaddr;
struct clk *clk_cs, *clk_ce;
static int initialized;
- int clksel;
+ int clksel, ret;
u32 timer_width = 16;
if (initialized)
- return;
+ return 0;
initialized = 1;
@@ -488,13 +499,13 @@ static void __init ttc_timer_init(struct device_node *timer)
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
pr_err("ERROR: invalid timer base address\n");
- BUG();
+ return -ENXIO;
}
irq = irq_of_parse_and_map(timer, 1);
if (irq <= 0) {
pr_err("ERROR: invalid interrupt number\n");
- BUG();
+ return -EINVAL;
}
of_property_read_u32(timer, "timer-width", &timer_width);
@@ -504,7 +515,7 @@ static void __init ttc_timer_init(struct device_node *timer)
clk_cs = of_clk_get(timer, clksel);
if (IS_ERR(clk_cs)) {
pr_err("ERROR: timer input clock not found\n");
- BUG();
+ return PTR_ERR(clk_cs);
}
clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
@@ -512,13 +523,20 @@ static void __init ttc_timer_init(struct device_node *timer)
clk_ce = of_clk_get(timer, clksel);
if (IS_ERR(clk_ce)) {
pr_err("ERROR: timer input clock not found\n");
- BUG();
+ return PTR_ERR(clk_ce);
}
- ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
- ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+ ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
+ if (ret)
+ return ret;
+
+ ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+ if (ret)
+ return ret;
pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index dfad6eb..77a365f 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -64,7 +64,7 @@ static u64 notrace dbx500_prcmu_sched_clock_read(void)
#endif
-static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
+static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
{
clksrc_dbx500_timer_base = of_iomap(node, 0);
@@ -84,7 +84,7 @@ static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
#endif
- clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
+ return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
clksrc_dbx500_prcmu_init);
diff --git a/drivers/clocksource/clksrc-probe.c b/drivers/clocksource/clksrc-probe.c
index 7cb6c92..bc62be9 100644
--- a/drivers/clocksource/clksrc-probe.c
+++ b/drivers/clocksource/clksrc-probe.c
@@ -28,15 +28,23 @@ void __init clocksource_probe(void)
{
struct device_node *np;
const struct of_device_id *match;
- of_init_fn_1 init_func;
+ of_init_fn_1_ret init_func_ret;
unsigned clocksources = 0;
+ int ret;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
if (!of_device_is_available(np))
continue;
- init_func = match->data;
- init_func(np);
+ init_func_ret = match->data;
+
+ ret = init_func_ret(np);
+ if (ret) {
+ pr_err("Failed to initialize '%s': %d",
+ of_node_full_name(np), ret);
+ continue;
+ }
+
clocksources++;
}
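clocksource_probe() is the consumer of every conversion above: each CLOCKSOURCE_OF_DECLARE callback is now expected to match the of_init_fn_1_ret signature added to include/linux/of.h (see the diffstat), and a failing node is logged and skipped rather than taking the system down. Note that the loop already filters on of_device_is_available(), which is what makes per-driver checks like the one removed from bcm_kona_timer.c above redundant. The typedef assumed here, as introduced by this series:

/* include/linux/of.h: an OF init hook that reports success or -errno */
typedef int (*of_init_fn_1_ret)(struct device_node *);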
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
index 65ec467..03cc492 100644
--- a/drivers/clocksource/clksrc_st_lpc.c
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -92,7 +92,7 @@ static int __init st_clksrc_setup_clk(struct device_node *np)
return 0;
}
-static void __init st_clksrc_of_register(struct device_node *np)
+static int __init st_clksrc_of_register(struct device_node *np)
{
int ret;
uint32_t mode;
@@ -100,32 +100,36 @@ static void __init st_clksrc_of_register(struct device_node *np)
ret = of_property_read_u32(np, "st,lpc-mode", &mode);
if (ret) {
pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
- return;
+ return ret;
}
/* LPC can either run as a Clocksource or in RTC or WDT mode */
if (mode != ST_LPC_MODE_CLKSRC)
- return;
+ return 0;
ddata.base = of_iomap(np, 0);
if (!ddata.base) {
pr_err("clksrc-st-lpc: Unable to map iomem\n");
- return;
+ return -ENXIO;
}
- if (st_clksrc_setup_clk(np)) {
+ ret = st_clksrc_setup_clk(np);
+ if (ret) {
iounmap(ddata.base);
- return;
+ return ret;
}
- if (st_clksrc_init()) {
+ ret = st_clksrc_init();
+ if (ret) {
clk_disable_unprepare(ddata.clk);
clk_put(ddata.clk);
iounmap(ddata.base);
- return;
+ return ret;
}
pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
clk_get_rate(ddata.clk));
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index cdd86e3..84aed78 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -104,7 +104,7 @@ void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
}
#ifdef CONFIG_CLKSRC_OF
-static void __init clps711x_timer_init(struct device_node *np)
+static int __init clps711x_timer_init(struct device_node *np)
{
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
@@ -112,13 +112,11 @@ static void __init clps711x_timer_init(struct device_node *np)
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
- BUG_ON(_clps711x_clksrc_init(clock, base));
- break;
+ return _clps711x_clksrc_init(clock, base);
case CLPS711X_CLKSRC_CLOCKEVENT:
- BUG_ON(_clps711x_clkevt_init(clock, base, irq));
- break;
+ return _clps711x_clkevt_init(clock, base, irq);
default:
- break;
+ return -EINVAL;
}
}
CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 860843c..aee6c0d 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -143,7 +143,7 @@ static struct delay_timer dw_apb_delay_timer = {
#endif
static int num_called;
-static void __init dw_apb_timer_init(struct device_node *timer)
+static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
case 0:
@@ -164,6 +164,8 @@ static void __init dw_apb_timer_init(struct device_node *timer)
}
num_called++;
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index be09bc0..0d18dd4b 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -232,7 +232,7 @@ static cycles_t exynos4_read_current_timer(void)
return exynos4_read_count_32();
}
-static void __init exynos4_clocksource_init(void)
+static int __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start();
@@ -244,6 +244,8 @@ static void __init exynos4_clocksource_init(void)
panic("%s: can't register clocksource\n", mct_frc.name);
sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
+
+ return 0;
}
static void exynos4_mct_comp0_stop(void)
@@ -335,12 +337,14 @@ static struct irqaction mct_comp_event_irq = {
.dev_id = &mct_comp_device,
};
-static void exynos4_clockevent_init(void)
+static int exynos4_clockevent_init(void)
{
mct_comp_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&mct_comp_device, clk_rate,
0xf, 0xffffffff);
setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
+
+ return 0;
}
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
@@ -516,7 +520,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
.notifier_call = exynos4_mct_cpu_notify,
};
-static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
int err, cpu;
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
@@ -572,15 +576,17 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
/* Immediately configure the timer on the boot CPU */
exynos4_local_timer_setup(mevt);
- return;
+ return 0;
out_irq:
free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ return err;
}
-static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
+static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
u32 nr_irqs, i;
+ int ret;
mct_int_type = int_type;
@@ -600,18 +606,24 @@ static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
- exynos4_timer_resources(np, of_iomap(np, 0));
- exynos4_clocksource_init();
- exynos4_clockevent_init();
+ ret = exynos4_timer_resources(np, of_iomap(np, 0));
+ if (ret)
+ return ret;
+
+ ret = exynos4_clocksource_init();
+ if (ret)
+ return ret;
+
+ return exynos4_clockevent_init();
}
-static void __init mct_init_spi(struct device_node *np)
+static int __init mct_init_spi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_SPI);
}
-static void __init mct_init_ppi(struct device_node *np)
+static int __init mct_init_ppi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_PPI);
}
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 517e1c7..738515b 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -316,15 +316,16 @@ static int __init ftm_calc_closest_round_cyc(unsigned long freq)
return 0;
}
-static void __init ftm_timer_init(struct device_node *np)
+static int __init ftm_timer_init(struct device_node *np)
{
unsigned long freq;
- int irq;
+ int ret, irq;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
- return;
+ return -ENOMEM;
+ ret = -ENXIO;
priv->clkevt_base = of_iomap(np, 0);
if (!priv->clkevt_base) {
pr_err("ftm: unable to map event timer registers\n");
@@ -337,6 +338,7 @@ static void __init ftm_timer_init(struct device_node *np)
goto err;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
@@ -349,18 +351,22 @@ static void __init ftm_timer_init(struct device_node *np)
if (!freq)
goto err;
- if (ftm_calc_closest_round_cyc(freq))
+ ret = ftm_calc_closest_round_cyc(freq);
+ if (ret)
goto err;
- if (ftm_clocksource_init(freq))
+ ret = ftm_clocksource_init(freq);
+ if (ret)
goto err;
- if (ftm_clockevent_init(freq, irq))
+ ret = ftm_clockevent_init(freq, irq);
+ if (ret)
goto err;
- return;
+ return 0;
err:
kfree(priv);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
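The ftm conversion shows the errno-seeding idiom used wherever several probe steps share one unwind path: load ret with the errno appropriate to the next group of checks, branch to a single label on failure, and return ret from there. A condensed sketch under invented names (struct foo_priv and foo_timer_init are illustrative, not from the patch; kernel headers assumed):

	struct foo_priv {
		void __iomem *base;
		int irq;
	};

	static struct foo_priv *priv;

	static int __init foo_timer_init(struct device_node *np)
	{
		int ret;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		ret = -ENXIO;	/* errno for the mapping step */
		priv->base = of_iomap(np, 0);
		if (!priv->base)
			goto err;

		ret = -EINVAL;	/* errno for the DT-parsing step */
		priv->irq = irq_of_parse_and_map(np, 0);
		if (priv->irq <= 0)
			goto err;

		return 0;

	err:
		if (priv->base)	/* NULL until of_iomap() succeeds */
			iounmap(priv->base);
		kfree(priv);
		return ret;
	}

The cost is that the reader must track the current value of ret; the benefit is a single cleanup path no matter which step failed.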
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 75c4407..07d9d5b 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -126,7 +126,7 @@ static struct timer16_priv timer16_priv = {
#define REG_CH 0
#define REG_COMM 1
-static void __init h8300_16timer_init(struct device_node *node)
+static int __init h8300_16timer_init(struct device_node *node)
{
void __iomem *base[2];
int ret, irq;
@@ -136,9 +136,10 @@ static void __init h8300_16timer_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
- return;
+ return PTR_ERR(clk);
}
+ ret = -ENXIO;
base[REG_CH] = of_iomap(node, 0);
if (!base[REG_CH]) {
pr_err("failed to map registers for clocksource\n");
@@ -151,6 +152,7 @@ static void __init h8300_16timer_init(struct device_node *node)
goto unmap_ch;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
@@ -174,7 +176,7 @@ static void __init h8300_16timer_init(struct device_node *node)
clocksource_register_hz(&timer16_priv.cs,
clk_get_rate(clk) / 8);
- return;
+ return 0;
unmap_comm:
iounmap(base[REG_COMM]);
@@ -182,6 +184,8 @@ unmap_ch:
iounmap(base[REG_CH]);
free_clk:
clk_put(clk);
+ return ret;
}
-CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init);
+CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer",
+ h8300_16timer_init);
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index c151941..546bb18 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -164,24 +164,26 @@ static struct timer8_priv timer8_priv = {
},
};
-static void __init h8300_8timer_init(struct device_node *node)
+static int __init h8300_8timer_init(struct device_node *node)
{
void __iomem *base;
- int irq;
+ int irq, ret;
struct clk *clk;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clockevent\n");
- return;
+ return PTR_ERR(clk);
}
+ ret = -ENXIO;
base = of_iomap(node, 0);
if (!base) {
pr_err("failed to map registers for clockevent\n");
goto free_clk;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
@@ -205,11 +207,12 @@ static void __init h8300_8timer_init(struct device_node *node)
clockevents_config_and_register(&timer8_priv.ced,
timer8_priv.rate, 1, 0x0000ffff);
- return;
+ return 0;
unmap_reg:
iounmap(base);
free_clk:
clk_put(clk);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index d4c1a28..7bdf199 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -119,15 +119,16 @@ static struct tpu_priv tpu_priv = {
#define CH_L 0
#define CH_H 1
-static void __init h8300_tpu_init(struct device_node *node)
+static int __init h8300_tpu_init(struct device_node *node)
{
void __iomem *base[2];
struct clk *clk;
+ int ret = -ENXIO;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
- return;
+ return PTR_ERR(clk);
}
base[CH_L] = of_iomap(node, CH_L);
@@ -144,14 +145,13 @@ static void __init h8300_tpu_init(struct device_node *node)
tpu_priv.mapbase1 = base[CH_L];
tpu_priv.mapbase2 = base[CH_H];
- clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
-
- return;
+ return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
unmap_L:
iounmap(base[CH_L]);
free_clk:
clk_put(clk);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c
index 1fa22c4..52af591 100644
--- a/drivers/clocksource/meson6_timer.c
+++ b/drivers/clocksource/meson6_timer.c
@@ -126,18 +126,22 @@ static struct irqaction meson6_timer_irq = {
.dev_id = &meson6_clockevent,
};
-static void __init meson6_timer_init(struct device_node *node)
+static int __init meson6_timer_init(struct device_node *node)
{
u32 val;
int ret, irq;
timer_base = of_io_request_and_map(node, 0, "meson6-timer");
- if (IS_ERR(timer_base))
- panic("Can't map registers");
+ if (IS_ERR(timer_base)) {
+ pr_err("Can't map registers");
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
/* Set 1us for timer E */
val = readl(timer_base + TIMER_ISA_MUX);
@@ -158,14 +162,17 @@ static void __init meson6_timer_init(struct device_node *node)
meson6_clkevt_time_stop(CED_ID);
ret = setup_irq(irq, &meson6_timer_irq);
- if (ret)
+ if (ret) {
pr_warn("failed to setup irq %d\n", irq);
+ return ret;
+ }
meson6_clockevent.cpumask = cpu_possible_mask;
meson6_clockevent.irq = irq;
clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
1, 0xfffe);
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer",
meson6_timer_init);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 89d3e4d..1572c7a 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -146,7 +146,7 @@ static struct clocksource gic_clocksource = {
.archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC },
};
-static void __init __gic_clocksource_init(void)
+static int __init __gic_clocksource_init(void)
{
int ret;
@@ -159,6 +159,8 @@ static void __init __gic_clocksource_init(void)
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
pr_warn("GIC: Unable to register clocksource\n");
+
+ return ret;
}
void __init gic_clocksource_init(unsigned int frequency)
@@ -179,31 +181,35 @@ static void __init gic_clocksource_of_init(struct device_node *node)
struct clk *clk;
int ret;
- if (WARN_ON(!gic_present || !node->parent ||
- !of_device_is_compatible(node->parent, "mti,gic")))
- return;
+ if (!gic_present || !node->parent ||
+ !of_device_is_compatible(node->parent, "mti,gic")) {
+ pr_warn("No DT definition for the mips gic driver");
+ return -ENXIO;
+ }
clk = of_clk_get(node, 0);
if (!IS_ERR(clk)) {
- if (clk_prepare_enable(clk) < 0) {
+ ret = clk_prepare_enable(clk);
+ if (ret < 0) {
pr_err("GIC failed to enable clock\n");
clk_put(clk);
- return;
+ return ret;
}
gic_frequency = clk_get_rate(clk);
} else if (of_property_read_u32(node, "clock-frequency",
&gic_frequency)) {
pr_err("GIC frequency not specified.\n");
- return;
+ return -EINVAL;
}
gic_timer_irq = irq_of_parse_and_map(node, 0);
if (!gic_timer_irq) {
pr_err("GIC timer IRQ not specified.\n");
- return;
+ return -EINVAL;
}
- __gic_clocksource_init();
+ ret = __gic_clocksource_init();
+ if (ret)
+ return ret;
ret = gic_clockevent_init();
if (!ret && !IS_ERR(clk)) {
@@ -213,6 +219,8 @@ static void __init gic_clocksource_of_init(struct device_node *node)
/* And finally start the counter */
gic_start_count();
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
gic_clocksource_of_init);
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
index 19857af..8414544 100644
--- a/drivers/clocksource/moxart_timer.c
+++ b/drivers/clocksource/moxart_timer.c
@@ -119,34 +119,45 @@ static struct irqaction moxart_timer_irq = {
.dev_id = &moxart_clockevent,
};
-static void __init moxart_timer_init(struct device_node *node)
+static int __init moxart_timer_init(struct device_node *node)
{
int ret, irq;
unsigned long pclk;
struct clk *clk;
base = of_iomap(node, 0);
- if (!base)
- panic("%s: of_iomap failed\n", node->full_name);
+ if (!base) {
+ pr_err("%s: of_iomap failed\n", node->full_name);
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("%s: irq_of_parse_and_map failed\n", node->full_name);
+ if (irq <= 0) {
+ pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
+ return -EINVAL;
+ }
ret = setup_irq(irq, &moxart_timer_irq);
- if (ret)
- panic("%s: setup_irq failed\n", node->full_name);
+ if (ret) {
+ pr_err("%s: setup_irq failed\n", node->full_name);
+ return ret;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("%s: of_clk_get failed\n", node->full_name);
+ if (IS_ERR(clk)) {
+ pr_err("%s: of_clk_get failed\n", node->full_name);
+ return PTR_ERR(clk);
+ }
pclk = clk_get_rate(clk);
- if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
- "moxart_timer", pclk, 200, 32,
- clocksource_mmio_readl_down))
- panic("%s: clocksource_mmio_init failed\n", node->full_name);
+ ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
+ "moxart_timer", pclk, 200, 32,
+ clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
+ return ret;
+ }
clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
@@ -164,5 +175,7 @@ static void __init moxart_timer_init(struct device_node *node)
*/
clockevents_config_and_register(&moxart_clockevent, pclk,
0x4, 0xfffffffe);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
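moxart is representative of the most common shape of conversion in this series: each panic() becomes a pr_err() plus an errno. The errno choice follows an informal convention, inferred from the hunks here rather than documented anywhere:

	of_iomap()/ioremap() failure          -> -ENXIO
	irq_of_parse_and_map() failure        -> -EINVAL
	missing or invalid DT property        -> -EINVAL
	clk/regmap/syscon lookup failure      -> PTR_ERR(...)
	kzalloc()/alloc_percpu() failure      -> -ENOMEM
	helper that already returns an int    -> its value, propagated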
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
index 3d33a5e..3e4431e 100644
--- a/drivers/clocksource/mps2-timer.c
+++ b/drivers/clocksource/mps2-timer.c
@@ -250,7 +250,7 @@ out:
return ret;
}
-static void __init mps2_timer_init(struct device_node *np)
+static int __init mps2_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
- int ret;
+ int ret = 0;
@@ -259,7 +259,7 @@ static void __init mps2_timer_init(struct device_node *np)
ret = mps2_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -267,9 +267,11 @@ static void __init mps2_timer_init(struct device_node *np)
ret = mps2_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
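mps2 shares its probe shape with efm32 and lpc32xx further down: one compatible matches several timer nodes, the first node that initialises successfully becomes the clocksource, the next becomes the clockevent, and function-local static flags remember what has been claimed across calls. That shape, sketched with hypothetical helpers (my_clocksource_init() and my_clockevent_init() stand in for the driver-specific ones):

	static int __init two_role_timer_init(struct device_node *np)
	{
		static int has_clocksource, has_clockevent;
		int ret = 0;

		if (!has_clocksource) {
			ret = my_clocksource_init(np);
			if (!ret) {
				has_clocksource = 1;
				return 0;
			}
		}

		if (!has_clockevent) {
			ret = my_clockevent_init(np);
			if (!ret) {
				has_clockevent = 1;
				return 0;
			}
		}

		/* both roles filled already, or the last attempt's errno */
		return ret;
	}

Seeding ret = 0 matters: a third matching node, arriving after both roles are filled, falls through both blocks and must not report a stale error.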
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 7e583f8..9065949 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -181,7 +181,7 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
evt->gpt_base + GPT_IRQ_EN_REG);
}
-static void __init mtk_timer_init(struct device_node *node)
+static int __init mtk_timer_init(struct device_node *node)
{
struct mtk_clock_event_device *evt;
struct resource res;
@@ -190,7 +190,7 @@ static void __init mtk_timer_init(struct device_node *node)
evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt)
- return;
+ return -ENOMEM;
evt->dev.name = "mtk_tick";
evt->dev.rating = 300;
@@ -248,7 +248,7 @@ static void __init mtk_timer_init(struct device_node *node)
mtk_timer_enable_irq(evt, GPT_CLK_EVT);
- return;
+ return 0;
err_clk_disable:
clk_disable_unprepare(clk);
@@ -262,5 +262,7 @@ err_mem:
release_mem_region(res.start, resource_size(&res));
err_kzalloc:
kfree(evt);
+
+ return -EINVAL;
}
CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index f5ce296..0ba0a91 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -31,8 +31,6 @@
#include <linux/stmp_device.h>
#include <linux/sched_clock.h>
-#include <asm/mach/time.h>
-
/*
* There are 2 versions of the timrot on Freescale MXS-based SoCs.
* The v1 on MX23 only gets 16 bits counter, while v2 on MX28
@@ -226,10 +224,10 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
return 0;
}
-static void __init mxs_timer_init(struct device_node *np)
+static int __init mxs_timer_init(struct device_node *np)
{
struct clk *timer_clk;
- int irq;
+ int irq, ret;
mxs_timrot_base = of_iomap(np, 0);
WARN_ON(!mxs_timrot_base);
@@ -237,10 +235,12 @@ static void __init mxs_timer_init(struct device_node *np)
timer_clk = of_clk_get(np, 0);
if (IS_ERR(timer_clk)) {
pr_err("%s: failed to get clk\n", __func__);
- return;
+ return PTR_ERR(timer_clk);
}
- clk_prepare_enable(timer_clk);
+ ret = clk_prepare_enable(timer_clk);
+ if (ret)
+ return ret;
/*
* Initialize timers to a known state
@@ -278,11 +278,19 @@ static void __init mxs_timer_init(struct device_node *np)
mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
/* init and register the timer to the framework */
- mxs_clocksource_init(timer_clk);
- mxs_clockevent_init(timer_clk);
+ ret = mxs_clocksource_init(timer_clk);
+ if (ret)
+ return ret;
+
+ ret = mxs_clockevent_init(timer_clk);
+ if (ret)
+ return ret;
/* Make irqs happen */
irq = irq_of_parse_and_map(np, 0);
- setup_irq(irq, &mxs_timer_irq);
+ if (irq <= 0)
+ return -EINVAL;
+
+ return setup_irq(irq, &mxs_timer_irq);
}
CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index bc8dd44..3c124d1 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -193,10 +193,11 @@ static struct irqaction nmdk_timer_irq = {
.dev_id = &nmdk_clkevt,
};
-static void __init nmdk_timer_init(void __iomem *base, int irq,
+static int __init nmdk_timer_init(void __iomem *base, int irq,
struct clk *pclk, struct clk *clk)
{
unsigned long rate;
+ int ret;
mtu_base = base;
@@ -226,10 +227,12 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
/* Timer 0 is the free running clocksource */
nmdk_clksrc_reset();
- if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
- rate, 200, 32, clocksource_mmio_readl_down))
- pr_err("timer: failed to initialize clock source %s\n",
- "mtu_0");
+ ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
+ rate, 200, 32, clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("timer: failed to initialize clock source %s\n", "mtu_0");
+ return ret;
+ }
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
sched_clock_register(nomadik_read_sched_clock, 32, rate);
@@ -244,9 +247,11 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
mtu_delay_timer.freq = rate;
register_current_timer_delay(&mtu_delay_timer);
+
+ return 0;
}
-static void __init nmdk_timer_of_init(struct device_node *node)
+static int __init nmdk_timer_of_init(struct device_node *node)
{
struct clk *pclk;
struct clk *clk;
@@ -254,22 +259,30 @@ static void __init nmdk_timer_of_init(struct device_node *node)
int irq;
base = of_iomap(node, 0);
- if (!base)
- panic("Can't remap registers");
+ if (!base) {
+ pr_err("Can't remap registers");
+ return -ENXIO;
+ }
pclk = of_clk_get_by_name(node, "apb_pclk");
- if (IS_ERR(pclk))
- panic("could not get apb_pclk");
+ if (IS_ERR(pclk)) {
+ pr_err("could not get apb_pclk");
+ return PTR_ERR(pclk);
+ }
clk = of_clk_get_by_name(node, "timclk");
- if (IS_ERR(clk))
- panic("could not get timclk");
+ if (IS_ERR(clk)) {
+ pr_err("could not get timclk");
+ return PTR_ERR(clk);
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
- nmdk_timer_init(base, irq, pclk, clk);
+ return nmdk_timer_init(base, irq, pclk, clk);
}
CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
nmdk_timer_of_init);
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 45b6a49..937e10b 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -150,8 +150,10 @@ static struct irqaction pxa_ost0_irq = {
.dev_id = &ckevt_pxa_osmr0,
};
-static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
{
+ int ret;
+
timer_writel(0, OIER);
timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
@@ -159,39 +161,57 @@ static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
ckevt_pxa_osmr0.cpumask = cpumask_of(0);
- setup_irq(irq, &pxa_ost0_irq);
+ ret = setup_irq(irq, &pxa_ost0_irq);
+ if (ret) {
+ pr_err("Failed to setup irq");
+ return ret;
+ }
+
+ ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
+ 32, clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("Failed to init clocksource");
+ return ret;
+ }
- clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
- 32, clocksource_mmio_readl_up);
clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
MIN_OSCR_DELTA * 2, 0x7fffffff);
+
+ return 0;
}
-static void __init pxa_timer_dt_init(struct device_node *np)
+static int __init pxa_timer_dt_init(struct device_node *np)
{
struct clk *clk;
- int irq;
+ int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
- if (!timer_base)
- panic("%s: unable to map resource\n", np->name);
+ if (!timer_base) {
+ pr_err("%s: unable to map resource\n", np->name);
+ return -ENXIO;
+ }
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_crit("%s: unable to get clk\n", np->name);
- return;
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_crit("Failed to prepare clock");
+ return ret;
}
- clk_prepare_enable(clk);
/* we are only interested in OS-timer0 irq */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
- return;
+ return -EINVAL;
}
- pxa_timer_common_init(irq, clk_get_rate(clk));
+ return pxa_timer_common_init(irq, clk_get_rate(clk));
}
CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index f8e09f9..6625763 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -178,7 +178,7 @@ static struct delay_timer msm_delay_timer = {
.read_current_timer = msm_read_current_timer,
};
-static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
+static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
bool percpu)
{
struct clocksource *cs = &msm_clocksource;
@@ -218,12 +218,14 @@ err:
sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
msm_delay_timer.freq = dgt_hz;
register_current_timer_delay(&msm_delay_timer);
+
+ return res;
}
-static void __init msm_dt_timer_init(struct device_node *np)
+static int __init msm_dt_timer_init(struct device_node *np)
{
u32 freq;
- int irq;
+ int irq, ret;
struct resource res;
u32 percpu_offset;
void __iomem *base;
@@ -232,34 +234,35 @@ static void __init msm_dt_timer_init(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_err("Failed to map event base\n");
- return;
+ return -ENXIO;
}
/* We use GPT0 for the clockevent */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
pr_err("Can't get irq\n");
- return;
+ return -EINVAL;
}
/* We use CPU0's DGT for the clocksource */
if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
percpu_offset = 0;
- if (of_address_to_resource(np, 0, &res)) {
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
pr_err("Failed to parse DGT resource\n");
- return;
+ return ret;
}
cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
if (!cpu0_base) {
pr_err("Failed to map source base\n");
- return;
+ return -ENXIO;
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
pr_err("Unknown frequency\n");
- return;
+ return -EINVAL;
}
event_base = base + 0x4;
@@ -268,7 +271,7 @@ static void __init msm_dt_timer_init(struct device_node *np)
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
- msm_timer_init(freq, 32, irq, !!percpu_offset);
+ return msm_timer_init(freq, 32, irq, !!percpu_offset);
}
CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index b991b28..23e267a 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -19,7 +19,8 @@
#define TIMER_LOAD_COUNT0 0x00
#define TIMER_LOAD_COUNT1 0x04
-#define TIMER_CONTROL_REG 0x10
+#define TIMER_CONTROL_REG3288 0x10
+#define TIMER_CONTROL_REG3399 0x1c
#define TIMER_INT_STATUS 0x18
#define TIMER_DISABLE 0x0
@@ -31,6 +32,7 @@
struct bc_timer {
struct clock_event_device ce;
void __iomem *base;
+ void __iomem *ctrl;
u32 freq;
};
@@ -46,15 +48,20 @@ static inline void __iomem *rk_base(struct clock_event_device *ce)
return rk_timer(ce)->base;
}
+static inline void __iomem *rk_ctrl(struct clock_event_device *ce)
+{
+ return rk_timer(ce)->ctrl;
+}
+
static inline void rk_timer_disable(struct clock_event_device *ce)
{
- writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG);
+ writel_relaxed(TIMER_DISABLE, rk_ctrl(ce));
}
static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags)
{
writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags,
- rk_base(ce) + TIMER_CONTROL_REG);
+ rk_ctrl(ce));
}
static void rk_timer_update_counter(unsigned long cycles,
@@ -106,37 +113,42 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __init rk_timer_init(struct device_node *np)
+static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg)
{
struct clock_event_device *ce = &bc_timer.ce;
struct clk *timer_clk;
struct clk *pclk;
- int ret, irq;
+ int ret = -EINVAL, irq;
bc_timer.base = of_iomap(np, 0);
if (!bc_timer.base) {
pr_err("Failed to get base address for '%s'\n", TIMER_NAME);
- return;
+ return -ENXIO;
}
+ bc_timer.ctrl = bc_timer.base + ctrl_reg;
pclk = of_clk_get_by_name(np, "pclk");
if (IS_ERR(pclk)) {
+ ret = PTR_ERR(pclk);
pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
- if (clk_prepare_enable(pclk)) {
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk)) {
+ ret = PTR_ERR(timer_clk);
pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
goto out_timer_clk;
}
- if (clk_prepare_enable(timer_clk)) {
+ ret = clk_prepare_enable(timer_clk);
+ if (ret) {
pr_err("Failed to enable timer clock\n");
goto out_timer_clk;
}
@@ -145,17 +157,19 @@ static void __init rk_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
+ ret = -EINVAL;
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
goto out_irq;
}
ce->name = TIMER_NAME;
- ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ;
ce->set_next_event = rk_timer_set_next_event;
ce->set_state_shutdown = rk_timer_shutdown;
ce->set_state_periodic = rk_timer_set_periodic;
ce->irq = irq;
- ce->cpumask = cpumask_of(0);
+ ce->cpumask = cpu_possible_mask;
ce->rating = 250;
rk_timer_interrupt_clear(ce);
@@ -169,7 +183,7 @@ static void __init rk_timer_init(struct device_node *np)
clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
- return;
+ return 0;
out_irq:
clk_disable_unprepare(timer_clk);
@@ -177,6 +191,21 @@ out_timer_clk:
clk_disable_unprepare(pclk);
out_unmap:
iounmap(bc_timer.base);
+
+ return ret;
+}
+
+static int __init rk3288_timer_init(struct device_node *np)
+{
+ return rk_timer_init(np, TIMER_CONTROL_REG3288);
+}
+
+static int __init rk3399_timer_init(struct device_node *np)
+{
+ return rk_timer_init(np, TIMER_CONTROL_REG3399);
}
-CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
+CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer",
+ rk3288_timer_init);
+CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer",
+ rk3399_timer_init);
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 9502bc4..54565bd 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -130,9 +130,9 @@ static void samsung_time_stop(unsigned int channel)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_START(channel);
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -148,14 +148,14 @@ static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
tcon |= TCON_MANUALUPDATE(tcon_chan);
- __raw_writel(tcnt, pwm.base + REG_TCNTB(channel));
- __raw_writel(tcnt, pwm.base + REG_TCMPB(channel));
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel));
+ writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel));
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -170,7 +170,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_MANUALUPDATE(channel);
tcon |= TCON_START(channel);
@@ -180,7 +180,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
else
tcon &= ~TCON_AUTORELOAD(channel);
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -333,11 +333,10 @@ static u64 notrace samsung_read_sched_clock(void)
return samsung_clocksource_read(NULL);
}
-static void __init samsung_clocksource_init(void)
+static int __init samsung_clocksource_init(void)
{
unsigned long pclk;
unsigned long clock_rate;
- int ret;
pclk = clk_get_rate(pwm.timerclk);
@@ -358,9 +357,7 @@ static void __init samsung_clocksource_init(void)
pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
- ret = clocksource_register_hz(&samsung_clocksource, clock_rate);
- if (ret)
- panic("samsung_clocksource_timer: can't register clocksource\n");
+ return clocksource_register_hz(&samsung_clocksource, clock_rate);
}
static void __init samsung_timer_resources(void)
@@ -380,26 +377,31 @@ static void __init samsung_timer_resources(void)
/*
* PWM master driver
*/
-static void __init _samsung_pwm_clocksource_init(void)
+static int __init _samsung_pwm_clocksource_init(void)
{
u8 mask;
int channel;
mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
channel = fls(mask) - 1;
- if (channel < 0)
- panic("failed to find PWM channel for clocksource");
+ if (channel < 0) {
+ pr_crit("failed to find PWM channel for clocksource");
+ return -EINVAL;
+ }
pwm.source_id = channel;
mask &= ~(1 << channel);
channel = fls(mask) - 1;
- if (channel < 0)
- panic("failed to find PWM channel for clock event");
+ if (channel < 0) {
+ pr_crit("failed to find PWM channel for clock event");
+ return -EINVAL;
+ }
pwm.event_id = channel;
samsung_timer_resources();
samsung_clockevent_init();
- samsung_clocksource_init();
+
+ return samsung_clocksource_init();
}
void __init samsung_pwm_clocksource_init(void __iomem *base,
@@ -417,8 +419,8 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
}
#ifdef CONFIG_CLKSRC_OF
-static void __init samsung_pwm_alloc(struct device_node *np,
- const struct samsung_pwm_variant *variant)
+static int __init samsung_pwm_alloc(struct device_node *np,
+ const struct samsung_pwm_variant *variant)
{
struct property *prop;
const __be32 *cur;
@@ -441,14 +443,16 @@ static void __init samsung_pwm_alloc(struct device_node *np,
pwm.base = of_iomap(np, 0);
if (!pwm.base) {
pr_err("%s: failed to map PWM registers\n", __func__);
- return;
+ return -ENXIO;
}
pwm.timerclk = of_clk_get_by_name(np, "timers");
- if (IS_ERR(pwm.timerclk))
- panic("failed to get timers clock for timer");
+ if (IS_ERR(pwm.timerclk)) {
+ pr_crit("failed to get timers clock for timer");
+ return PTR_ERR(pwm.timerclk);
+ }
- _samsung_pwm_clocksource_init();
+ return _samsung_pwm_clocksource_init();
}
static const struct samsung_pwm_variant s3c24xx_variant = {
@@ -458,9 +462,9 @@ static const struct samsung_pwm_variant s3c24xx_variant = {
.tclk_mask = (1 << 4),
};
-static void __init s3c2410_pwm_clocksource_init(struct device_node *np)
+static int __init s3c2410_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s3c24xx_variant);
+ return samsung_pwm_alloc(np, &s3c24xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
@@ -471,9 +475,9 @@ static const struct samsung_pwm_variant s3c64xx_variant = {
.tclk_mask = (1 << 7) | (1 << 6) | (1 << 5),
};
-static void __init s3c64xx_pwm_clocksource_init(struct device_node *np)
+static int __init s3c64xx_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s3c64xx_variant);
+ return samsung_pwm_alloc(np, &s3c64xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
@@ -484,9 +488,9 @@ static const struct samsung_pwm_variant s5p64x0_variant = {
.tclk_mask = 0,
};
-static void __init s5p64x0_pwm_clocksource_init(struct device_node *np)
+static int __init s5p64x0_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s5p64x0_variant);
+ return samsung_pwm_alloc(np, &s5p64x0_variant);
}
CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
@@ -497,9 +501,9 @@ static const struct samsung_pwm_variant s5p_variant = {
.tclk_mask = (1 << 5),
};
-static void __init s5p_pwm_clocksource_init(struct device_node *np)
+static int __init s5p_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s5p_variant);
+ return samsung_pwm_alloc(np, &s5p_variant);
}
CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
#endif
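samsung_pwm_timer keeps one generic probe path and a small samsung_pwm_variant descriptor per compatible, which is why each CLOCKSOURCE_OF_DECLARE() wrapper above stays two lines. Supporting another SoC family is, schematically, this much (the s5pv310 name and field values are invented for illustration; output_mask is filled in from the samsung,pwm-outputs DT property by samsung_pwm_alloc()):

	static const struct samsung_pwm_variant s5pv310_variant = {
		.bits		= 32,
		.div_base	= 0,
		.has_tint_cstat	= true,
		.tclk_mask	= 0,
	};

	static int __init s5pv310_pwm_clocksource_init(struct device_node *np)
	{
		return samsung_pwm_alloc(np, &s5pv310_variant);
	}
	CLOCKSOURCE_OF_DECLARE(s5pv310_pwm, "samsung,s5pv310-pwm",
			       s5pv310_pwm_clocksource_init);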
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 6f3719d..97669ee 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -146,7 +146,7 @@ static u64 notrace sun4i_timer_sched_read(void)
return ~readl(timer_base + TIMER_CNTVAL_REG(1));
}
-static void __init sun4i_timer_init(struct device_node *node)
+static int __init sun4i_timer_init(struct device_node *node)
{
unsigned long rate = 0;
struct clk *clk;
@@ -154,17 +154,28 @@ static void __init sun4i_timer_init(struct device_node *node)
u32 val;
timer_base = of_iomap(node, 0);
- if (!timer_base)
- panic("Can't map registers");
+ if (!timer_base) {
+ pr_crit("Can't map registers");
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_crit("Can't parse IRQ");
+ return -EINVAL;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("Can't get timer clock");
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+ pr_crit("Can't get timer clock");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Failed to prepare clock");
+ return ret;
+ }
rate = clk_get_rate(clk);
@@ -182,8 +193,12 @@ static void __init sun4i_timer_init(struct device_node *node)
of_machine_is_compatible("allwinner,sun5i-a10s"))
sched_clock_register(sun4i_timer_sched_read, 32, rate);
- clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
- rate, 350, 32, clocksource_mmio_readl_down);
+ ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
+ rate, 350, 32, clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("Failed to register clocksource");
+ return ret;
+ }
ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
@@ -200,12 +215,16 @@ static void __init sun4i_timer_init(struct device_node *node)
TIMER_SYNC_TICKS, 0xffffffff);
ret = setup_irq(irq, &sun4i_timer_irq);
- if (ret)
- pr_warn("failed to setup irq %d\n", irq);
+ if (ret) {
+ pr_err("failed to setup irq %d\n", irq);
+ return ret;
+ }
/* Enable timer0 interrupt */
val = readl(timer_base + TIMER_IRQ_EN_REG);
writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index c407c47..12fcef8 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -19,7 +19,7 @@ static u64 notrace read_sched_clock(void)
return read_xtal_counter();
}
-static void __init tango_clocksource_init(struct device_node *np)
+static int __init tango_clocksource_init(struct device_node *np)
{
struct clk *clk;
int xtal_freq, ret;
@@ -27,13 +27,13 @@ static void __init tango_clocksource_init(struct device_node *np)
xtal_in_cnt = of_iomap(np, 0);
if (xtal_in_cnt == NULL) {
pr_err("%s: invalid address\n", np->full_name);
- return;
+ return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: invalid clock\n", np->full_name);
- return;
+ return PTR_ERR(clk);
}
xtal_freq = clk_get_rate(clk);
@@ -44,11 +44,13 @@ static void __init tango_clocksource_init(struct device_node *np)
32, clocksource_mmio_readl_up);
if (ret) {
pr_err("%s: registration failed\n", np->full_name);
- return;
+ return ret;
}
sched_clock_register(read_sched_clock, 32, xtal_freq);
register_current_timer_delay(&delay_timer);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 7b94ad2..f960891 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -165,7 +165,7 @@ static struct irqaction tegra_timer_irq = {
.dev_id = &tegra_clockevent,
};
-static void __init tegra20_init_timer(struct device_node *np)
+static int __init tegra20_init_timer(struct device_node *np)
{
struct clk *clk;
unsigned long rate;
@@ -174,13 +174,13 @@ static void __init tegra20_init_timer(struct device_node *np)
timer_reg_base = of_iomap(np, 0);
if (!timer_reg_base) {
pr_err("Can't map timer registers\n");
- BUG();
+ return -ENXIO;
}
tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
if (tegra_timer_irq.irq <= 0) {
pr_err("Failed to map timer IRQ\n");
- BUG();
+ return -EINVAL;
}
clk = of_clk_get(np, 0);
@@ -211,10 +211,12 @@ static void __init tegra20_init_timer(struct device_node *np)
sched_clock_register(tegra_read_sched_clock, 32, 1000000);
- if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
- "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+ ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+ "timer_us", 1000000, 300, 32,
+ clocksource_mmio_readl_up);
+ if (ret) {
pr_err("Failed to register clocksource\n");
- BUG();
+ return ret;
}
tegra_delay_timer.read_current_timer =
@@ -225,24 +227,26 @@ static void __init tegra20_init_timer(struct device_node *np)
ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
if (ret) {
pr_err("Failed to register timer IRQ: %d\n", ret);
- BUG();
+ return ret;
}
tegra_clockevent.cpumask = cpu_all_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
-static void __init tegra20_init_rtc(struct device_node *np)
+static int __init tegra20_init_rtc(struct device_node *np)
{
struct clk *clk;
rtc_base = of_iomap(np, 0);
if (!rtc_base) {
pr_err("Can't map RTC registers");
- BUG();
+ return -ENXIO;
}
/*
@@ -255,6 +259,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- register_persistent_clock(NULL, tegra_read_persistent_clock64);
+ return register_persistent_clock(NULL, tegra_read_persistent_clock64);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d93ec3c..20ec066 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -246,7 +246,7 @@ static void armada_370_xp_timer_resume(void)
writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
-struct syscore_ops armada_370_xp_timer_syscore_ops = {
+static struct syscore_ops armada_370_xp_timer_syscore_ops = {
.suspend = armada_370_xp_timer_suspend,
.resume = armada_370_xp_timer_resume,
};
@@ -260,14 +260,22 @@ static struct delay_timer armada_370_delay_timer = {
.read_current_timer = armada_370_delay_timer_read,
};
-static void __init armada_370_xp_timer_common_init(struct device_node *np)
+static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
u32 clr = 0, set = 0;
int res;
timer_base = of_iomap(np, 0);
- WARN_ON(!timer_base);
+ if (!timer_base) {
+ pr_err("Failed to iomap");
+ return -ENXIO;
+ }
+
local_base = of_iomap(np, 1);
+ if (!local_base) {
+ pr_err("Failed to iomap");
+ return -ENXIO;
+ }
if (timer25Mhz) {
set = TIMER0_25MHZ;
@@ -306,14 +314,19 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
*/
sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
- clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
- "armada_370_xp_clocksource",
- timer_clk, 300, 32, clocksource_mmio_readl_down);
+ res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
+ "armada_370_xp_clocksource",
+ timer_clk, 300, 32, clocksource_mmio_readl_down);
+ if (res) {
+ pr_err("Failed to initialize clocksource mmio");
+ return res;
+ }
register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
armada_370_xp_evt = alloc_percpu(struct clock_event_device);
-
+ if (!armada_370_xp_evt)
+ return -ENOMEM;
/*
* Setup clockevent timer (interrupt-driven).
@@ -323,33 +336,54 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
"armada_370_xp_per_cpu_tick",
armada_370_xp_evt);
/* Immediately configure the timer on the boot CPU */
- if (!res)
- armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+ if (res) {
+ pr_err("Failed to request percpu irq");
+ return res;
+ }
+
+ res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+ if (res) {
+ pr_err("Failed to setup timer");
+ return res;
+ }
register_syscore_ops(&armada_370_xp_timer_syscore_ops);
+
+ return 0;
}
-static void __init armada_xp_timer_init(struct device_node *np)
+static int __init armada_xp_timer_init(struct device_node *np)
{
struct clk *clk = of_clk_get_by_name(np, "fixed");
+ int ret;
+
+ /* The 25Mhz fixed clock is mandatory, and must always be available */
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
- /* The 25Mhz fixed clock is mandatory, and must always be available */
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
timer_clk = clk_get_rate(clk);
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
armada_xp_timer_init);
-static void __init armada_375_timer_init(struct device_node *np)
+static int __init armada_375_timer_init(struct device_node *np)
{
struct clk *clk;
+ int ret;
clk = of_clk_get_by_name(np, "fixed");
if (!IS_ERR(clk)) {
- clk_prepare_enable(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
timer_clk = clk_get_rate(clk);
} else {
@@ -360,27 +394,43 @@ static void __init armada_375_timer_init(struct device_node *np)
clk = of_clk_get(np, 0);
/* Must have at least a clock */
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
}
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
armada_375_timer_init);
-static void __init armada_370_timer_init(struct device_node *np)
+static int __init armada_370_timer_init(struct device_node *np)
{
- struct clk *clk = of_clk_get(np, 0);
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
armada_370_timer_init);
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index b06e4c2..5ac344b 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -233,10 +233,15 @@ static int __init efm32_clockevent_init(struct device_node *np)
DIV_ROUND_CLOSEST(rate, 1024),
0xf, 0xffff);
- setup_irq(irq, &efm32_clock_event_irq);
+ ret = setup_irq(irq, &efm32_clock_event_irq);
+ if (ret) {
+ pr_err("Failed setup irq");
+ goto err_setup_irq;
+ }
return 0;
+err_setup_irq:
err_get_irq:
iounmap(base);
@@ -255,16 +260,16 @@ err_clk_get:
* This function asserts that we have exactly one clocksource and one
* clock_event_device in the end.
*/
-static void __init efm32_timer_init(struct device_node *np)
+static int __init efm32_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
- int ret;
+ int ret = 0;
if (!has_clocksource) {
ret = efm32_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -272,9 +277,11 @@ static void __init efm32_timer_init(struct device_node *np)
ret = efm32_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index daae61e..9649cfd 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -288,16 +288,16 @@ err_clk_enable:
* This function asserts that we have exactly one clocksource and one
* clock_event_device in the end.
*/
-static void __init lpc32xx_timer_init(struct device_node *np)
+static int __init lpc32xx_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
- int ret;
+ int ret = 0;
if (!has_clocksource) {
ret = lpc32xx_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -305,8 +305,10 @@ static void __init lpc32xx_timer_init(struct device_node *np)
ret = lpc32xx_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 0ece742..a28f496 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -104,25 +104,36 @@ static struct irqaction orion_clkevt_irq = {
.handler = orion_clkevt_irq_handler,
};
-static void __init orion_timer_init(struct device_node *np)
+static int __init orion_timer_init(struct device_node *np)
{
struct clk *clk;
- int irq;
+ int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
- if (!timer_base)
- panic("%s: unable to map resource\n", np->name);
+ if (!timer_base) {
+ pr_err("%s: unable to map resource\n", np->name);
+ return -ENXIO;
+ }
clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- panic("%s: unable to get clk\n", np->name);
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+ pr_err("%s: unable to get clk\n", np->name);
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Failed to prepare clock");
+ return ret;
+ }
/* we are only interested in timer1 irq */
irq = irq_of_parse_and_map(np, 1);
- if (irq <= 0)
- panic("%s: unable to parse timer1 irq\n", np->name);
+ if (irq <= 0) {
+ pr_err("%s: unable to parse timer1 irq\n", np->name);
+ return -EINVAL;
+ }
/* setup timer0 as free-running clocksource */
writel(~0, timer_base + TIMER0_VAL);
@@ -130,19 +141,30 @@ static void __init orion_timer_init(struct device_node *np)
atomic_io_modify(timer_base + TIMER_CTRL,
TIMER0_RELOAD_EN | TIMER0_EN,
TIMER0_RELOAD_EN | TIMER0_EN);
- clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
- clk_get_rate(clk), 300, 32,
- clocksource_mmio_readl_down);
+
+ ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
+ clk_get_rate(clk), 300, 32,
+ clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("Failed to initialize mmio timer");
+ return ret;
+ }
+
sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
/* setup timer1 as clockevent timer */
- if (setup_irq(irq, &orion_clkevt_irq))
- panic("%s: unable to setup irq\n", np->name);
+ ret = setup_irq(irq, &orion_clkevt_irq);
+ if (ret) {
+ pr_err("%s: unable to setup irq\n", np->name);
+ return ret;
+ }
ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
orion_clkevt.cpumask = cpumask_of(0);
orion_clkevt.irq = irq;
clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk),
ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index 376e59b..a7d9a08 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -148,7 +148,7 @@ static struct pistachio_clocksource pcs_gpt = {
},
};
-static void __init pistachio_clksrc_of_init(struct device_node *node)
+static int __init pistachio_clksrc_of_init(struct device_node *node)
{
struct clk *sys_clk, *fast_clk;
struct regmap *periph_regs;
@@ -158,45 +158,45 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
pcs_gpt.base = of_iomap(node, 0);
if (!pcs_gpt.base) {
pr_err("cannot iomap\n");
- return;
+ return -ENXIO;
}
periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
if (IS_ERR(periph_regs)) {
pr_err("cannot get peripheral regmap (%ld)\n",
PTR_ERR(periph_regs));
- return;
+ return PTR_ERR(periph_regs);
}
/* Switch to using the fast counter clock */
ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
0xf, 0x0);
if (ret)
- return;
+ return ret;
sys_clk = of_clk_get_by_name(node, "sys");
if (IS_ERR(sys_clk)) {
pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
- return;
+ return PTR_ERR(sys_clk);
}
fast_clk = of_clk_get_by_name(node, "fast");
if (IS_ERR(fast_clk)) {
pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
- return;
+ return PTR_ERR(fast_clk);
}
ret = clk_prepare_enable(sys_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
- return;
+ return ret;
}
ret = clk_prepare_enable(fast_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
clk_disable_unprepare(sys_clk);
- return;
+ return ret;
}
rate = clk_get_rate(fast_clk);
@@ -212,7 +212,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
raw_spin_lock_init(&pcs_gpt.lock);
sched_clock_register(pistachio_read_sched_clock, 32, rate);
- clocksource_register_hz(&pcs_gpt.cs, rate);
+ return clocksource_register_hz(&pcs_gpt.cs, rate);
}
CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
pistachio_clksrc_of_init);
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 27fa136..90f8fbc 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -238,7 +238,7 @@ static struct notifier_block sirfsoc_cpu_nb = {
.notifier_call = sirfsoc_cpu_notify,
};
-static void __init sirfsoc_clockevent_init(void)
+static int __init sirfsoc_clockevent_init(void)
{
sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
BUG_ON(!sirfsoc_clockevent);
@@ -246,11 +246,11 @@ static void __init sirfsoc_clockevent_init(void)
BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
/* Immediately configure the timer on the boot CPU */
- sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+ return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
}
/* initialize the kernel jiffy timer source */
-static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
+static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
{
struct clk *clk;
@@ -279,23 +279,29 @@ static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
- sirfsoc_clockevent_init();
+ return sirfsoc_clockevent_init();
}
-static void __init sirfsoc_of_timer_init(struct device_node *np)
+static int __init sirfsoc_of_timer_init(struct device_node *np)
{
sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base)
- panic("unable to map timer cpu registers\n");
+ if (!sirfsoc_timer_base) {
+ pr_err("unable to map timer cpu registers\n");
+ return -ENXIO;
+ }
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
- if (!sirfsoc_timer_irq.irq)
- panic("No irq passed for timer0 via DT\n");
+ if (!sirfsoc_timer_irq.irq) {
+ pr_err("No irq passed for timer0 via DT\n");
+ return -EINVAL;
+ }
sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
- if (!sirfsoc_timer1_irq.irq)
- panic("No irq passed for timer1 via DT\n");
+ if (!sirfsoc_timer1_irq.irq) {
+ pr_err("No irq passed for timer1 via DT\n");
+ return -EINVAL;
+ }
- sirfsoc_atlas7_timer_init(np);
+ return sirfsoc_atlas7_timer_init(np);
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index d911c5d..1ffac0c 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -177,7 +177,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
/*
* Set up both clocksource and clockevent support.
*/
-static void __init at91sam926x_pit_common_init(struct pit_data *data)
+static int __init at91sam926x_pit_common_init(struct pit_data *data)
{
unsigned long pit_rate;
unsigned bits;
@@ -204,14 +204,21 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clksrc.rating = 175;
data->clksrc.read = read_pit_clk;
data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
- clocksource_register_hz(&data->clksrc, pit_rate);
+
+ ret = clocksource_register_hz(&data->clksrc, pit_rate);
+ if (ret) {
+ pr_err("Failed to register clocksource");
+ return ret;
+ }
/* Set up irq handler */
ret = request_irq(data->irq, at91sam926x_pit_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", data);
- if (ret)
- panic(pr_fmt("Unable to setup IRQ\n"));
+ if (ret) {
+ pr_err("Unable to setup IRQ\n");
+ return ret;
+ }
/* Set up and register clockevents */
data->clkevt.name = "pit";
@@ -226,34 +233,42 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clkevt.resume = at91sam926x_pit_resume;
data->clkevt.suspend = at91sam926x_pit_suspend;
clockevents_register_device(&data->clkevt);
+
+ return 0;
}
-static void __init at91sam926x_pit_dt_init(struct device_node *node)
+static int __init at91sam926x_pit_dt_init(struct device_node *node)
{
struct pit_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- panic(pr_fmt("Unable to allocate memory\n"));
+ return -ENOMEM;
data->base = of_iomap(node, 0);
- if (!data->base)
- panic(pr_fmt("Could not map PIT address\n"));
+ if (!data->base) {
+ pr_err("Could not map PIT address\n");
+ return -ENXIO;
+ }
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck))
/* Fallback on clkdev for !CCF-based boards */
data->mck = clk_get(NULL, "mck");
- if (IS_ERR(data->mck))
- panic(pr_fmt("Unable to get mck clk\n"));
+ if (IS_ERR(data->mck)) {
+ pr_err("Unable to get mck clk\n");
+ return PTR_ERR(data->mck);
+ }
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
- if (!data->irq)
- panic(pr_fmt("Unable to get IRQ from DT\n"));
+ if (!data->irq) {
+ pr_err("Unable to get IRQ from DT\n");
+ return -EINVAL;
+ }
- at91sam926x_pit_common_init(data);
+ return at91sam926x_pit_common_init(data);
}
CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index 29d21d6..e90ab5b 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -194,15 +194,17 @@ static struct clock_event_device clkevt = {
/*
* ST (system timer) module supports both clockevents and clocksource.
*/
-static void __init atmel_st_timer_init(struct device_node *node)
+static int __init atmel_st_timer_init(struct device_node *node)
{
struct clk *sclk;
unsigned int sclk_rate, val;
int irq, ret;
regmap_st = syscon_node_to_regmap(node);
- if (IS_ERR(regmap_st))
- panic(pr_fmt("Unable to get regmap\n"));
+ if (IS_ERR(regmap_st)) {
+ pr_err("Unable to get regmap\n");
+ return PTR_ERR(regmap_st);
+ }
/* Disable all timer interrupts, and clear any pending ones */
regmap_write(regmap_st, AT91_ST_IDR,
@@ -211,27 +213,37 @@ static void __init atmel_st_timer_init(struct device_node *node)
/* Get the interrupts property */
irq = irq_of_parse_and_map(node, 0);
- if (!irq)
- panic(pr_fmt("Unable to get IRQ from DT\n"));
+ if (!irq) {
+ pr_err("Unable to get IRQ from DT\n");
+ return -EINVAL;
+ }
/* Make IRQs happen for the system timer */
ret = request_irq(irq, at91rm9200_timer_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", regmap_st);
- if (ret)
- panic(pr_fmt("Unable to setup IRQ\n"));
+ if (ret) {
+ pr_err("Unable to setup IRQ\n");
+ return ret;
+ }
sclk = of_clk_get(node, 0);
- if (IS_ERR(sclk))
- panic(pr_fmt("Unable to get slow clock\n"));
+ if (IS_ERR(sclk)) {
+ pr_err("Unable to get slow clock\n");
+ return PTR_ERR(sclk);
+ }
- clk_prepare_enable(sclk);
- if (ret)
- panic(pr_fmt("Could not enable slow clock\n"));
+ ret = clk_prepare_enable(sclk);
+ if (ret) {
+ pr_err("Could not enable slow clock\n");
+ return ret;
+ }
sclk_rate = clk_get_rate(sclk);
- if (!sclk_rate)
- panic(pr_fmt("Invalid slow clock rate\n"));
+ if (!sclk_rate) {
+ pr_err("Invalid slow clock rate\n");
+ return -EINVAL;
+ }
timer_latch = (sclk_rate + HZ / 2) / HZ;
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
@@ -246,7 +258,7 @@ static void __init atmel_st_timer_init(struct device_node *node)
2, AT91_ST_ALMV);
/* register clocksource */
- clocksource_register_hz(&clk32k, sclk_rate);
+ return clocksource_register_hz(&clk32k, sclk_rate);
}
CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
atmel_st_timer_init);
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index a536eeb..10318cc 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -63,7 +63,7 @@ struct digicolor_timer {
int timer_id; /* one of TIMER_* */
};
-struct digicolor_timer *dc_timer(struct clock_event_device *ce)
+static struct digicolor_timer *dc_timer(struct clock_event_device *ce)
{
return container_of(ce, struct digicolor_timer, ce);
}
@@ -148,7 +148,7 @@ static u64 notrace digicolor_timer_sched_read(void)
return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
}
-static void __init digicolor_timer_init(struct device_node *node)
+static int __init digicolor_timer_init(struct device_node *node)
{
unsigned long rate;
struct clk *clk;
@@ -161,19 +161,19 @@ static void __init digicolor_timer_init(struct device_node *node)
dc_timer_dev.base = of_iomap(node, 0);
if (!dc_timer_dev.base) {
pr_err("Can't map registers");
- return;
+ return -ENXIO;
}
irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
if (irq <= 0) {
pr_err("Can't parse IRQ");
- return;
+ return -EINVAL;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock");
- return;
+ return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
@@ -190,13 +190,17 @@ static void __init digicolor_timer_init(struct device_node *node)
ret = request_irq(irq, digicolor_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
&dc_timer_dev.ce);
- if (ret)
+ if (ret) {
pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
+ return ret;
+ }
dc_timer_dev.ce.cpumask = cpu_possible_mask;
dc_timer_dev.ce.irq = irq;
clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
digicolor_timer_init);
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 99ec967..f595460 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -407,8 +407,10 @@ static const struct imx_gpt_data imx6dl_gpt_data = {
.set_next_event = v2_set_next_event,
};
-static void __init _mxc_timer_init(struct imx_timer *imxtm)
+static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
+ int ret;
+
switch (imxtm->type) {
case GPT_TYPE_IMX1:
imxtm->gpt = &imx1_gpt_data;
@@ -423,12 +425,12 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
imxtm->gpt = &imx6dl_gpt_data;
break;
default:
- BUG();
+ return -EINVAL;
}
if (IS_ERR(imxtm->clk_per)) {
pr_err("i.MX timer: unable to get clk\n");
- return;
+ return PTR_ERR(imxtm->clk_per);
}
if (!IS_ERR(imxtm->clk_ipg))
@@ -446,8 +448,11 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
imxtm->gpt->gpt_setup_tctl(imxtm);
/* init and register the timer to the framework */
- mxc_clocksource_init(imxtm);
- mxc_clockevent_init(imxtm);
+ ret = mxc_clocksource_init(imxtm);
+ if (ret)
+ return ret;
+
+ return mxc_clockevent_init(imxtm);
}
void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
@@ -469,21 +474,27 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
_mxc_timer_init(imxtm);
}
-static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
+static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
struct imx_timer *imxtm;
static int initialized;
+ int ret;
/* Support one instance only */
if (initialized)
- return;
+ return 0;
imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
- BUG_ON(!imxtm);
+ if (!imxtm)
+ return -ENOMEM;
imxtm->base = of_iomap(np, 0);
- WARN_ON(!imxtm->base);
+ if (!imxtm->base)
+ return -ENXIO;
+
imxtm->irq = irq_of_parse_and_map(np, 0);
+ if (imxtm->irq <= 0)
+ return -EINVAL;
imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
@@ -494,22 +505,26 @@ static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type
imxtm->type = type;
- _mxc_timer_init(imxtm);
+ ret = _mxc_timer_init(imxtm);
+ if (ret)
+ return ret;
initialized = 1;
+
+ return 0;
}
-static void __init imx1_timer_init_dt(struct device_node *np)
+static int __init imx1_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX1);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}
-static void __init imx21_timer_init_dt(struct device_node *np)
+static int __init imx21_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX21);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}
-static void __init imx31_timer_init_dt(struct device_node *np)
+static int __init imx31_timer_init_dt(struct device_node *np)
{
enum imx_gpt_type type = GPT_TYPE_IMX31;
@@ -522,12 +537,12 @@ static void __init imx31_timer_init_dt(struct device_node *np)
if (of_machine_is_compatible("fsl,imx6dl"))
type = GPT_TYPE_IMX6DL;
- mxc_timer_init_dt(np, type);
+ return mxc_timer_init_dt(np, type);
}
-static void __init imx6dl_timer_init_dt(struct device_node *np)
+static int __init imx6dl_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}
CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 3f59ac2..df6e672 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -36,11 +36,12 @@ static u64 notrace integrator_read_sched_clock(void)
return -readl(sched_clk_base + TIMER_VALUE);
}
-static void integrator_clocksource_init(unsigned long inrate,
- void __iomem *base)
+static int integrator_clocksource_init(unsigned long inrate,
+ void __iomem *base)
{
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
unsigned long rate = inrate;
+ int ret;
if (rate >= 1500000) {
rate /= 16;
@@ -50,11 +51,15 @@ static void integrator_clocksource_init(unsigned long inrate,
writel(0xffff, base + TIMER_LOAD);
writel(ctrl, base + TIMER_CTRL);
- clocksource_mmio_init(base + TIMER_VALUE, "timer2",
- rate, 200, 16, clocksource_mmio_readl_down);
+ ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2",
+ rate, 200, 16, clocksource_mmio_readl_down);
+ if (ret)
+ return ret;
sched_clk_base = base;
sched_clock_register(integrator_read_sched_clock, 16, rate);
+
+ return 0;
}
static unsigned long timer_reload;
@@ -138,11 +143,12 @@ static struct irqaction integrator_timer_irq = {
.dev_id = &integrator_clockevent,
};
-static void integrator_clockevent_init(unsigned long inrate,
- void __iomem *base, int irq)
+static int integrator_clockevent_init(unsigned long inrate,
+ void __iomem *base, int irq)
{
unsigned long rate = inrate;
unsigned int ctrl = 0;
+ int ret;
clkevt_base = base;
/* Calculate and program a divisor */
@@ -156,14 +162,18 @@ static void integrator_clockevent_init(unsigned long inrate,
timer_reload = rate / HZ;
writel(ctrl, clkevt_base + TIMER_CTRL);
- setup_irq(irq, &integrator_timer_irq);
+ ret = setup_irq(irq, &integrator_timer_irq);
+ if (ret)
+ return ret;
+
clockevents_config_and_register(&integrator_clockevent,
rate,
1,
0xffffU);
+ return 0;
}
-static void __init integrator_ap_timer_init_of(struct device_node *node)
+static int __init integrator_ap_timer_init_of(struct device_node *node)
{
const char *path;
void __iomem *base;
@@ -176,12 +186,12 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
base = of_io_request_and_map(node, 0, "integrator-timer");
if (IS_ERR(base))
- return;
+ return PTR_ERR(base);
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("No clock for %s\n", node->name);
- return;
+ return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
@@ -189,30 +199,37 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
err = of_property_read_string(of_aliases,
"arm,timer-primary", &path);
- if (WARN_ON(err))
- return;
+ if (err) {
+ pr_warn("Failed to read arm,timer-primary property\n");
+ return err;
+ }
+
pri_node = of_find_node_by_path(path);
+
err = of_property_read_string(of_aliases,
"arm,timer-secondary", &path);
- if (WARN_ON(err))
- return;
+ if (err) {
+ pr_warn("Failed to read arm,timer-secondary property\n");
+ return err;
+ }
+
sec_node = of_find_node_by_path(path);
- if (node == pri_node) {
+ if (node == pri_node)
/* The primary timer lacks IRQ, use as clocksource */
- integrator_clocksource_init(rate, base);
- return;
- }
+ return integrator_clocksource_init(rate, base);
if (node == sec_node) {
/* The secondary timer will drive the clock event */
irq = irq_of_parse_and_map(node, 0);
- integrator_clockevent_init(rate, base, irq);
- return;
+ return integrator_clockevent_init(rate, base, irq);
}
pr_info("Timer @%p unused\n", base);
clk_disable_unprepare(clk);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 1cea08c..ab68a47 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -144,7 +144,7 @@ static int keystone_set_periodic(struct clock_event_device *evt)
return 0;
}
-static void __init keystone_timer_init(struct device_node *np)
+static int __init keystone_timer_init(struct device_node *np)
{
struct clock_event_device *event_dev = &timer.event_dev;
unsigned long rate;
@@ -154,20 +154,20 @@ static void __init keystone_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
- return;
+ return -EINVAL;
}
timer.base = of_iomap(np, 0);
if (!timer.base) {
pr_err("%s: failed to map registers\n", __func__);
- return;
+ return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: failed to get clock\n", __func__);
iounmap(timer.base);
- return;
+ return PTR_ERR(clk);
}
error = clk_prepare_enable(clk);
@@ -219,11 +219,12 @@ static void __init keystone_timer_init(struct device_node *np)
clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
pr_info("keystone timer clock @%lu Hz\n", rate);
- return;
+ return 0;
err:
clk_put(clk);
iounmap(timer.base);
+ return error;
}
CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
- keystone_timer_init);
+ keystone_timer_init);
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index d461089..70c149a 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -55,8 +55,8 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
}
-static void __init nps_setup_clocksource(struct device_node *node,
- struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node,
+ struct clk *clk)
{
int ret, cluster;
@@ -68,7 +68,7 @@ static void __init nps_setup_clocksource(struct device_node *node,
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Couldn't enable parent clock\n");
- return;
+ return ret;
}
nps_timer_rate = clk_get_rate(clk);
@@ -79,19 +79,21 @@ static void __init nps_setup_clocksource(struct device_node *node,
pr_err("Couldn't register clock source.\n");
clk_disable_unprepare(clk);
}
+
+ return ret;
}
-static void __init nps_timer_init(struct device_node *node)
+static int __init nps_timer_init(struct device_node *node)
{
struct clk *clk;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock.\n");
- return;
+ return PTR_ERR(clk);
}
- nps_setup_clocksource(node, clk);
+ return nps_setup_clocksource(node, clk);
}
CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
new file mode 100644
index 0000000..bd887e2
--- /dev/null
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -0,0 +1,297 @@
+/*
+ * drivers/clocksource/timer-oxnas-rps.c
+ *
+ * Copyright (C) 2009 Oxford Semiconductor Ltd
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+
+/* TIMER1 used as tick
+ * TIMER2 used as clocksource
+ */
+
+/* Registers definitions */
+
+#define TIMER_LOAD_REG 0x0
+#define TIMER_CURR_REG 0x4
+#define TIMER_CTRL_REG 0x8
+#define TIMER_CLRINT_REG 0xC
+
+#define TIMER_BITS 24
+
+#define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1)
+
+#define TIMER_PERIODIC BIT(6)
+#define TIMER_ENABLE BIT(7)
+
+#define TIMER_DIV1 (0)
+#define TIMER_DIV16 (1 << 2)
+#define TIMER_DIV256 (2 << 2)
+
+#define TIMER1_REG_OFFSET 0
+#define TIMER2_REG_OFFSET 0x20
+
+/* Clockevent & Clocksource data */
+
+struct oxnas_rps_timer {
+ struct clock_event_device clkevent;
+ void __iomem *clksrc_base;
+ void __iomem *clkevt_base;
+ unsigned long timer_period;
+ unsigned int timer_prescaler;
+ struct clk *clk;
+ int irq;
+};
+
+static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id)
+{
+ struct oxnas_rps_timer *rps = dev_id;
+
+ writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
+
+ rps->clkevent.event_handler(&rps->clkevent);
+
+ return IRQ_HANDLED;
+}
+
+static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps,
+ unsigned long period,
+ unsigned int periodic)
+{
+ uint32_t cfg = rps->timer_prescaler;
+
+ if (period)
+ cfg |= TIMER_ENABLE;
+
+ if (periodic)
+ cfg |= TIMER_PERIODIC;
+
+ writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG);
+ writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG);
+}
+
+static int oxnas_rps_timer_shutdown(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, 0, 0);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, rps->timer_period, 1);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, rps->timer_period, 0);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, delta, 0);
+
+ return 0;
+}
+
+static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps)
+{
+ ulong clk_rate = clk_get_rate(rps->clk);
+ ulong timer_rate;
+
+ /* Start with prescaler 1 */
+ rps->timer_prescaler = TIMER_DIV1;
+ rps->timer_period = DIV_ROUND_UP(clk_rate, HZ);
+ timer_rate = clk_rate;
+
+ if (rps->timer_period > TIMER_MAX_VAL) {
+ rps->timer_prescaler = TIMER_DIV16;
+ timer_rate = clk_rate / 16;
+ rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
+ }
+ if (rps->timer_period > TIMER_MAX_VAL) {
+ rps->timer_prescaler = TIMER_DIV256;
+ timer_rate = clk_rate / 256;
+ rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
+ }
+
+ rps->clkevent.name = "oxnas-rps";
+ rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ;
+ rps->clkevent.tick_resume = oxnas_rps_timer_shutdown;
+ rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown;
+ rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic;
+ rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot;
+ rps->clkevent.set_next_event = oxnas_rps_timer_next_event;
+ rps->clkevent.rating = 200;
+ rps->clkevent.cpumask = cpu_possible_mask;
+ rps->clkevent.irq = rps->irq;
+ clockevents_config_and_register(&rps->clkevent,
+ timer_rate,
+ 1,
+ TIMER_MAX_VAL);
+
+ pr_info("Registered clock event rate %luHz prescaler %x period %lu\n",
+ clk_rate,
+ rps->timer_prescaler,
+ rps->timer_period);
+
+ return 0;
+}
+
+/* Clocksource */
+
+static void __iomem *timer_sched_base;
+
+static u64 notrace oxnas_rps_read_sched_clock(void)
+{
+ return ~readl_relaxed(timer_sched_base);
+}
+
+static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps)
+{
+ ulong clk_rate = clk_get_rate(rps->clk);
+ int ret;
+
+ /* use prescale 16 */
+ clk_rate = clk_rate / 16;
+
+ writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG);
+ writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
+ rps->clksrc_base + TIMER_CTRL_REG);
+
+ timer_sched_base = rps->clksrc_base + TIMER_CURR_REG;
+ sched_clock_register(oxnas_rps_read_sched_clock,
+ TIMER_BITS, clk_rate);
+ ret = clocksource_mmio_init(timer_sched_base,
+ "oxnas_rps_clocksource_timer",
+ clk_rate, 250, TIMER_BITS,
+ clocksource_mmio_readl_down);
+ if (WARN_ON(ret)) {
+ pr_err("can't register clocksource\n");
+ return ret;
+ }
+
+ pr_info("Registered clocksource rate %luHz\n", clk_rate);
+
+ return 0;
+}
+
+static int __init oxnas_rps_timer_init(struct device_node *np)
+{
+ struct oxnas_rps_timer *rps;
+ void __iomem *base;
+ int ret;
+
+ rps = kzalloc(sizeof(*rps), GFP_KERNEL);
+ if (!rps)
+ return -ENOMEM;
+
+ rps->clk = of_clk_get(np, 0);
+ if (IS_ERR(rps->clk)) {
+ ret = PTR_ERR(rps->clk);
+ goto err_alloc;
+ }
+
+ ret = clk_prepare_enable(rps->clk);
+ if (ret)
+ goto err_clk;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -ENXIO;
+ goto err_clk_prepare;
+ }
+
+ rps->irq = irq_of_parse_and_map(np, 0);
+ if (rps->irq <= 0) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
+ rps->clkevt_base = base + TIMER1_REG_OFFSET;
+ rps->clksrc_base = base + TIMER2_REG_OFFSET;
+
+ /* Disable timers */
+ writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG);
+ writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG);
+ writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG);
+
+ ret = request_irq(rps->irq, oxnas_rps_timer_irq,
+ IRQF_TIMER | IRQF_IRQPOLL,
+ "rps-timer", rps);
+ if (ret)
+ goto err_iomap;
+
+ ret = oxnas_rps_clocksource_init(rps);
+ if (ret)
+ goto err_irqreq;
+
+ ret = oxnas_rps_clockevent_init(rps);
+ if (ret)
+ goto err_irqreq;
+
+ return 0;
+
+err_irqreq:
+ free_irq(rps->irq, rps);
+err_iomap:
+ iounmap(base);
+err_clk_prepare:
+ clk_disable_unprepare(rps->clk);
+err_clk:
+ clk_put(rps->clk);
+err_alloc:
+ kfree(rps);
+
+ return ret;
+}
+
+CLOCKSOURCE_OF_DECLARE(ox810se_rps,
+ "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
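The clockevent setup above walks the three hardware prescalers (1, 16, 256) and keeps the first one whose one-jiffy reload value still fits the 24-bit LOAD register. A standalone userspace sketch of the same arithmetic; the 2 GHz input rate is purely illustrative:

#include <stdio.h>

#define TIMER_BITS	24
#define TIMER_MAX_VAL	((1UL << TIMER_BITS) - 1)
#define HZ		100UL

int main(void)
{
	unsigned long clk_rate = 2000000000UL;	/* hypothetical input */
	unsigned long div, period = 0;

	for (div = 1; div <= 256; div *= 16) {
		/* DIV_ROUND_UP(clk_rate / div, HZ) */
		period = (clk_rate / div + HZ - 1) / HZ;
		if (period <= TIMER_MAX_VAL)
			break;
	}
	/* prints: prescaler 16, one-jiffy period 1250000 ticks */
	printf("prescaler %lu, one-jiffy period %lu ticks\n", div, period);
	return 0;
}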
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index 2854c66..c32148e 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -19,7 +19,6 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
-#include <asm/mach/time.h>
#define PRIMA2_CLOCK_FREQ 1000000
@@ -189,24 +188,36 @@ static void __init sirfsoc_clockevent_init(void)
}
/* initialize the kernel jiffy timer source */
-static void __init sirfsoc_prima2_timer_init(struct device_node *np)
+static int __init sirfsoc_prima2_timer_init(struct device_node *np)
{
unsigned long rate;
struct clk *clk;
+ int ret;
clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(clk));
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
- BUG_ON(clk_prepare_enable(clk));
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Failed to enable clock\n");
+ return ret;
+ }
rate = clk_get_rate(clk);
- BUG_ON(rate < PRIMA2_CLOCK_FREQ);
- BUG_ON(rate % PRIMA2_CLOCK_FREQ);
+ if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
+ pr_err("Invalid clock rate\n");
+ return -EINVAL;
+ }
sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base)
- panic("unable to map timer cpu registers\n");
+ if (!sirfsoc_timer_base) {
+ pr_err("unable to map timer cpu registers\n");
+ return -ENXIO;
+ }
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
@@ -216,14 +227,23 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
- BUG_ON(clocksource_register_hz(&sirfsoc_clocksource,
- PRIMA2_CLOCK_FREQ));
+ ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
+ if (ret) {
+ pr_err("Failed to register clocksource\n");
+ return ret;
+ }
sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
- BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
+ ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+ if (ret) {
+ pr_err("Failed to setup irq\n");
+ return ret;
+ }
sirfsoc_clockevent_init();
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer,
"sirf,prima2-tick", sirfsoc_prima2_timer_init);
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 5f45b9a..d078633 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -77,7 +77,7 @@ void __init sp804_timer_disable(void __iomem *base)
writel(0, base + TIMER_CTRL);
}
-void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
+int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
int use_sched_clock)
@@ -89,14 +89,13 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
if (IS_ERR(clk)) {
pr_err("sp804: clock not found: %d\n",
(int)PTR_ERR(clk));
- return;
+ return PTR_ERR(clk);
}
}
rate = sp804_get_clock_rate(clk);
-
if (rate < 0)
- return;
+ return -EINVAL;
/* setup timer 0 as free-running clocksource */
writel(0, base + TIMER_CTRL);
@@ -112,6 +111,8 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
sched_clock_base = base;
sched_clock_register(sp804_read, 32, rate);
}
+
+ return 0;
}
@@ -186,7 +187,7 @@ static struct irqaction sp804_timer_irq = {
.dev_id = &sp804_clockevent,
};
-void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
+int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
long rate;
@@ -196,12 +197,12 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
if (IS_ERR(clk)) {
pr_err("sp804: %s clock not found: %d\n", name,
(int)PTR_ERR(clk));
- return;
+ return PTR_ERR(clk);
}
rate = sp804_get_clock_rate(clk);
if (rate < 0)
- return;
+ return -EINVAL;
clkevt_base = base;
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
@@ -213,27 +214,31 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
setup_irq(irq, &sp804_timer_irq);
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
+
+ return 0;
}
-static void __init sp804_of_init(struct device_node *np)
+static int __init sp804_of_init(struct device_node *np)
{
static bool initialized = false;
void __iomem *base;
- int irq;
+ int irq, ret = -EINVAL;
u32 irq_num = 0;
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
base = of_iomap(np, 0);
- if (WARN_ON(!base))
- return;
+ if (!base)
+ return -ENXIO;
/* Ensure timers are disabled */
writel(0, base + TIMER_CTRL);
writel(0, base + TIMER_2_BASE + TIMER_CTRL);
- if (initialized || !of_device_is_available(np))
+ if (initialized || !of_device_is_available(np)) {
+ ret = -EINVAL;
goto err;
+ }
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
@@ -256,35 +261,53 @@ static void __init sp804_of_init(struct device_node *np)
of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
if (irq_num == 2) {
- __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
- __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+
+ ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
+ if (ret)
+ goto err;
+
+ ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+ if (ret)
+ goto err;
} else {
- __sp804_clockevents_init(base, irq, clk1 , name);
- __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
- name, clk2, 1);
+
+ ret = __sp804_clockevents_init(base, irq, clk1, name);
+ if (ret)
+ goto err;
+
+ ret = __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
+ name, clk2, 1);
+ if (ret)
+ goto err;
}
initialized = true;
- return;
+ return 0;
err:
iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
-static void __init integrator_cp_of_init(struct device_node *np)
+static int __init integrator_cp_of_init(struct device_node *np)
{
static int init_count = 0;
void __iomem *base;
- int irq;
+ int irq, ret = -EINVAL;
const char *name = of_get_property(np, "compatible", NULL);
struct clk *clk;
base = of_iomap(np, 0);
- if (WARN_ON(!base))
- return;
+ if (!base) {
+ pr_err("Failed to iomap\n");
+ return -ENXIO;
+ }
+
clk = of_clk_get(np, 0);
- if (WARN_ON(IS_ERR(clk)))
- return;
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
/* Ensure timer is disabled */
writel(0, base + TIMER_CTRL);
@@ -292,19 +315,24 @@ static void __init integrator_cp_of_init(struct device_node *np)
if (init_count == 2 || !of_device_is_available(np))
goto err;
- if (!init_count)
- __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
- else {
+ if (!init_count) {
+ ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
+ if (ret)
+ goto err;
+ } else {
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
goto err;
- __sp804_clockevents_init(base, irq, clk, name);
+ ret = __sp804_clockevents_init(base, irq, clk, name);
+ if (ret)
+ goto err;
}
init_count++;
- return;
+ return 0;
err:
iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index f3dcb76..1b2574c 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -98,7 +98,7 @@ static struct stm32_clock_event_ddata clock_event_ddata = {
},
};
-static void __init stm32_clockevent_init(struct device_node *np)
+static int __init stm32_clockevent_init(struct device_node *np)
{
struct stm32_clock_event_ddata *data = &clock_event_ddata;
struct clk *clk;
@@ -130,12 +130,14 @@ static void __init stm32_clockevent_init(struct device_node *np)
data->base = of_iomap(np, 0);
if (!data->base) {
+ ret = -ENXIO;
pr_err("failed to map registers for clockevent\n");
goto err_iomap;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
+ ret = -EINVAL;
pr_err("%s: failed to get irq.\n", np->full_name);
goto err_get_irq;
}
@@ -173,7 +175,7 @@ static void __init stm32_clockevent_init(struct device_node *np)
pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
np->full_name, bits);
- return;
+ return ret;
err_get_irq:
iounmap(data->base);
@@ -182,7 +184,7 @@ err_iomap:
err_clk_enable:
clk_put(clk);
err_clk_get:
- return;
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 24c83f9..c184eb8 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -311,33 +311,42 @@ err_free:
return ret;
}
-static void __init sun5i_timer_init(struct device_node *node)
+static int __init sun5i_timer_init(struct device_node *node)
{
struct reset_control *rstc;
void __iomem *timer_base;
struct clk *clk;
- int irq;
+ int irq, ret;
timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(timer_base))
- panic("Can't map registers");
+ if (IS_ERR(timer_base)) {
+ pr_err("Can't map registers\n");
+ return PTR_ERR(timer_base);
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ\n");
+ return -EINVAL;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("Can't get timer clock");
+ if (IS_ERR(clk)) {
+ pr_err("Can't get timer clock\n");
+ return PTR_ERR(clk);
+ }
rstc = of_reset_control_get(node, NULL);
if (!IS_ERR(rstc))
reset_control_deassert(rstc);
- sun5i_setup_clocksource(node, timer_base, clk, irq);
- sun5i_setup_clockevent(node, timer_base, clk, irq);
+ ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
+ if (ret)
+ return ret;
+
+ return sun5i_setup_clockevent(node, timer_base, clk, irq);
}
CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
- sun5i_timer_init);
+ sun5i_timer_init);
CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
- sun5i_timer_init);
+ sun5i_timer_init);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 8518d9d..92b7e39 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -88,14 +88,14 @@ static u64 notrace omap_32k_read_sched_clock(void)
return ti_32k_read_cycles(&ti_32k_timer.cs);
}
-static void __init ti_32k_timer_init(struct device_node *np)
+static int __init ti_32k_timer_init(struct device_node *np)
{
int ret;
ti_32k_timer.base = of_iomap(np, 0);
if (!ti_32k_timer.base) {
pr_err("Can't ioremap 32k timer base\n");
- return;
+ return -ENXIO;
}
ti_32k_timer.counter = ti_32k_timer.base;
@@ -116,11 +116,13 @@ static void __init ti_32k_timer_init(struct device_node *np)
ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
if (ret) {
pr_err("32k_counter: can't register clocksource\n");
- return;
+ return ret;
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
ti_32k_timer_init);
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
index 1744b24..704e40c 100644
--- a/drivers/clocksource/timer-u300.c
+++ b/drivers/clocksource/timer-u300.c
@@ -359,27 +359,37 @@ static struct delay_timer u300_delay_timer;
/*
* This sets up the system timers, clock source and clock event.
*/
-static void __init u300_timer_init_of(struct device_node *np)
+static int __init u300_timer_init_of(struct device_node *np)
{
unsigned int irq;
struct clk *clk;
unsigned long rate;
+ int ret;
u300_timer_base = of_iomap(np, 0);
- if (!u300_timer_base)
- panic("could not ioremap system timer\n");
+ if (!u300_timer_base) {
+ pr_err("could not ioremap system timer\n");
+ return -ENXIO;
+ }
/* Get the IRQ for the GP1 timer */
irq = irq_of_parse_and_map(np, 2);
- if (!irq)
- panic("no IRQ for system timer\n");
+ if (!irq) {
+ pr_err("no IRQ for system timer\n");
+ return -EINVAL;
+ }
pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
/* Clock the interrupt controller */
clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
rate = clk_get_rate(clk);
u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
@@ -410,7 +420,9 @@ static void __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_RGPT1);
/* Set up the IRQ handler */
- setup_irq(irq, &u300_timer_irq);
+ ret = setup_irq(irq, &u300_timer_irq);
+ if (ret)
+ return ret;
/* Reset the General Purpose timer 2 */
writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
@@ -428,9 +440,12 @@ static void __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_EGPT2);
/* Use general purpose timer 2 as clock source */
- if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
- "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
+ ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
+ "GPT2", rate, 300, 32, clocksource_mmio_readl_up);
+ if (ret) {
pr_err("timer: failed to initialize U300 clock source\n");
+ return ret;
+ }
/* Configure and register the clockevent */
clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
@@ -440,6 +455,7 @@ static void __init u300_timer_init_of(struct device_node *np)
* TODO: init and register the rest of the timers too, they can be
* used by hrtimers!
*/
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c
index 0a26d3d..220b490 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/versatile.c
@@ -25,16 +25,18 @@ static u64 notrace versatile_sys_24mhz_read(void)
return readl(versatile_sys_24mhz);
}
-static void __init versatile_sched_clock_init(struct device_node *node)
+static int __init versatile_sched_clock_init(struct device_node *node)
{
void __iomem *base = of_iomap(node, 0);
if (!base)
- return;
+ return -ENXIO;
versatile_sys_24mhz = base + SYS_24MHZ;
sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
versatile_sched_clock_init);
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index a0e6c68..55d8d84 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -156,15 +156,18 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
return 0;
}
-static void __init pit_timer_init(struct device_node *np)
+static int __init pit_timer_init(struct device_node *np)
{
struct clk *pit_clk;
void __iomem *timer_base;
unsigned long clk_rate;
- int irq;
+ int irq, ret;
timer_base = of_iomap(np, 0);
- BUG_ON(!timer_base);
+ if (!timer_base) {
+ pr_err("Failed to iomap\n");
+ return -ENXIO;
+ }
/*
* PIT0 and PIT1 can be chained to build a 64-bit timer,
@@ -175,12 +178,16 @@ static void __init pit_timer_init(struct device_node *np)
clkevt_base = timer_base + PITn_OFFSET(3);
irq = irq_of_parse_and_map(np, 0);
- BUG_ON(irq <= 0);
+ if (irq <= 0)
+ return -EINVAL;
pit_clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(pit_clk));
+ if (IS_ERR(pit_clk))
+ return PTR_ERR(pit_clk);
- BUG_ON(clk_prepare_enable(pit_clk));
+ ret = clk_prepare_enable(pit_clk);
+ if (ret)
+ return ret;
clk_rate = clk_get_rate(pit_clk);
cycle_per_jiffy = clk_rate / (HZ);
@@ -188,8 +195,10 @@ static void __init pit_timer_init(struct device_node *np)
/* enable the pit module */
__raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
- BUG_ON(pit_clocksource_init(clk_rate));
+ ret = pit_clocksource_init(clk_rate);
+ if (ret)
+ return ret;
- pit_clockevent_init(clk_rate, irq);
+ return pit_clockevent_init(clk_rate, irq);
}
CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index ddb4092..b150694 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -121,38 +121,48 @@ static struct irqaction irq = {
.dev_id = &clockevent,
};
-static void __init vt8500_timer_init(struct device_node *np)
+static int __init vt8500_timer_init(struct device_node *np)
{
- int timer_irq;
+ int timer_irq, ret;
regbase = of_iomap(np, 0);
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
- return;
+ return -ENXIO;
}
+
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
- return;
+ return -EINVAL;
}
writel(1, regbase + TIMER_CTRL_VAL);
writel(0xf, regbase + TIMER_STATUS_VAL);
writel(~0, regbase + TIMER_MATCH_VAL);
- if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+ ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ);
+ if (ret) {
pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
- __func__, clocksource.name);
+ __func__, clocksource.name);
+ return ret;
+ }
clockevent.cpumask = cpumask_of(0);
- if (setup_irq(timer_irq, &irq))
+ ret = setup_irq(timer_irq, &irq);
+ if (ret) {
pr_err("%s: setup_irq failed for %s\n", __func__,
clockevent.name);
+ return ret;
+ }
+
clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
MIN_OSCR_DELTA * 2, 0xf0000000);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index ceaa613..9a53f5e 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -210,9 +210,9 @@ error_free:
return ret;
}
-static void __init zevio_timer_init(struct device_node *node)
+static int __init zevio_timer_init(struct device_node *node)
{
- BUG_ON(zevio_timer_add(node));
+ return zevio_timer_add(node);
}
CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54c4536..6bd715b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -530,8 +530,7 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
else
timer_interval = GPSTATE_TIMER_INTERVAL;
- mod_timer_pinned(&gpstates->timer, jiffies +
- msecs_to_jiffies(timer_interval));
+ mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
}
/**
@@ -699,7 +698,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = gpstates;
/* initialize timer */
- init_timer_deferrable(&gpstates->timer);
+ init_timer_pinned_deferrable(&gpstates->timer);
gpstates->timer.data = (unsigned long)policy;
gpstates->timer.function = gpstate_timer_handler;
gpstates->timer.expires = jiffies +
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 03ddf0e..684087d 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_clock_disable(host);
setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
(unsigned long)host);
- /* It is not important when it times out, it just needs to timeout. */
- set_timer_slack(&host->timeout_timer, HZ);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 922a443..4ef605a 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
if (!info->egress_timer_scheduled) {
- mod_timer_pinned(&info->egress_timer, jiffies + 1);
+ mod_timer(&info->egress_timer, jiffies + 1);
info->egress_timer_scheduled = true;
}
}
@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr)
BUG();
/* Initialize the egress timer. */
- init_timer(&info->egress_timer);
+ init_timer_pinned(&info->egress_timer);
info->egress_timer.data = (long)info;
info->egress_timer.function = tile_net_handle_egress_timer;
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
index 45f6ebf..e90b3f3 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct work_struct *work)
bq27xxx_battery_update(di);
- if (poll_interval > 0) {
- /* The timer does not have to be accurate. */
- set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+ if (poll_interval > 0)
schedule_delayed_work(&di->work, poll_interval * HZ);
- }
}
/*
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 9325262..25ccef2 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long ignored)
if (channel >= 0)
fetch_data(channel);
- mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
+ mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
}
static void add_poll_timer(struct timer_list *poll_timer)
{
- setup_timer(poll_timer, dashtty_timer, 0);
+ setup_pinned_timer(poll_timer, dashtty_timer, 0);
poll_timer->expires = jiffies + DA_TTY_POLL;
/*
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index a119176..234123b 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
mips_ejtag_fdc_handle(priv);
if (!priv->removing)
- mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
+ mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
}
/* TTY Port operations */
@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
raw_spin_unlock_irq(&priv->lock);
} else {
/* If we didn't get an usable IRQ, poll instead */
- setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+ setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
(unsigned long)priv);
priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
/*
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 0449235..1700908 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci)
setup_timer(&ohci->io_watchdog, io_watchdog_func,
(unsigned long) ohci);
- set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 9da9832..01d96c9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
- set_timer_slack(&xhci->comp_mode_recovery_timer,
- msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
add_timer(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance mode recovery timer initialized");
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 053818d..9ae4abb 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -390,6 +390,11 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
clockid != CLOCK_BOOTTIME_ALARM))
return -EINVAL;
+ if (!capable(CAP_WAKE_ALARM) &&
+ (clockid == CLOCK_REALTIME_ALARM ||
+ clockid == CLOCK_BOOTTIME_ALARM))
+ return -EPERM;
+
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -433,6 +438,11 @@ static int do_timerfd_settime(int ufd, int flags,
return ret;
ctx = f.file->private_data;
+ if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
+ fdput(f);
+ return -EPERM;
+ }
+
timerfd_setup_cancel(ctx, flags);
/*
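With these two checks, creating or arming an alarm-class timerfd now requires CAP_WAKE_ALARM, matching the posix alarm-timer path. A hypothetical unprivileged test; the exact userspace header providing CLOCK_REALTIME_ALARM may vary by libc:

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <time.h>
#include <sys/timerfd.h>

int main(void)
{
	int fd = timerfd_create(CLOCK_REALTIME_ALARM, 0);

	if (fd < 0)	/* expected without CAP_WAKE_ALARM: EPERM */
		printf("timerfd_create: %s\n", strerror(errno));
	else
		printf("alarm timerfd created; CAP_WAKE_ALARM held\n");
	return 0;
}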
diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h
index 1f8a1ca..7654d71 100644
--- a/include/clocksource/timer-sp804.h
+++ b/include/clocksource/timer-sp804.h
@@ -3,10 +3,10 @@
struct clk;
-void __sp804_clocksource_and_sched_clock_init(void __iomem *,
- const char *, struct clk *, int);
-void __sp804_clockevents_init(void __iomem *, unsigned int,
- struct clk *, const char *);
+int __sp804_clocksource_and_sched_clock_init(void __iomem *,
+ const char *, struct clk *, int);
+int __sp804_clockevents_init(void __iomem *, unsigned int,
+ struct clk *, const char *);
void sp804_timer_disable(void __iomem *);
static inline void sp804_clocksource_init(void __iomem *base, const char *name)
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 52f3b7d..9d80312 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -26,10 +26,10 @@ enum alarmtimer_restart {
* struct alarm - Alarm timer structure
* @node: timerqueue node for adding to the event list this value
* also includes the expiration time.
- * @period: Period for recuring alarms
+ * @timer: hrtimer used to schedule events while running
* @function: Function pointer to be executed when the timer fires.
- * @type: Alarm type (BOOTTIME/REALTIME)
- * @enabled: Flag that represents if the alarm is set to fire or not
+ * @type: Alarm type (BOOTTIME/REALTIME).
+ * @state: Flag that represents if the alarm is set to fire or not.
* @data: Internal data value.
*/
struct alarm {
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0df4a51..834179f 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -461,6 +461,10 @@ static inline struct clk *clk_get_parent(struct clk *clk)
return NULL;
}
+static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+ return NULL;
+}
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 44a1aff..0839818 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -244,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *,
extern int clocksource_i8253_init(void);
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
- OF_DECLARE_1(clksrc, name, compat, fn)
+ OF_DECLARE_1_RET(clksrc, name, compat, fn)
#ifdef CONFIG_CLKSRC_PROBE
extern void clocksource_probe(void);
diff --git a/include/linux/list.h b/include/linux/list.h
index 5356f4d..5183138 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hlist_node *h)
}
/*
+ * Check whether the node is the only node of the head without
+ * accessing the head:
+ */
+static inline bool
+hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
+{
+ return !n->next && n->pprev == &h->first;
+}
+
+/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
diff --git a/include/linux/of.h b/include/linux/of.h
index 74eb28c..15c43f0 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1009,10 +1009,13 @@ static inline int of_get_available_child_count(const struct device_node *np)
#endif
typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
+typedef int (*of_init_fn_1_ret)(struct device_node *);
typedef void (*of_init_fn_1)(struct device_node *);
#define OF_DECLARE_1(table, name, compat, fn) \
_OF_DECLARE(table, name, compat, fn, of_init_fn_1)
+#define OF_DECLARE_1_RET(table, name, compat, fn) \
+ _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret)
#define OF_DECLARE_2(table, name, compat, fn) \
_OF_DECLARE(table, name, compat, fn, of_init_fn_2)
diff --git a/include/linux/time.h b/include/linux/time.h
index 297f09f..4cea09d 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -205,7 +205,20 @@ struct tm {
int tm_yday;
};
-void time_to_tm(time_t totalsecs, int offset, struct tm *result);
+void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);
+
+/**
+ * time_to_tm - converts the calendar time to local broken-down time
+ *
+ * @totalsecs:	the number of seconds elapsed since 00:00:00 on January 1, 1970,
+ *		Coordinated Universal Time (UTC).
+ * @offset:	seconds to add to totalsecs.
+ * @result:	pointer to struct tm variable to receive broken-down time.
+ */
+static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
+{
+ time64_to_tm(totalsecs, offset, result);
+}
/**
* timespec_to_ns - Convert timespec to nanoseconds
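time_to_tm() keeps its old signature as a wrapper, so existing callers compile unchanged, while new code can pass the full 64-bit seconds value and stay correct past year 2038 on 32-bit builds. A minimal sketch of a migrated caller; the function name is hypothetical:

#include <linux/time.h>

static void example_log_date(time64_t now)
{
	struct tm tm;

	time64_to_tm(now, 0, &tm);	/* offset 0: UTC */
	pr_info("date: %04ld-%02d-%02d\n",
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
}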
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 20ac746..4419506 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -19,7 +19,6 @@ struct timer_list {
void (*function)(unsigned long);
unsigned long data;
u32 flags;
- int slack;
#ifdef CONFIG_TIMER_STATS
int start_pid;
@@ -58,11 +57,14 @@ struct timer_list {
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*/
-#define TIMER_CPUMASK 0x0007FFFF
-#define TIMER_MIGRATING 0x00080000
+#define TIMER_CPUMASK 0x0003FFFF
+#define TIMER_MIGRATING 0x00040000
#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
-#define TIMER_DEFERRABLE 0x00100000
+#define TIMER_DEFERRABLE 0x00080000
+#define TIMER_PINNED 0x00100000
#define TIMER_IRQSAFE 0x00200000
+#define TIMER_ARRAYSHIFT 22
+#define TIMER_ARRAYMASK 0xFFC00000
#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
.entry = { .next = TIMER_ENTRY_STATIC }, \
@@ -70,7 +72,6 @@ struct timer_list {
.expires = (_expires), \
.data = (_data), \
.flags = (_flags), \
- .slack = -1, \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
}
@@ -78,9 +79,15 @@ struct timer_list {
#define TIMER_INITIALIZER(_function, _expires, _data) \
__TIMER_INITIALIZER((_function), (_expires), (_data), 0)
+#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \
+ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED)
+
#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \
__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
+#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \
+ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED)
+
#define DEFINE_TIMER(_name, _function, _expires, _data) \
struct timer_list _name = \
TIMER_INITIALIZER(_function, _expires, _data)
@@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
#define init_timer(timer) \
__init_timer((timer), 0)
+#define init_timer_pinned(timer) \
+ __init_timer((timer), TIMER_PINNED)
#define init_timer_deferrable(timer) \
__init_timer((timer), TIMER_DEFERRABLE)
+#define init_timer_pinned_deferrable(timer) \
+ __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED)
#define init_timer_on_stack(timer) \
__init_timer_on_stack((timer), 0)
@@ -145,12 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
#define setup_timer(timer, fn, data) \
__setup_timer((timer), (fn), (data), 0)
+#define setup_pinned_timer(timer, fn, data) \
+ __setup_timer((timer), (fn), (data), TIMER_PINNED)
#define setup_deferrable_timer(timer, fn, data) \
__setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer(timer, fn, data) \
+ __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
#define setup_timer_on_stack(timer, fn, data) \
__setup_timer_on_stack((timer), (fn), (data), 0)
+#define setup_pinned_timer_on_stack(timer, fn, data) \
+ __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
#define setup_deferrable_timer_on_stack(timer, fn, data) \
__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
+ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
/**
* timer_pending - is a timer pending?
@@ -171,12 +190,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
-extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
-
-extern void set_timer_slack(struct timer_list *time, int slack_hz);
-#define TIMER_NOT_PINNED 0
-#define TIMER_PINNED 1
/*
* The jiffies value which is added to now, when there is no timer
* in the timer wheel:
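TIMER_PINNED is the flag that replaces mod_timer_pinned() in the driver hunks above: pinning moves from each call site into the timer's flags at setup time, so every later mod_timer() keeps the timer on its CPU. A minimal sketch of the conversion pattern; the foo names are hypothetical:

struct foo {			/* hypothetical driver state */
	struct timer_list poll_timer;
};

static void foo_poll(unsigned long data)
{
	struct foo *f = (struct foo *)data;

	/* ... poll the hardware ... */
	mod_timer(&f->poll_timer, jiffies + 1);	/* stays pinned */
}

static void foo_start(struct foo *f)
{
	setup_pinned_timer(&f->poll_timer, foo_poll, (unsigned long)f);
	f->poll_timer.expires = jiffies + 1;
	add_timer(&f->poll_timer);
}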
diff --git a/kernel/signal.c b/kernel/signal.c
index 96e9bc4..af21afc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2751,23 +2751,18 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
* @ts: upper bound on process time suspension
*/
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
- const struct timespec *ts)
+ const struct timespec *ts)
{
+ ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
struct task_struct *tsk = current;
- long timeout = MAX_SCHEDULE_TIMEOUT;
sigset_t mask = *which;
- int sig;
+ int sig, ret = 0;
if (ts) {
if (!timespec_valid(ts))
return -EINVAL;
- timeout = timespec_to_jiffies(ts);
- /*
- * We can be close to the next tick, add another one
- * to ensure we will wait at least the time asked for.
- */
- if (ts->tv_sec || ts->tv_nsec)
- timeout++;
+ timeout = timespec_to_ktime(*ts);
+ to = &timeout;
}
/*
@@ -2778,7 +2773,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
spin_lock_irq(&tsk->sighand->siglock);
sig = dequeue_signal(tsk, &mask, info);
- if (!sig && timeout) {
+ if (!sig && timeout.tv64) {
/*
* None ready, temporarily unblock those we're interested
* while we are sleeping in so that we'll be awakened when
@@ -2790,8 +2785,9 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
recalc_sigpending();
spin_unlock_irq(&tsk->sighand->siglock);
- timeout = freezable_schedule_timeout_interruptible(timeout);
-
+ __set_current_state(TASK_INTERRUPTIBLE);
+ ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
+ HRTIMER_MODE_REL);
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, &tsk->real_blocked);
sigemptyset(&tsk->real_blocked);
@@ -2801,7 +2797,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
if (sig)
return sig;
- return timeout ? -EINTR : -EAGAIN;
+ return ret ? -EINTR : -EAGAIN;
}
/**
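Sleeping on an hrtimer removes both the jiffies round-up and the defensive extra tick. A userspace toy showing the worst case for a 5ms sigtimedwait at HZ=100; the numbers are illustrative only:

#include <stdio.h>

int main(void)
{
	const long hz = 100, tick_us = 1000000 / hz;
	const long req_us = 5000;
	long ticks = (req_us + tick_us - 1) / tick_us + 1; /* old +1 pad */

	printf("old: up to %ld us\n", ticks * tick_us);	/* 20000 */
	printf("new: %ld us + timer_slack_ns\n", req_us);
	return 0;
}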
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index e840ed8..c3aad68 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -30,7 +30,6 @@
* struct alarm_base - Alarm timer bases
* @lock: Lock for synchronized access to the base
* @timerqueue: Timerqueue head managing the list of events
- * @timer: hrtimer used to schedule events while running
* @gettime: Function to read the time correlating to the base
* @base_clockid: clockid for the base
*/
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index a9b76a4..2c5bc77 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -645,7 +645,7 @@ void tick_cleanup_dead_cpu(int cpu)
#endif
#ifdef CONFIG_SYSFS
-struct bus_type clockevents_subsys = {
+static struct bus_type clockevents_subsys = {
.name = "clockevents",
.dev_name = "clockevent",
};
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 56ece14..6a5a310 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -669,10 +669,12 @@ static void clocksource_enqueue(struct clocksource *cs)
struct list_head *entry = &clocksource_list;
struct clocksource *tmp;
- list_for_each_entry(tmp, &clocksource_list, list)
+ list_for_each_entry(tmp, &clocksource_list, list) {
/* Keep track of the place where to insert */
- if (tmp->rating >= cs->rating)
- entry = &tmp->list;
+ if (tmp->rating < cs->rating)
+ break;
+ entry = &tmp->list;
+ }
list_add(&cs->list, entry);
}
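The rewritten loop terminates at the first entry with a lower rating instead of scanning the whole list, while preserving the descending order and the stable placement of equal ratings. A userspace sketch of the resulting insert semantics, array-based and purely illustrative:

    #include <stdio.h>

    int main(void)
    {
            int list[8], n = 0, ratings[] = { 300, 100, 400, 300 };

            for (int r = 0; r < 4; r++) {
                    int pos = n;                    /* default: append at the tail */

                    for (int i = 0; i < n; i++) {
                            if (list[i] < ratings[r]) {     /* first lower-rated entry */
                                    pos = i;
                                    break;
                            }
                    }
                    for (int i = n; i > pos; i--)
                            list[i] = list[i - 1];
                    list[pos] = ratings[r];
                    n++;
            }
            for (int i = 0; i < n; i++)
                    printf("%d ", list[i]);         /* prints: 400 300 300 100 */
            return 0;
    }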
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e99df0f..d13c9ae 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -177,7 +177,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
#endif
}
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
int pinned)
diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
index e622ba3..b0928ab 100644
--- a/kernel/time/test_udelay.c
+++ b/kernel/time/test_udelay.c
@@ -43,13 +43,13 @@ static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
int allowed_error_ns = usecs * 5;
for (i = 0; i < iters; ++i) {
- struct timespec ts1, ts2;
+ s64 kt1, kt2;
int time_passed;
- ktime_get_ts(&ts1);
+ kt1 = ktime_get_ns();
udelay(usecs);
- ktime_get_ts(&ts2);
- time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1);
+ kt2 = ktime_get_ns();
+ time_passed = kt2 - kt1;
if (i == 0 || time_passed < min)
min = time_passed;
@@ -87,11 +87,11 @@ static int udelay_test_show(struct seq_file *s, void *v)
if (usecs > 0 && iters > 0) {
return udelay_test_single(s, usecs, iters);
} else if (usecs == 0) {
- struct timespec ts;
+ struct timespec64 ts;
- ktime_get_ts(&ts);
- seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n",
- loops_per_jiffy, ts.tv_sec, ts.tv_nsec);
+ ktime_get_ts64(&ts);
+ seq_printf(s, "udelay() test (lpj=%ld kt=%lld.%09ld)\n",
+ loops_per_jiffy, (s64)ts.tv_sec, ts.tv_nsec);
seq_puts(s, "usage:\n");
seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n");
seq_puts(s, "cat " DEBUGFS_FILENAME "\n");
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 53d7184..690b797 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -75,6 +75,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
}
static struct clock_event_device ce_broadcast_hrtimer = {
+ .name = "bc_hrtimer",
.set_state_shutdown = bc_shutdown,
.set_next_ktime = bc_set_next,
.features = CLOCK_EVT_FEAT_ONESHOT |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 966a5a6..f738251 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -164,3 +164,4 @@ static inline void timers_update_migration(bool update_nohz) { }
DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
+void timer_clear_idle(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 536ada8..204fdc8 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
#include <trace/events/timer.h>
/*
- * Per cpu nohz control structure
+ * Per-CPU nohz control structure
*/
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
@@ -61,7 +61,7 @@ static void tick_do_update_jiffies64(ktime_t now)
if (delta.tv64 < tick_period.tv64)
return;
- /* Reevalute with jiffies_lock held */
+ /* Reevaluate with jiffies_lock held */
write_seqlock(&jiffies_lock);
delta = ktime_sub(now, last_jiffies_update);
@@ -116,8 +116,8 @@ static void tick_sched_do_timer(ktime_t now)
#ifdef CONFIG_NO_HZ_COMMON
/*
* Check if the do_timer duty was dropped. We don't care about
- * concurrency: This happens only when the cpu in charge went
- * into a long sleep. If two cpus happen to assign themself to
+ * concurrency: This happens only when the CPU in charge went
+ * into a long sleep. If two CPUs happen to assign themselves to
* this duty, then the jiffies update is still serialized by
* jiffies_lock.
*/
@@ -349,7 +349,7 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bi
/*
* Re-evaluate the need for the tick as we switch the current task.
* It might need the tick due to per task/process properties:
- * perf events, posix cpu timers, ...
+ * perf events, posix CPU timers, ...
*/
void __tick_nohz_task_switch(void)
{
@@ -509,8 +509,8 @@ int tick_nohz_tick_stopped(void)
*
* In case the sched_tick was stopped on this CPU, we have to check if jiffies
* must be updated. Otherwise an interrupt handler could use a stale jiffy
- * value. We do this unconditionally on any cpu, as we don't know whether the
- * cpu, which has the update task assigned is in a long sleep.
+ * value. We do this unconditionally on any CPU, as we don't know whether the
+ * CPU, which has the update task assigned is in a long sleep.
*/
static void tick_nohz_update_jiffies(ktime_t now)
{
@@ -526,7 +526,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
}
/*
- * Updates the per cpu time idle statistics counters
+ * Updates the per-CPU time idle statistics counters
*/
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
@@ -566,12 +566,12 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
}
/**
- * get_cpu_idle_time_us - get the total idle time of a cpu
+ * get_cpu_idle_time_us - get the total idle time of a CPU
* @cpu: CPU number to query
* @last_update_time: variable to store update time in. Do not update
* counters if NULL.
*
- * Return the cummulative idle time (since boot) for a given
+ * Return the cumulative idle time (since boot) for a given
* CPU, in microseconds.
*
* This time is measured via accounting rather than sampling,
@@ -607,12 +607,12 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
/**
- * get_cpu_iowait_time_us - get the total iowait time of a cpu
+ * get_cpu_iowait_time_us - get the total iowait time of a CPU
* @cpu: CPU number to query
* @last_update_time: variable to store update time in. Do not update
* counters if NULL.
*
- * Return the cummulative iowait time (since boot) for a given
+ * Return the cumulative iowait time (since boot) for a given
* CPU, in microseconds.
*
* This time is measured via accounting rather than sampling,
@@ -700,6 +700,12 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
delta = next_tick - basemono;
if (delta <= (u64)TICK_NSEC) {
tick.tv64 = 0;
+
+ /*
+ * Tell the timer code that the base is not idle, i.e. undo
+ * the effect of get_next_timer_interrupt():
+ */
+ timer_clear_idle();
/*
* We've not stopped the tick yet, and there's a timer in the
* next period, so no point in stopping it either, bail.
@@ -726,14 +732,14 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
}
/*
- * If this cpu is the one which updates jiffies, then give up
- * the assignment and let it be taken by the cpu which runs
- * the tick timer next, which might be this cpu as well. If we
+ * If this CPU is the one which updates jiffies, then give up
+ * the assignment and let it be taken by the CPU which runs
+ * the tick timer next, which might be this CPU as well. If we
* don't drop this here the jiffies might be stale and
* do_timer() never invoked. Keep track of the fact that it
- * was the one which had the do_timer() duty last. If this cpu
+ * was the one which had the do_timer() duty last. If this CPU
* is the one which had the do_timer() duty last, we limit the
- * sleep time to the timekeeping max_deferement value.
+ * sleep time to the timekeeping max_deferment value.
* Otherwise we can sleep as long as we want.
*/
delta = timekeeping_max_deferment();
@@ -809,6 +815,12 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
tick_do_update_jiffies64(now);
cpu_load_update_nohz_stop();
+ /*
+ * Clear the timer idle flag, so we avoid IPIs on remote queueing and
+ * the clock forward checks in the enqueue path:
+ */
+ timer_clear_idle();
+
calc_load_exit_idle();
touch_softlockup_watchdog_sched();
/*
@@ -841,9 +853,9 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
/*
- * If this cpu is offline and it is the one which updates
+ * If this CPU is offline and it is the one which updates
* jiffies, then give up the assignment and let it be taken by
- * the cpu which runs the tick timer next. If we don't drop
+ * the CPU which runs the tick timer next. If we don't drop
* this here the jiffies might be stale and do_timer() never
* invoked.
*/
@@ -896,11 +908,10 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
ktime_t now, expires;
int cpu = smp_processor_id();
- now = tick_nohz_start_idle(ts);
-
if (can_stop_idle_tick(cpu, ts)) {
int was_stopped = ts->tick_stopped;
+ now = tick_nohz_start_idle(ts);
ts->idle_calls++;
expires = tick_nohz_stop_sched_tick(ts, now, cpu);
@@ -933,11 +944,11 @@ void tick_nohz_idle_enter(void)
WARN_ON_ONCE(irqs_disabled());
/*
- * Update the idle state in the scheduler domain hierarchy
- * when tick_nohz_stop_sched_tick() is called from the idle loop.
- * State will be updated to busy during the first busy tick after
- * exiting idle.
- */
+ * Update the idle state in the scheduler domain hierarchy
+ * when tick_nohz_stop_sched_tick() is called from the idle loop.
+ * State will be updated to busy during the first busy tick after
+ * exiting idle.
+ */
set_cpu_sd_state_idle();
local_irq_disable();
@@ -1092,35 +1103,6 @@ static void tick_nohz_switch_to_nohz(void)
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
-/*
- * When NOHZ is enabled and the tick is stopped, we need to kick the
- * tick timer from irq_enter() so that the jiffies update is kept
- * alive during long running softirqs. That's ugly as hell, but
- * correctness is key even if we need to fix the offending softirq in
- * the first place.
- *
- * Note, this is different to tick_nohz_restart. We just kick the
- * timer and do not touch the other magic bits which need to be done
- * when idle is left.
- */
-static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
-{
-#if 0
- /* Switch back to 2.6.27 behaviour */
- ktime_t delta;
-
- /*
- * Do not touch the tick device, when the next expiry is either
- * already reached or less/equal than the tick period.
- */
- delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
- if (delta.tv64 <= tick_period.tv64)
- return;
-
- tick_nohz_restart(ts, now);
-#endif
-}
-
static inline void tick_nohz_irq_enter(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
@@ -1131,10 +1113,8 @@ static inline void tick_nohz_irq_enter(void)
now = ktime_get();
if (ts->idle_active)
tick_nohz_stop_idle(ts, now);
- if (ts->tick_stopped) {
+ if (ts->tick_stopped)
tick_nohz_update_jiffies(now);
- tick_nohz_kick_tick(ts, now);
- }
}
#else
@@ -1211,7 +1191,7 @@ void tick_setup_sched_timer(void)
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ts->sched_timer.function = tick_sched_timer;
- /* Get the next period (per cpu) */
+ /* Get the next period (per-CPU) */
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
/* Offset the tick to avert jiffies_lock contention. */
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
index 86628e7..7142580 100644
--- a/kernel/time/timeconv.c
+++ b/kernel/time/timeconv.c
@@ -67,20 +67,21 @@ static const unsigned short __mon_yday[2][13] = {
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
/**
- * time_to_tm - converts the calendar time to local broken-down time
+ * time64_to_tm - converts the calendar time to local broken-down time
*
* @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
* Coordinated Universal Time (UTC).
* @offset offset seconds added to totalsecs.
* @result pointer to struct tm variable to receive broken-down time
*/
-void time_to_tm(time_t totalsecs, int offset, struct tm *result)
+void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
{
long days, rem, y;
+ int remainder;
const unsigned short *ip;
- days = totalsecs / SECS_PER_DAY;
- rem = totalsecs % SECS_PER_DAY;
+ days = div_s64_rem(totalsecs, SECS_PER_DAY, &remainder);
+ rem = remainder;
rem += offset;
while (rem < 0) {
rem += SECS_PER_DAY;
@@ -124,4 +125,4 @@ void time_to_tm(time_t totalsecs, int offset, struct tm *result)
result->tm_mon = y;
result->tm_mday = days + 1;
}
-EXPORT_SYMBOL(time_to_tm);
+EXPORT_SYMBOL(time64_to_tm);
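Callers now pass a 64-bit seconds value, which avoids the y2038 truncation of time_t on 32-bit systems. A minimal usage sketch, illustrative only; the timezone offset argument is an assumption of the example:

    #include <linux/time.h>
    #include <linux/printk.h>

    /* Break a time64_t down with a caller-supplied UTC offset */
    static void show_date(time64_t now, int tz_offset_secs)
    {
            struct tm tm;

            time64_to_tm(now, tz_offset_secs, &tm);
            pr_info("%04ld-%02d-%02d %02d:%02d:%02d\n",
                    tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                    tm.tm_hour, tm.tm_min, tm.tm_sec);
    }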
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 255e225..3b65746 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -480,10 +480,12 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
* users are removed, this can be killed.
*/
remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
- tk->tkr_mono.xtime_nsec -= remainder;
- tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
- tk->ntp_error += remainder << tk->ntp_error_shift;
- tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
+ if (remainder != 0) {
+ tk->tkr_mono.xtime_nsec -= remainder;
+ tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
+ tk->ntp_error += remainder << tk->ntp_error_shift;
+ tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
+ }
}
#else
#define old_vsyscall_fixup(tk)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 3a95f97..cb9ab40 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -59,43 +59,153 @@ __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
/*
- * per-CPU timer vector definitions:
+ * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
+ * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
+ * level has a different granularity.
+ *
+ * The level granularity is: LVL_CLK_DIV ^ lvl
+ * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
+ *
+ * The array level of a newly armed timer depends on the relative expiry
+ * time. The farther away the expiry time is, the higher the array level and
+ * therefore the coarser the granularity becomes.
+ *
+ * Contrary to the original timer wheel implementation, which aims for 'exact'
+ * expiry of the timers, this implementation removes the need for recascading
+ * the timers into the lower array levels. The previous 'classic' timer wheel
+ * implementation of the kernel already violated the 'exact' expiry by adding
+ * slack to the expiry time to provide batched expiration. The granularity
+ * levels provide implicit batching.
+ *
+ * This is an optimization of the original timer wheel implementation for the
+ * majority of the timer wheel use cases: timeouts. The vast majority of
+ * timeout timers (networking, disk I/O ...) are canceled before expiry. If
+ * the timeout expires it indicates that normal operation is disturbed, so it
+ * does not matter much whether the timeout comes with a slight delay.
+ *
+ * The only exception to this are networking timers with a small expiry
+ * time. They rely on the granularity. Those fit into the first wheel level,
+ * which has HZ granularity.
+ *
+ * We don't have cascading anymore. Timers with an expiry time above the
+ * capacity of the last wheel level are force expired at the maximum timeout
+ * value of the last wheel level. From data sampling we know that the maximum
+ * value observed is 5 days (network connection tracking), so this should not
+ * be an issue.
+ *
+ * The currently chosen array constants are a good compromise between
+ * array size and granularity.
+ *
+ * This results in the following granularity and range levels:
+ *
+ * HZ 1000 steps
+ * Level Offset Granularity Range
+ * 0 0 1 ms 0 ms - 63 ms
+ * 1 64 8 ms 64 ms - 511 ms
+ * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
+ * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
+ * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
+ * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
+ * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
+ * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
+ * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
+ *
+ * HZ 300
+ * Level Offset Granularity Range
+ * 0 0 3 ms 0 ms - 210 ms
+ * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
+ * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
+ * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
+ * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
+ * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
+ * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
+ * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
+ * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
+ *
+ * HZ 250
+ * Level Offset Granularity Range
+ * 0 0 4 ms 0 ms - 255 ms
+ * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
+ * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
+ * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
+ * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
+ * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
+ * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
+ * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
+ * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
+ *
+ * HZ 100
+ * Level Offset Granularity Range
+ * 0 0 10 ms 0 ms - 630 ms
+ * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
+ * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
+ * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
+ * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
+ * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
+ * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
+ * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
*/
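The tables above follow directly from the LVL_* constants introduced below. A standalone userspace sketch that reproduces the HZ=1000 rows (at HZ=1000 one jiffy is one millisecond; note the last level is additionally capped at WHEEL_TIMEOUT_MAX, hence the slightly smaller upper bound in the table):

    #include <stdio.h>

    #define LVL_CLK_SHIFT   3
    #define LVL_BITS        6
    #define LVL_SIZE        (1UL << LVL_BITS)

    int main(void)
    {
            for (int lvl = 0; lvl < 9; lvl++) {
                    unsigned long gran  = 1UL << (lvl * LVL_CLK_SHIFT);
                    unsigned long start = lvl ? LVL_SIZE << ((lvl - 1) * LVL_CLK_SHIFT) : 0;
                    unsigned long end   = (LVL_SIZE << (lvl * LVL_CLK_SHIFT)) - 1;

                    printf("level %d: granularity %7lu ms, range %10lu - %10lu ms\n",
                           lvl, gran, start, end);
            }
            return 0;
    }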
-#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
-#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
-
-struct tvec {
- struct hlist_head vec[TVN_SIZE];
-};
-struct tvec_root {
- struct hlist_head vec[TVR_SIZE];
-};
+/* Clock divisor for the next level */
+#define LVL_CLK_SHIFT 3
+#define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
+#define LVL_CLK_MASK (LVL_CLK_DIV - 1)
+#define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
+#define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
-struct tvec_base {
- spinlock_t lock;
- struct timer_list *running_timer;
- unsigned long timer_jiffies;
- unsigned long next_timer;
- unsigned long active_timers;
- unsigned long all_timers;
- int cpu;
- bool migration_enabled;
- bool nohz_active;
- struct tvec_root tv1;
- struct tvec tv2;
- struct tvec tv3;
- struct tvec tv4;
- struct tvec tv5;
-} ____cacheline_aligned;
+/*
+ * The time start value for each level to select the bucket at enqueue
+ * time.
+ */
+#define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
+
+/* Size of each clock level */
+#define LVL_BITS 6
+#define LVL_SIZE (1UL << LVL_BITS)
+#define LVL_MASK (LVL_SIZE - 1)
+#define LVL_OFFS(n) ((n) * LVL_SIZE)
+
+/* Level depth */
+#if HZ > 100
+# define LVL_DEPTH 9
+# else
+# define LVL_DEPTH 8
+#endif
+
+/* The cutoff (max. capacity of the wheel) */
+#define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
+#define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
+
+/*
+ * The resulting wheel size. If NOHZ is configured we allocate two
+ * wheels so we have a separate storage for the deferrable timers.
+ */
+#define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
+
+#ifdef CONFIG_NO_HZ_COMMON
+# define NR_BASES 2
+# define BASE_STD 0
+# define BASE_DEF 1
+#else
+# define NR_BASES 1
+# define BASE_STD 0
+# define BASE_DEF 0
+#endif
+struct timer_base {
+ spinlock_t lock;
+ struct timer_list *running_timer;
+ unsigned long clk;
+ unsigned long next_expiry;
+ unsigned int cpu;
+ bool migration_enabled;
+ bool nohz_active;
+ bool is_idle;
+ DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+ struct hlist_head vectors[WHEEL_SIZE];
+} ____cacheline_aligned;
-static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;
@@ -106,15 +216,17 @@ void timers_update_migration(bool update_nohz)
unsigned int cpu;
/* Avoid the loop, if nothing to update */
- if (this_cpu_read(tvec_bases.migration_enabled) == on)
+ if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
return;
for_each_possible_cpu(cpu) {
- per_cpu(tvec_bases.migration_enabled, cpu) = on;
+ per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
+ per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
if (!update_nohz)
continue;
- per_cpu(tvec_bases.nohz_active, cpu) = true;
+ per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
+ per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
per_cpu(hrtimer_bases.nohz_active, cpu) = true;
}
}
@@ -133,20 +245,6 @@ int timer_migration_handler(struct ctl_table *table, int write,
mutex_unlock(&mutex);
return ret;
}
-
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
- int pinned)
-{
- if (pinned || !base->migration_enabled)
- return this_cpu_ptr(&tvec_bases);
- return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
-}
-#else
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
- int pinned)
-{
- return this_cpu_ptr(&tvec_bases);
-}
#endif
static unsigned long round_jiffies_common(unsigned long j, int cpu,
@@ -351,101 +449,126 @@ unsigned long round_jiffies_up_relative(unsigned long j)
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
-/**
- * set_timer_slack - set the allowed slack for a timer
- * @timer: the timer to be modified
- * @slack_hz: the amount of time (in jiffies) allowed for rounding
- *
- * Set the amount of time, in jiffies, that a certain timer has
- * in terms of slack. By setting this value, the timer subsystem
- * will schedule the actual timer somewhere between
- * the time mod_timer() asks for, and that time plus the slack.
- *
- * By setting the slack to -1, a percentage of the delay is used
- * instead.
- */
-void set_timer_slack(struct timer_list *timer, int slack_hz)
+
+static inline unsigned int timer_get_idx(struct timer_list *timer)
{
- timer->slack = slack_hz;
+ return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}
-EXPORT_SYMBOL_GPL(set_timer_slack);
-static void
-__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
- unsigned long expires = timer->expires;
- unsigned long idx = expires - base->timer_jiffies;
- struct hlist_head *vec;
+ timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
+ idx << TIMER_ARRAYSHIFT;
+}
- if (idx < TVR_SIZE) {
- int i = expires & TVR_MASK;
- vec = base->tv1.vec + i;
- } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
- int i = (expires >> TVR_BITS) & TVN_MASK;
- vec = base->tv2.vec + i;
- } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
- int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
- vec = base->tv3.vec + i;
- } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
- int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
- vec = base->tv4.vec + i;
- } else if ((signed long) idx < 0) {
- /*
- * Can happen if you add a timer with expires == jiffies,
- * or you set a timer to go off in the past
- */
- vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+/*
+ * Helper function to calculate the array index for a given expiry
+ * time.
+ */
+static inline unsigned calc_index(unsigned expires, unsigned lvl)
+{
+ expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+ return LVL_OFFS(lvl) + (expires & LVL_MASK);
+}
+
+static int calc_wheel_index(unsigned long expires, unsigned long clk)
+{
+ unsigned long delta = expires - clk;
+ unsigned int idx;
+
+ if (delta < LVL_START(1)) {
+ idx = calc_index(expires, 0);
+ } else if (delta < LVL_START(2)) {
+ idx = calc_index(expires, 1);
+ } else if (delta < LVL_START(3)) {
+ idx = calc_index(expires, 2);
+ } else if (delta < LVL_START(4)) {
+ idx = calc_index(expires, 3);
+ } else if (delta < LVL_START(5)) {
+ idx = calc_index(expires, 4);
+ } else if (delta < LVL_START(6)) {
+ idx = calc_index(expires, 5);
+ } else if (delta < LVL_START(7)) {
+ idx = calc_index(expires, 6);
+ } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
+ idx = calc_index(expires, 7);
+ } else if ((long) delta < 0) {
+ idx = clk & LVL_MASK;
} else {
- int i;
- /* If the timeout is larger than MAX_TVAL (on 64-bit
- * architectures or with CONFIG_BASE_SMALL=1) then we
- * use the maximum timeout.
+ /*
+ * Force obscenely large timeouts to expire at the
+ * capacity limit of the wheel.
*/
- if (idx > MAX_TVAL) {
- idx = MAX_TVAL;
- expires = idx + base->timer_jiffies;
- }
- i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
- vec = base->tv5.vec + i;
+ if (expires >= WHEEL_TIMEOUT_CUTOFF)
+ expires = WHEEL_TIMEOUT_MAX;
+
+ idx = calc_index(expires, LVL_DEPTH - 1);
}
+ return idx;
+}
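calc_index() rounds the expiry up to the next granule of the chosen level, so a timer may fire up to one granule late but never early. A userspace sketch of that rounding, assuming HZ=1000:

    #include <stdio.h>

    #define LVL_CLK_SHIFT   3
    #define LVL_SHIFT(n)    ((n) * LVL_CLK_SHIFT)
    #define LVL_GRAN(n)     (1UL << LVL_SHIFT(n))
    #define LVL_SIZE        (1UL << 6)
    #define LVL_MASK        (LVL_SIZE - 1)
    #define LVL_OFFS(n)     ((n) * LVL_SIZE)

    static unsigned calc_index(unsigned expires, unsigned lvl)
    {
            expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
            return LVL_OFFS(lvl) + (expires & LVL_MASK);
    }

    int main(void)
    {
            /*
             * A 100ms timeout falls into level 1 (8ms granularity); the
             * effective expiry is rounded up to 104ms, never below 100ms.
             */
            printf("idx=%u\n", calc_index(100, 1));
            return 0;
    }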
- hlist_add_head(&timer->entry, vec);
+/*
+ * Enqueue the timer into the hash bucket, mark it pending in
+ * the bitmap and store the index in the timer flags.
+ */
+static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
+ unsigned int idx)
+{
+ hlist_add_head(&timer->entry, base->vectors + idx);
+ __set_bit(idx, base->pending_map);
+ timer_set_idx(timer, idx);
}
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
- /* Advance base->jiffies, if the base is empty */
- if (!base->all_timers++)
- base->timer_jiffies = jiffies;
+ unsigned int idx;
+
+ idx = calc_wheel_index(timer->expires, base->clk);
+ enqueue_timer(base, timer, idx);
+}
+
+static void
+trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
+{
+ if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+ return;
- __internal_add_timer(base, timer);
/*
- * Update base->active_timers and base->next_timer
+ * TODO: This wants some optimizing similar to the code below, but we
+ * will do that when we switch from push to pull for deferrable timers.
*/
- if (!(timer->flags & TIMER_DEFERRABLE)) {
- if (!base->active_timers++ ||
- time_before(timer->expires, base->next_timer))
- base->next_timer = timer->expires;
+ if (timer->flags & TIMER_DEFERRABLE) {
+ if (tick_nohz_full_cpu(base->cpu))
+ wake_up_nohz_cpu(base->cpu);
+ return;
}
/*
- * Check whether the other CPU is in dynticks mode and needs
- * to be triggered to reevaluate the timer wheel.
- * We are protected against the other CPU fiddling
- * with the timer by holding the timer base lock. This also
- * makes sure that a CPU on the way to stop its tick can not
- * evaluate the timer wheel.
- *
- * Spare the IPI for deferrable timers on idle targets though.
- * The next busy ticks will take care of it. Except full dynticks
- * require special care against races with idle_cpu(), lets deal
- * with that later.
+ * We might have to IPI the remote CPU if the base is idle and the
+ * timer is not deferrable. If the other CPU is on the way to idle
+ * then it can't set base->is_idle as we hold the base lock:
*/
- if (base->nohz_active) {
- if (!(timer->flags & TIMER_DEFERRABLE) ||
- tick_nohz_full_cpu(base->cpu))
- wake_up_nohz_cpu(base->cpu);
- }
+ if (!base->is_idle)
+ return;
+
+ /* Check whether this is the new first expiring timer: */
+ if (time_after_eq(timer->expires, base->next_expiry))
+ return;
+
+ /*
+ * Set the next expiry time and kick the CPU so it can reevaluate the
+ * wheel:
+ */
+ base->next_expiry = timer->expires;
+ wake_up_nohz_cpu(base->cpu);
+}
+
+static void
+internal_add_timer(struct timer_base *base, struct timer_list *timer)
+{
+ __internal_add_timer(base, timer);
+ trigger_dyntick_cpu(base, timer);
}
#ifdef CONFIG_TIMER_STATS
@@ -666,7 +789,6 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
{
timer->entry.pprev = NULL;
timer->flags = flags | raw_smp_processor_id();
- timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
timer->start_site = NULL;
timer->start_pid = -1;
@@ -706,54 +828,125 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending)
entry->next = LIST_POISON2;
}
-static inline void
-detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
-{
- detach_timer(timer, true);
- if (!(timer->flags & TIMER_DEFERRABLE))
- base->active_timers--;
- base->all_timers--;
-}
-
-static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
bool clear_pending)
{
+ unsigned idx = timer_get_idx(timer);
+
if (!timer_pending(timer))
return 0;
+ if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
+ __clear_bit(idx, base->pending_map);
+
detach_timer(timer, clear_pending);
- if (!(timer->flags & TIMER_DEFERRABLE)) {
- base->active_timers--;
- if (timer->expires == base->next_timer)
- base->next_timer = base->timer_jiffies;
- }
- /* If this was the last timer, advance base->jiffies */
- if (!--base->all_timers)
- base->timer_jiffies = jiffies;
return 1;
}
+static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
+{
+ struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
+
+ /*
+ * If the timer is deferrable and nohz is active then we need to use
+ * the deferrable base.
+ */
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+ (tflags & TIMER_DEFERRABLE))
+ base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
+ return base;
+}
+
+static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
+{
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+ /*
+ * If the timer is deferrable and nohz is active then we need to use
+ * the deferrable base.
+ */
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+ (tflags & TIMER_DEFERRABLE))
+ base = this_cpu_ptr(&timer_bases[BASE_DEF]);
+ return base;
+}
+
+static inline struct timer_base *get_timer_base(u32 tflags)
+{
+ return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
+}
+
+#ifdef CONFIG_NO_HZ_COMMON
+static inline struct timer_base *
+__get_target_base(struct timer_base *base, unsigned tflags)
+{
+#ifdef CONFIG_SMP
+ if ((tflags & TIMER_PINNED) || !base->migration_enabled)
+ return get_timer_this_cpu_base(tflags);
+ return get_timer_cpu_base(tflags, get_nohz_timer_target());
+#else
+ return get_timer_this_cpu_base(tflags);
+#endif
+}
+
+static inline void forward_timer_base(struct timer_base *base)
+{
+ /*
+ * We only forward the base when it's idle and we have a delta between
+ * base clock and jiffies.
+ */
+ if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+ return;
+
+ /*
+ * If the next expiry value is > jiffies, then we fast forward to
+ * jiffies, otherwise we forward to the next expiry value.
+ */
+ if (time_after(base->next_expiry, jiffies))
+ base->clk = jiffies;
+ else
+ base->clk = base->next_expiry;
+}
+#else
+static inline struct timer_base *
+__get_target_base(struct timer_base *base, unsigned tflags)
+{
+ return get_timer_this_cpu_base(tflags);
+}
+
+static inline void forward_timer_base(struct timer_base *base) { }
+#endif
+
+static inline struct timer_base *
+get_target_base(struct timer_base *base, unsigned tflags)
+{
+ struct timer_base *target = __get_target_base(base, tflags);
+
+ forward_timer_base(target);
+ return target;
+}
+
/*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
- * means that all timers which are tied to this base via timer->base are
- * locked, and the base itself is locked too.
+ * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
+ * that all timers which are tied to this base are locked, and the base itself
+ * is locked too.
*
* So __run_timers/migrate_timers can safely modify all timers which could
- * be found on ->tvX lists.
+ * be found in the base->vectors array.
*
- * When the timer's base is locked and removed from the list, the
- * TIMER_MIGRATING flag is set, FIXME
+ * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
+ * to wait until the migration is done.
*/
-static struct tvec_base *lock_timer_base(struct timer_list *timer,
- unsigned long *flags)
+static struct timer_base *lock_timer_base(struct timer_list *timer,
+ unsigned long *flags)
__acquires(timer->base->lock)
{
for (;;) {
+ struct timer_base *base;
u32 tf = timer->flags;
- struct tvec_base *base;
if (!(tf & TIMER_MIGRATING)) {
- base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+ base = get_timer_base(tf);
spin_lock_irqsave(&base->lock, *flags);
if (timer->flags == tf)
return base;
@@ -764,13 +957,41 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
}
static inline int
-__mod_timer(struct timer_list *timer, unsigned long expires,
- bool pending_only, int pinned)
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
{
- struct tvec_base *base, *new_base;
- unsigned long flags;
+ struct timer_base *base, *new_base;
+ unsigned int idx = UINT_MAX;
+ unsigned long clk = 0, flags;
int ret = 0;
+ /*
+ * This is a common optimization triggered by the networking code - if
+ * the timer is re-modified to have the same timeout or ends up in the
+ * same array bucket then just return:
+ */
+ if (timer_pending(timer)) {
+ if (timer->expires == expires)
+ return 1;
+ /*
+ * Take the current base clock (base->clk), but without holding
+ * the lock!
+ */
+ base = get_timer_base(timer->flags);
+ clk = base->clk;
+
+ idx = calc_wheel_index(expires, clk);
+
+ /*
+ * Retrieve and compare the array index of the pending
+ * timer. If it matches set the expiry to the new value so a
+ * subsequent call will exit in the expires check above.
+ */
+ if (idx == timer_get_idx(timer)) {
+ timer->expires = expires;
+ return 1;
+ }
+ }
+
timer_stats_timer_set_start_info(timer);
BUG_ON(!timer->function);
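The pending-timer short-cut above targets the canonical networking pattern: a timeout that is pushed further out on every packet. A hedged sketch of such a caller; struct my_conn and IDLE_TIMEOUT are hypothetical:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    #define IDLE_TIMEOUT    (30 * HZ)       /* assumption for illustration */

    struct my_conn {                        /* hypothetical connection state */
            struct timer_list idle_timer;
    };

    static void conn_saw_traffic(struct my_conn *conn)
    {
            /*
             * On most packets the new expiry maps to the same wheel bucket,
             * so mod_timer() returns 1 without requeueing the timer.
             */
            mod_timer(&conn->idle_timer, jiffies + IDLE_TIMEOUT);
    }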
@@ -782,15 +1003,15 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
debug_activate(timer, expires);
- new_base = get_target_base(base, pinned);
+ new_base = get_target_base(base, timer->flags);
if (base != new_base) {
/*
- * We are trying to schedule the timer on the local CPU.
+ * We are trying to schedule the timer on the new base.
* However we can't change timer's base while it is running,
* otherwise del_timer_sync() can't detect that the timer's
- * handler yet has not finished. This also guarantees that
- * the timer is serialized wrt itself.
+ * handler yet has not finished. This also guarantees that the
+ * timer is serialized wrt itself.
*/
if (likely(base->running_timer != timer)) {
/* See the comment in lock_timer_base() */
@@ -805,7 +1026,18 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
}
timer->expires = expires;
- internal_add_timer(base, timer);
+ /*
+ * If 'idx' was calculated above and the base time did not advance
+ * between calculating 'idx' and taking the lock, only enqueue_timer()
+ * and trigger_dyntick_cpu() is required. Otherwise we need to
+ * (re)calculate the wheel index via internal_add_timer().
+ */
+ if (idx != UINT_MAX && clk == base->clk) {
+ enqueue_timer(base, timer, idx);
+ trigger_dyntick_cpu(base, timer);
+ } else {
+ internal_add_timer(base, timer);
+ }
out_unlock:
spin_unlock_irqrestore(&base->lock, flags);
@@ -825,49 +1057,10 @@ out_unlock:
*/
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
- return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
+ return __mod_timer(timer, expires, true);
}
EXPORT_SYMBOL(mod_timer_pending);
-/*
- * Decide where to put the timer while taking the slack into account
- *
- * Algorithm:
- * 1) calculate the maximum (absolute) time
- * 2) calculate the highest bit where the expires and new max are different
- * 3) use this bit to make a mask
- * 4) use the bitmask to round down the maximum time, so that all last
- * bits are zeros
- */
-static inline
-unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
-{
- unsigned long expires_limit, mask;
- int bit;
-
- if (timer->slack >= 0) {
- expires_limit = expires + timer->slack;
- } else {
- long delta = expires - jiffies;
-
- if (delta < 256)
- return expires;
-
- expires_limit = expires + delta / 256;
- }
- mask = expires ^ expires_limit;
- if (mask == 0)
- return expires;
-
- bit = __fls(mask);
-
- mask = (1UL << bit) - 1;
-
- expires_limit = expires_limit & ~(mask);
-
- return expires_limit;
-}
-
/**
* mod_timer - modify a timer's timeout
* @timer: the timer to be modified
@@ -890,49 +1083,11 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
*/
int mod_timer(struct timer_list *timer, unsigned long expires)
{
- expires = apply_slack(timer, expires);
-
- /*
- * This is a common optimization triggered by the
- * networking code - if the timer is re-modified
- * to be the same thing then just return:
- */
- if (timer_pending(timer) && timer->expires == expires)
- return 1;
-
- return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
+ return __mod_timer(timer, expires, false);
}
EXPORT_SYMBOL(mod_timer);
/**
- * mod_timer_pinned - modify a timer's timeout
- * @timer: the timer to be modified
- * @expires: new timeout in jiffies
- *
- * mod_timer_pinned() is a way to update the expire field of an
- * active timer (if the timer is inactive it will be activated)
- * and to ensure that the timer is scheduled on the current CPU.
- *
- * Note that this does not prevent the timer from being migrated
- * when the current CPU goes offline. If this is a problem for
- * you, use CPU-hotplug notifiers to handle it correctly, for
- * example, cancelling the timer when the corresponding CPU goes
- * offline.
- *
- * mod_timer_pinned(timer, expires) is equivalent to:
- *
- * del_timer(timer); timer->expires = expires; add_timer(timer);
- */
-int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
-{
- if (timer->expires == expires && timer_pending(timer))
- return 1;
-
- return __mod_timer(timer, expires, false, TIMER_PINNED);
-}
-EXPORT_SYMBOL(mod_timer_pinned);
-
-/**
* add_timer - start a timer
* @timer: the timer to be added
*
@@ -962,13 +1117,14 @@ EXPORT_SYMBOL(add_timer);
*/
void add_timer_on(struct timer_list *timer, int cpu)
{
- struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
- struct tvec_base *base;
+ struct timer_base *new_base, *base;
unsigned long flags;
timer_stats_timer_set_start_info(timer);
BUG_ON(timer_pending(timer) || !timer->function);
+ new_base = get_timer_cpu_base(timer->flags, cpu);
+
/*
* If @timer was on a different CPU, it should be migrated with the
* old base locked to prevent other operations proceeding with the
@@ -1004,7 +1160,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
*/
int del_timer(struct timer_list *timer)
{
- struct tvec_base *base;
+ struct timer_base *base;
unsigned long flags;
int ret = 0;
@@ -1030,7 +1186,7 @@ EXPORT_SYMBOL(del_timer);
*/
int try_to_del_timer_sync(struct timer_list *timer)
{
- struct tvec_base *base;
+ struct timer_base *base;
unsigned long flags;
int ret = -1;
@@ -1114,27 +1270,6 @@ int del_timer_sync(struct timer_list *timer)
EXPORT_SYMBOL(del_timer_sync);
#endif
-static int cascade(struct tvec_base *base, struct tvec *tv, int index)
-{
- /* cascade all the timers from tv up one level */
- struct timer_list *timer;
- struct hlist_node *tmp;
- struct hlist_head tv_list;
-
- hlist_move_list(tv->vec + index, &tv_list);
-
- /*
- * We are removing _all_ timers from the list, so we
- * don't have to detach them individually.
- */
- hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
- /* No accounting, while moving them */
- __internal_add_timer(base, timer);
- }
-
- return index;
-}
-
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
unsigned long data)
{
@@ -1178,147 +1313,141 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
}
}
-#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
-
-/**
- * __run_timers - run all expired timers (if any) on this CPU.
- * @base: the timer vector to be processed.
- *
- * This function cascades all vectors and executes all expired timer
- * vectors.
- */
-static inline void __run_timers(struct tvec_base *base)
+static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
- struct timer_list *timer;
+ while (!hlist_empty(head)) {
+ struct timer_list *timer;
+ void (*fn)(unsigned long);
+ unsigned long data;
- spin_lock_irq(&base->lock);
+ timer = hlist_entry(head->first, struct timer_list, entry);
+ timer_stats_account_timer(timer);
- while (time_after_eq(jiffies, base->timer_jiffies)) {
- struct hlist_head work_list;
- struct hlist_head *head = &work_list;
- int index;
+ base->running_timer = timer;
+ detach_timer(timer, true);
- if (!base->all_timers) {
- base->timer_jiffies = jiffies;
- break;
+ fn = timer->function;
+ data = timer->data;
+
+ if (timer->flags & TIMER_IRQSAFE) {
+ spin_unlock(&base->lock);
+ call_timer_fn(timer, fn, data);
+ spin_lock(&base->lock);
+ } else {
+ spin_unlock_irq(&base->lock);
+ call_timer_fn(timer, fn, data);
+ spin_lock_irq(&base->lock);
}
+ }
+}
- index = base->timer_jiffies & TVR_MASK;
+static int __collect_expired_timers(struct timer_base *base,
+ struct hlist_head *heads)
+{
+ unsigned long clk = base->clk;
+ struct hlist_head *vec;
+ int i, levels = 0;
+ unsigned int idx;
- /*
- * Cascade timers:
- */
- if (!index &&
- (!cascade(base, &base->tv2, INDEX(0))) &&
- (!cascade(base, &base->tv3, INDEX(1))) &&
- !cascade(base, &base->tv4, INDEX(2)))
- cascade(base, &base->tv5, INDEX(3));
- ++base->timer_jiffies;
- hlist_move_list(base->tv1.vec + index, head);
- while (!hlist_empty(head)) {
- void (*fn)(unsigned long);
- unsigned long data;
- bool irqsafe;
-
- timer = hlist_entry(head->first, struct timer_list, entry);
- fn = timer->function;
- data = timer->data;
- irqsafe = timer->flags & TIMER_IRQSAFE;
-
- timer_stats_account_timer(timer);
-
- base->running_timer = timer;
- detach_expired_timer(timer, base);
-
- if (irqsafe) {
- spin_unlock(&base->lock);
- call_timer_fn(timer, fn, data);
- spin_lock(&base->lock);
- } else {
- spin_unlock_irq(&base->lock);
- call_timer_fn(timer, fn, data);
- spin_lock_irq(&base->lock);
- }
+ for (i = 0; i < LVL_DEPTH; i++) {
+ idx = (clk & LVL_MASK) + i * LVL_SIZE;
+
+ if (__test_and_clear_bit(idx, base->pending_map)) {
+ vec = base->vectors + idx;
+ hlist_move_list(vec, heads++);
+ levels++;
}
+ /* Is it time to look at the next level? */
+ if (clk & LVL_CLK_MASK)
+ break;
+ /* Shift clock for the next level granularity */
+ clk >>= LVL_CLK_SHIFT;
}
- base->running_timer = NULL;
- spin_unlock_irq(&base->lock);
+ return levels;
}
#ifdef CONFIG_NO_HZ_COMMON
/*
- * Find out when the next timer event is due to happen. This
- * is used on S/390 to stop all activity when a CPU is idle.
- * This function needs to be called with interrupts disabled.
+ * Find the next pending bucket of a level. Search from level start (@offset)
+ * + @clk upwards and if nothing there, search from start of the level
+ * (@offset) up to @offset + clk.
*/
-static unsigned long __next_timer_interrupt(struct tvec_base *base)
-{
- unsigned long timer_jiffies = base->timer_jiffies;
- unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
- int index, slot, array, found = 0;
- struct timer_list *nte;
- struct tvec *varray[4];
-
- /* Look for timer events in tv1. */
- index = slot = timer_jiffies & TVR_MASK;
- do {
- hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
- if (nte->flags & TIMER_DEFERRABLE)
- continue;
-
- found = 1;
- expires = nte->expires;
- /* Look at the cascade bucket(s)? */
- if (!index || slot < index)
- goto cascade;
- return expires;
+static int next_pending_bucket(struct timer_base *base, unsigned offset,
+ unsigned clk)
+{
+ unsigned pos, start = offset + clk;
+ unsigned end = offset + LVL_SIZE;
+
+ pos = find_next_bit(base->pending_map, end, start);
+ if (pos < end)
+ return pos - start;
+
+ pos = find_next_bit(base->pending_map, start, offset);
+ return pos < start ? pos + LVL_SIZE - start : -1;
+}
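next_pending_bucket() performs a circular scan of one level's 64 buckets using two find_next_bit() calls. A naive userspace equivalent, returning the distance in buckets to the next pending one (or -1), purely for illustration:

    #include <stdio.h>

    static int next_pending(unsigned long long map, unsigned clk)
    {
            for (unsigned i = 0; i < 64; i++) {
                    unsigned pos = (clk + i) & 63;  /* wrap within the level */

                    if (map & (1ULL << pos))
                            return i;               /* distance in buckets */
            }
            return -1;
    }

    int main(void)
    {
            /* bucket 2 pending, clock at bucket 60: 6 buckets away */
            printf("%d\n", next_pending(1ULL << 2, 60));
            return 0;
    }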
+
+/*
+ * Search the first expiring timer in the various clock levels. Caller must
+ * hold base->lock.
+ */
+static unsigned long __next_timer_interrupt(struct timer_base *base)
+{
+ unsigned long clk, next, adj;
+ unsigned lvl, offset = 0;
+
+ next = base->clk + NEXT_TIMER_MAX_DELTA;
+ clk = base->clk;
+ for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
+ int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
+
+ if (pos >= 0) {
+ unsigned long tmp = clk + (unsigned long) pos;
+
+ tmp <<= LVL_SHIFT(lvl);
+ if (time_before(tmp, next))
+ next = tmp;
}
- slot = (slot + 1) & TVR_MASK;
- } while (slot != index);
-
-cascade:
- /* Calculate the next cascade event */
- if (index)
- timer_jiffies += TVR_SIZE - index;
- timer_jiffies >>= TVR_BITS;
-
- /* Check tv2-tv5. */
- varray[0] = &base->tv2;
- varray[1] = &base->tv3;
- varray[2] = &base->tv4;
- varray[3] = &base->tv5;
-
- for (array = 0; array < 4; array++) {
- struct tvec *varp = varray[array];
-
- index = slot = timer_jiffies & TVN_MASK;
- do {
- hlist_for_each_entry(nte, varp->vec + slot, entry) {
- if (nte->flags & TIMER_DEFERRABLE)
- continue;
-
- found = 1;
- if (time_before(nte->expires, expires))
- expires = nte->expires;
- }
- /*
- * Do we still search for the first timer or are
- * we looking up the cascade buckets ?
- */
- if (found) {
- /* Look at the cascade bucket(s)? */
- if (!index || slot < index)
- break;
- return expires;
- }
- slot = (slot + 1) & TVN_MASK;
- } while (slot != index);
-
- if (index)
- timer_jiffies += TVN_SIZE - index;
- timer_jiffies >>= TVN_BITS;
+ /*
+ * Clock for the next level. If the current level clock lower
+ * bits are zero, we look at the next level as is. If not we
+ * need to advance it by one because that's going to be the
+ * next expiring bucket in that level. base->clk is the next
+ * expiring jiffy. So in case of:
+ *
+ * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+ * 0 0 0 0 0 0
+ *
+ * we have to look at all levels @index 0. With
+ *
+ * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+ * 0 0 0 0 0 2
+ *
+ * LVL0 has the next expiring bucket @index 2. The upper
+ * levels have the next expiring bucket @index 1.
+ *
+ * In case that the propagation wraps the next level the same
+ * rules apply:
+ *
+ * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+ * 0 0 0 0 F 2
+ *
+ * So after looking at LVL0 we get:
+ *
+ * LVL5 LVL4 LVL3 LVL2 LVL1
+ * 0 0 0 1 0
+ *
+ * So no propagation from LVL1 to LVL2 because that happened
+ * with the add already, but then we need to propagate further
+ * from LVL2 to LVL3.
+ *
+ * So the simple check whether the lower bits of the current
+ * level are 0 or not is sufficient for all cases.
+ */
+ adj = clk & LVL_CLK_MASK ? 1 : 0;
+ clk >>= LVL_CLK_SHIFT;
+ clk += adj;
}
- return expires;
+ return next;
}
/*
@@ -1364,7 +1493,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
*/
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
- struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
u64 expires = KTIME_MAX;
unsigned long nextevt;
@@ -1376,19 +1505,80 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
return expires;
spin_lock(&base->lock);
- if (base->active_timers) {
- if (time_before_eq(base->next_timer, base->timer_jiffies))
- base->next_timer = __next_timer_interrupt(base);
- nextevt = base->next_timer;
- if (time_before_eq(nextevt, basej))
- expires = basem;
- else
- expires = basem + (nextevt - basej) * TICK_NSEC;
+ nextevt = __next_timer_interrupt(base);
+ base->next_expiry = nextevt;
+ /*
+ * We have a fresh next event. Check whether we can forward the base:
+ */
+ if (time_after(nextevt, jiffies))
+ base->clk = jiffies;
+ else if (time_after(nextevt, base->clk))
+ base->clk = nextevt;
+
+ if (time_before_eq(nextevt, basej)) {
+ expires = basem;
+ base->is_idle = false;
+ } else {
+ expires = basem + (nextevt - basej) * TICK_NSEC;
+ /*
+ * If we expect to sleep more than a tick, mark the base idle:
+ */
+ if ((expires - basem) > TICK_NSEC)
+ base->is_idle = true;
}
spin_unlock(&base->lock);
return cmp_next_hrtimer_event(basem, expires);
}
+
+/**
+ * timer_clear_idle - Clear the idle state of the timer base
+ *
+ * Called with interrupts disabled
+ */
+void timer_clear_idle(void)
+{
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+ /*
+ * We do this unlocked. The worst outcome is a remote enqueue sending
+ * a pointless IPI, but taking the lock would just make the window for
+ * sending the IPI a few instructions smaller for the cost of taking
+ * the lock in the exit from idle path.
+ */
+ base->is_idle = false;
+}
+
+static int collect_expired_timers(struct timer_base *base,
+ struct hlist_head *heads)
+{
+ /*
+ * NOHZ optimization. After a long idle sleep we need to forward the
+ * base to current jiffies. Avoid a loop by searching the bitfield for
+ * the next expiring timer.
+ */
+ if ((long)(jiffies - base->clk) > 2) {
+ unsigned long next = __next_timer_interrupt(base);
+
+ /*
+ * If the next timer is ahead of time forward to current
+ * jiffies, otherwise forward to the next expiry time:
+ */
+ if (time_after(next, jiffies)) {
+ /* The call site will increment clock! */
+ base->clk = jiffies - 1;
+ return 0;
+ }
+ base->clk = next;
+ }
+ return __collect_expired_timers(base, heads);
+}
+#else
+static inline int collect_expired_timers(struct timer_base *base,
+ struct hlist_head *heads)
+{
+ return __collect_expired_timers(base, heads);
+}
#endif
/*
@@ -1411,15 +1601,42 @@ void update_process_times(int user_tick)
run_posix_cpu_timers(p);
}
+/**
+ * __run_timers - run all expired timers (if any) on this CPU.
+ * @base: the timer vector to be processed.
+ */
+static inline void __run_timers(struct timer_base *base)
+{
+ struct hlist_head heads[LVL_DEPTH];
+ int levels;
+
+ if (!time_after_eq(jiffies, base->clk))
+ return;
+
+ spin_lock_irq(&base->lock);
+
+ while (time_after_eq(jiffies, base->clk)) {
+
+ levels = collect_expired_timers(base, heads);
+ base->clk++;
+
+ while (levels--)
+ expire_timers(base, heads + levels);
+ }
+ base->running_timer = NULL;
+ spin_unlock_irq(&base->lock);
+}
+
/*
* This function runs timers and the timer-tq in bottom half context.
*/
static void run_timer_softirq(struct softirq_action *h)
{
- struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
- if (time_after_eq(jiffies, base->timer_jiffies))
- __run_timers(base);
+ __run_timers(base);
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+ __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}
/*
@@ -1427,7 +1644,18 @@ static void run_timer_softirq(struct softirq_action *h)
*/
void run_local_timers(void)
{
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
hrtimer_run_queues();
+ /* Raise the softirq only if required. */
+ if (time_before(jiffies, base->clk)) {
+ if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+ return;
+ /* CPU is awake, so check the deferrable base. */
+ base++;
+ if (time_before(jiffies, base->clk))
+ return;
+ }
raise_softirq(TIMER_SOFTIRQ);
}
@@ -1512,7 +1740,7 @@ signed long __sched schedule_timeout(signed long timeout)
expire = timeout + jiffies;
setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
- __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
+ __mod_timer(&timer, expire, false);
schedule();
del_singleshot_timer_sync(&timer);
@@ -1563,14 +1791,13 @@ signed long __sched schedule_timeout_idle(signed long timeout)
EXPORT_SYMBOL(schedule_timeout_idle);
#ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
struct timer_list *timer;
int cpu = new_base->cpu;
while (!hlist_empty(head)) {
timer = hlist_entry(head->first, struct timer_list, entry);
- /* We ignore the accounting on the dying cpu */
detach_timer(timer, false);
timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
internal_add_timer(new_base, timer);
@@ -1579,37 +1806,31 @@ static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *he
static void migrate_timers(int cpu)
{
- struct tvec_base *old_base;
- struct tvec_base *new_base;
- int i;
+ struct timer_base *old_base;
+ struct timer_base *new_base;
+ int b, i;
BUG_ON(cpu_online(cpu));
- old_base = per_cpu_ptr(&tvec_bases, cpu);
- new_base = get_cpu_ptr(&tvec_bases);
- /*
- * The caller is globally serialized and nobody else
- * takes two locks at once, deadlock is not possible.
- */
- spin_lock_irq(&new_base->lock);
- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
- BUG_ON(old_base->running_timer);
-
- for (i = 0; i < TVR_SIZE; i++)
- migrate_timer_list(new_base, old_base->tv1.vec + i);
- for (i = 0; i < TVN_SIZE; i++) {
- migrate_timer_list(new_base, old_base->tv2.vec + i);
- migrate_timer_list(new_base, old_base->tv3.vec + i);
- migrate_timer_list(new_base, old_base->tv4.vec + i);
- migrate_timer_list(new_base, old_base->tv5.vec + i);
- }
- old_base->active_timers = 0;
- old_base->all_timers = 0;
+ for (b = 0; b < NR_BASES; b++) {
+ old_base = per_cpu_ptr(&timer_bases[b], cpu);
+ new_base = get_cpu_ptr(&timer_bases[b]);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+ */
+ spin_lock_irq(&new_base->lock);
+ spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+ BUG_ON(old_base->running_timer);
- spin_unlock(&old_base->lock);
- spin_unlock_irq(&new_base->lock);
- put_cpu_ptr(&tvec_bases);
+ for (i = 0; i < WHEEL_SIZE; i++)
+ migrate_timer_list(new_base, old_base->vectors + i);
+
+ spin_unlock(&old_base->lock);
+ spin_unlock_irq(&new_base->lock);
+ put_cpu_ptr(&timer_bases);
+ }
}
static int timer_cpu_notify(struct notifier_block *self,
@@ -1637,13 +1858,15 @@ static inline void timer_register_cpu_notifier(void) { }
static void __init init_timer_cpu(int cpu)
{
- struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
-
- base->cpu = cpu;
- spin_lock_init(&base->lock);
+ struct timer_base *base;
+ int i;
- base->timer_jiffies = jiffies;
- base->next_timer = base->timer_jiffies;
+ for (i = 0; i < NR_BASES; i++) {
+ base = per_cpu_ptr(&timer_bases[i], cpu);
+ base->cpu = cpu;
+ spin_lock_init(&base->lock);
+ base->clk = jiffies;
+ }
}
static void __init init_timer_cpus(void)
@@ -1702,9 +1925,15 @@ static void __sched do_usleep_range(unsigned long min, unsigned long max)
}
/**
- * usleep_range - Drop in replacement for udelay where wakeup is flexible
+ * usleep_range - Sleep for an approximate time
* @min: Minimum time in usecs to sleep
* @max: Maximum time in usecs to sleep
+ *
+ * In non-atomic context where the exact wakeup time is flexible, use
+ * usleep_range() instead of udelay(). The sleep improves responsiveness
+ * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
+ * power usage by allowing hrtimers to take advantage of an already-
+ * scheduled interrupt instead of scheduling a new one just for this sleep.
*/
void __sched usleep_range(unsigned long min, unsigned long max)
{
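A short usage sketch for the clarified semantics; the status register and its ready bit are hypothetical, but register polling is the classic use case the kernel-doc now describes:

    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/errno.h>

    static int wait_ready(void __iomem *reg)
    {
            int tries = 100;

            while (tries--) {
                    if (readl(reg) & 0x1)           /* assumed ready bit */
                            return 0;
                    /* The min/max window lets hrtimers coalesce wakeups. */
                    usleep_range(100, 200);
            }
            return -ETIMEDOUT;
    }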
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 1adecb4..087204c 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -279,7 +279,7 @@ static void print_name_offset(struct seq_file *m, unsigned long addr)
static int tstats_show(struct seq_file *m, void *v)
{
- struct timespec period;
+ struct timespec64 period;
struct entry *entry;
unsigned long ms;
long events = 0;
@@ -295,11 +295,11 @@ static int tstats_show(struct seq_file *m, void *v)
time = ktime_sub(time_stop, time_start);
- period = ktime_to_timespec(time);
+ period = ktime_to_timespec64(time);
ms = period.tv_nsec / 1000000;
seq_puts(m, "Timer Stats Version: v0.3\n");
- seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
+ seq_printf(m, "Sample period: %ld.%03ld s\n", (long)period.tv_sec, ms);
if (atomic_read(&overflow_count))
seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
diff --git a/lib/random32.c b/lib/random32.c
index 510d1ce..69ed593 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -233,7 +233,6 @@ static void __prandom_timer(unsigned long dontcare)
static void __init __prandom_start_seed_timer(void)
{
- set_timer_slack(&seed_timer, HZ);
seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
add_timer(&seed_timer);
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index fa8c398..61a9dee 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -603,7 +603,7 @@ static void reqsk_timer_handler(unsigned long data)
if (req->num_timeout++ == 0)
atomic_dec(&queue->young);
timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
- mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
+ mod_timer(&req->rsk_timer, jiffies + timeo);
return;
}
drop:
@@ -617,8 +617,9 @@ static void reqsk_queue_hash_req(struct request_sock *req,
req->num_timeout = 0;
req->sk = NULL;
- setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
- mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+ setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler,
+ (unsigned long)req);
+ mod_timer(&req->rsk_timer, jiffies + timeout);
inet_ehash_insert(req_to_sk(req), NULL);
/* before letting lookups find us, make sure all req fields
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2065816..ddcd56c 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -188,7 +188,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
tw->tw_prot = sk->sk_prot_creator;
atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
twsk_net_set(tw, sock_net(sk));
- setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
+ setup_pinned_timer(&tw->tw_timer, tw_timer_handler,
+ (unsigned long)tw);
/*
* Because we use RCU lookups, we should not set tw_refcnt
* to a non null value before everything is setup for this
@@ -248,7 +249,7 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
tw->tw_kill = timeo <= 4*HZ;
if (!rearm) {
- BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
+ BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
atomic_inc(&tw->tw_dr->tw_count);
} else {
mod_timer_pending(&tw->tw_timer, jiffies + timeo);