author     Paul Mundt <lethal@linux-sh.org>  2006-12-01 13:12:05 +0900
committer  Paul Mundt <lethal@linux-sh.org>  2006-12-06 10:45:39 +0900
commit     e74b56800e78a10bc09b56a87831876a1d9d09ae (patch)
tree       0468f7bd4324ffe8d6d7f00ffd2fa3376fcc65aa /arch/sh/kernel/timers
parent     bca7c20764c83a44c7b8b0831089922d56a3a9a2 (diff)
sh: Turn off IRQs around get_timer_offset() calls.
Since all of the sys_timer sources currently do this on their own within the ->get_offset() path, it's more sensible to just have the caller take care of it when grabbing xtime_lock. Incidentally, this is more in line with what others (ie, ARM) are doing already.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
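In practice the change moves the IRQ disabling out of each ->get_offset() implementation and up to the timekeeping caller, which already takes xtime_lock. A minimal caller-side sketch of that pattern is below; the function name sh_do_gettimeofday and the exact body are illustrative assumptions, not code from this patch (the real callsite lives outside the directory shown in this diff):

/*
 * Sketch only: shows the caller disabling IRQs around the xtime_lock
 * read side, so get_timer_offset() can run without its own locking.
 * sh_do_gettimeofday is a hypothetical name used for illustration.
 */
#include <linux/seqlock.h>
#include <linux/time.h>
#include <asm/timer.h>

static void sh_do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, flags;
	unsigned long usec, sec;

	do {
		/*
		 * IRQs stay off for the whole read of xtime plus the
		 * hardware offset, which is what the per-driver
		 * spin_lock_irqsave() used to guarantee locally.
		 */
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = get_timer_offset();
		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}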
Diffstat (limited to 'arch/sh/kernel/timers')
-rw-r--r--  arch/sh/kernel/timers/timer-cmt.c    9
-rw-r--r--  arch/sh/kernel/timers/timer-mtu2.c  10
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c    9
3 files changed, 3 insertions, 25 deletions
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
index 24b0399..95581dc 100644
--- a/arch/sh/kernel/timers/timer-cmt.c
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <asm/timer.h>
#include <asm/rtc.h>
@@ -46,13 +45,9 @@
#error "Unknown CPU SUBTYPE"
#endif
-static DEFINE_SPINLOCK(cmt0_lock);
-
static unsigned long cmt_timer_get_offset(void)
{
int count;
- unsigned long flags;
-
static unsigned short count_p = 0xffff; /* for the first call after boot */
static unsigned long jiffies_p = 0;
@@ -61,7 +56,6 @@ static unsigned long cmt_timer_get_offset(void)
*/
unsigned long jiffies_t;
- spin_lock_irqsave(&cmt0_lock, flags);
/* timer count may underflow right here */
count = ctrl_inw(CMT_CMCOR_0);
count -= ctrl_inw(CMT_CMCNT_0);
@@ -88,7 +82,6 @@ static unsigned long cmt_timer_get_offset(void)
jiffies_p = jiffies_t;
count_p = count;
- spin_unlock_irqrestore(&cmt0_lock, flags);
count = ((LATCH-1) - count) * TICK_SIZE;
count = (count + LATCH/2) / LATCH;
@@ -122,7 +115,7 @@ static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id)
static struct irqaction cmt_irq = {
.name = "timer",
.handler = cmt_timer_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.mask = CPU_MASK_NONE,
};
diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c
index 92c98b5..201f0a6 100644
--- a/arch/sh/kernel/timers/timer-mtu2.c
+++ b/arch/sh/kernel/timers/timer-mtu2.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <asm/timer.h>
#include <asm/io.h>
@@ -28,9 +27,6 @@
* However, we can implement channel cascade if we go the overflow route and
* get away with using 2 MTU2 channels as a 32-bit timer.
*/
-
-static DEFINE_SPINLOCK(mtu2_lock);
-
#define MTU2_TSTR 0xfffe4280
#define MTU2_TCR_1 0xfffe4380
#define MTU2_TMDR_1 0xfffe4381
@@ -55,8 +51,6 @@ static DEFINE_SPINLOCK(mtu2_lock);
static unsigned long mtu2_timer_get_offset(void)
{
int count;
- unsigned long flags;
-
static int count_p = 0x7fff; /* for the first call after boot */
static unsigned long jiffies_p = 0;
@@ -65,7 +59,6 @@ static unsigned long mtu2_timer_get_offset(void)
*/
unsigned long jiffies_t;
- spin_lock_irqsave(&mtu2_lock, flags);
/* timer count may underflow right here */
count = ctrl_inw(MTU2_TCNT_1); /* read the latched count */
@@ -90,7 +83,6 @@ static unsigned long mtu2_timer_get_offset(void)
jiffies_p = jiffies_t;
count_p = count;
- spin_unlock_irqrestore(&mtu2_lock, flags);
count = ((LATCH-1) - count) * TICK_SIZE;
count = (count + LATCH/2) / LATCH;
@@ -118,7 +110,7 @@ static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id)
static struct irqaction mtu2_irq = {
.name = "timer",
.handler = mtu2_timer_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.mask = CPU_MASK_NONE,
};
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 06a70db..b9ed8a3 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <asm/timer.h>
#include <asm/rtc.h>
@@ -31,13 +30,9 @@
#define TMU0_TCR_CALIB 0x0000
-static DEFINE_SPINLOCK(tmu0_lock);
-
static unsigned long tmu_timer_get_offset(void)
{
int count;
- unsigned long flags;
-
static int count_p = 0x7fffffff; /* for the first call after boot */
static unsigned long jiffies_p = 0;
@@ -46,7 +41,6 @@ static unsigned long tmu_timer_get_offset(void)
*/
unsigned long jiffies_t;
- spin_lock_irqsave(&tmu0_lock, flags);
/* timer count may underflow right here */
count = ctrl_inl(TMU0_TCNT); /* read the latched count */
@@ -72,7 +66,6 @@ static unsigned long tmu_timer_get_offset(void)
jiffies_p = jiffies_t;
count_p = count;
- spin_unlock_irqrestore(&tmu0_lock, flags);
count = ((LATCH-1) - count) * TICK_SIZE;
count = (count + LATCH/2) / LATCH;
@@ -106,7 +99,7 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
static struct irqaction tmu_irq = {
.name = "timer",
.handler = tmu_timer_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.mask = CPU_MASK_NONE,
};