author    Russell King <rmk@dyn-67.arm.linux.org.uk>  2008-10-14 22:24:51 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2008-10-14 22:24:51 +0100
commit    7e69a8c4d06b7ecb874f571e82b715a9f79bc3c4 (patch)
tree      0248fb8f7a3e445cc3c744252abeecabb9205c05 /arch/mips/kernel/cevt-r4k.c
parent    b6825d2df55aa7d7341c715b577b73a6a03dc944 (diff)
parent    d5120ae72a066b18f98e0c45ce73262f58030851 (diff)
download  op-kernel-dev-7e69a8c4d06b7ecb874f571e82b715a9f79bc3c4.zip
          op-kernel-dev-7e69a8c4d06b7ecb874f571e82b715a9f79bc3c4.tar.gz
Merge branch 's3c-move' into devel
Conflicts: arch/arm/mach-versatile/core.c
Diffstat (limited to 'arch/mips/kernel/cevt-r4k.c')
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 173
1 file changed, 48 insertions(+), 125 deletions(-)
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d90..4a4c59f 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
#include <asm/smtc_ipi.h>
#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
unsigned int cnt;
int res;
-#ifdef CONFIG_MIPS_MT_SMTC
- {
- unsigned long flags, vpflags;
- local_irq_save(flags);
- vpflags = dvpe();
-#endif
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
- evpe(vpflags);
- local_irq_restore(flags);
- }
-#endif
return res;
}
-static void mips_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
{
/* Nothing to do ... */
}
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
- write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
- /*
- * The performance counter overflow interrupt may be shared with the
- * timer interrupt (cp0_perfcount_irq < 0). If it is and a
- * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
- * and we can't reliably determine if a counter interrupt has also
- * happened (!r2) then don't check for a timer interrupt.
- */
- return (cp0_perfcount_irq < 0) &&
- perf_irq() == IRQ_HANDLED &&
- !r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
struct clock_event_device *cd;
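
An aside on mips_next_event() in the hunk above: it arms a one-shot tick by writing Count + delta into Compare, then re-reads Count and returns -ETIME if the deadline has already slipped past. The "(int)(read_c0_count() - cnt) > 0" idiom stays correct across a 32-bit Count rollover, because the unsigned subtraction wraps modulo 2^32 before the signed test. A minimal user-space sketch of the same idiom (illustrative only, not part of the patch; the deadline_passed() helper is made up for this example):

#include <stdio.h>

/*
 * Wrap-safe "has the counter passed the deadline?" test, the same
 * idiom mips_next_event() uses on the CP0 Count register.
 */
static int deadline_passed(unsigned int now, unsigned int deadline)
{
	return (int)(now - deadline) > 0;
}

int main(void)
{
	unsigned int deadline = 0xfffffff0u;	/* just before rollover */
	unsigned int now      = 0x00000010u;	/* just after rollover  */

	/* Prints 1: the deadline is correctly seen as already passed. */
	printf("%d\n", deadline_passed(now, deadline));
	return 0;
}
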
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
* interrupt. Being the paranoiacs we are we check anyway.
*/
if (!r2 || (read_c0_cause() & (1 << 30))) {
- c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
- if (cpu_data[cpu].vpe_id)
- goto out;
- cpu = 0;
-#endif
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
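
The "(1 << 30)" test in the hunk above reads the TI (Timer Interrupt) bit that MIPS32/64 R2 cores latch in the Cause register; pre-R2 cores have no such bit, hence the "!r2 ||" fallback that always takes the timer path. A short sketch with the magic number given a name (the mask value comes straight from the code above; the macro and helper names below are illustrative, not taken from the kernel headers):

/* Cause.TI: Count/Compare interrupt pending (MIPS R2 and later only). */
#define CAUSE_TIMER_PENDING	(1u << 30)

/*
 * Same decision as c0_compare_interrupt(): on pre-R2 cores we cannot
 * tell, so assume the interrupt is ours; on R2 cores trust Cause.TI.
 */
static inline int timer_irq_pending(int cpu_is_r2, unsigned int cause)
{
	return !cpu_is_r2 || (cause & CAUSE_TIMER_PENDING);
}
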
@@ -107,65 +78,16 @@ out:
return IRQ_HANDLED;
}
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
- .flags = IRQF_DISABLED,
-#else
.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
.name = "timer",
};
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
- unsigned int cpu;
-
- for_each_cpu_mask(cpu, mask)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
- //uint64_t mips_freq = mips_hpt_frequency;
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
- cd->name = "SMTC";
- cd->features = CLOCK_EVT_FEAT_DUMMY;
-
- /* Calculate the min / max delta */
- cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 0; //32;
- cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
-
- cd->rating = 200;
- cd->irq = 17; //-1;
-// if (cpu)
-// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-// else
- cd->cpumask = cpumask_of_cpu(cpu);
-
- cd->set_mode = smtc_set_mode;
-
- cd->broadcast = mips_broadcast;
-
- clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
{
}
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
return (read_c0_cause() >> cp0_compare_irq) & 0x100;
}
-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register. 4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+ do { \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ } while (0)
+
+int c0_compare_int_usable(void)
{
unsigned int delta;
unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
*/
if (c0_compare_int_pending()) {
write_c0_compare(read_c0_count());
- irq_disable_hazard();
+ compare_change_hazard();
if (c0_compare_int_pending())
return 0;
}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
- irq_disable_hazard();
+ compare_change_hazard();
if ((int)(read_c0_count() - cnt) < 0)
break;
/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
while ((int)(read_c0_count() - cnt) <= 0)
; /* Wait for expiry */
+ compare_change_hazard();
if (!c0_compare_int_pending())
return 0;
write_c0_compare(read_c0_count());
- irq_disable_hazard();
+ compare_change_hazard();
if (c0_compare_int_pending())
return 0;
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
return 1;
}
+#ifndef CONFIG_MIPS_MT_SMTC
+
int __cpuinit mips_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
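
Pieced together from the hunks above, the interrupt probe now reads roughly as follows after the patch. This is a paraphrase for readability, not the verbatim post-patch file: the retry-loop bounds and some comments are not shown in this diff, so those details are illustrative placeholders.

int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/* If a stale compare interrupt is pending, try to ack it away. */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		compare_change_hazard();
		if (c0_compare_int_pending())
			return 0;		/* stuck pending: not usable */
	}

	/* Arm Compare a little ahead of Count, widening delta if we lose the race. */
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {	/* bounds illustrative */
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		compare_change_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;			/* armed before expiry */
		/* otherwise the timer already expired: try a larger delta */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;				/* wait for expiry */

	/* The interrupt must now be visible in Cause ... */
	compare_change_hazard();
	if (!c0_compare_int_pending())
		return 0;

	/* ... and acking it must make it go away again. */
	write_c0_compare(read_c0_count());
	compare_change_hazard();
	if (c0_compare_int_pending())
		return 0;

	return 1;
}
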
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
-#ifdef CONFIG_MIPS_MT_SMTC
- setup_smtc_dummy_clockevent_device();
-
- /*
- * On SMTC we only register VPE0's compare interrupt as clockevent
- * device.
- */
- if (cpu)
- return 0;
-#endif
-
if (!c0_compare_int_usable())
return -ENXIO;
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
cd->rating = 300;
cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
- cd->cpumask = CPU_MASK_ALL;
-#else
cd->cpumask = cpumask_of_cpu(cpu);
-#endif
cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_mode;
+ cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
cp0_timer_irq_installed = 1;
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
- setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
setup_irq(irq, &c0_compare_irqaction);
-#endif
return 0;
}
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
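
The net effect of the patch is to strip the CONFIG_MIPS_MT_SMTC special cases out of the generic R4K clockevent code and un-static the routines an SMTC build now overrides, with the newly included <asm/cevt-r4k.h> carrying the shared declarations. The header itself is not part of this diff; going only by the symbols made non-static above, a plausible sketch of what it has to declare would be (guard name and include list are guesses):

/*
 * Sketch of the declarations <asm/cevt-r4k.h> needs to provide for
 * the symbols this patch makes non-static; the real header may differ.
 */
#ifndef __ASM_CEVT_R4K_H
#define __ASM_CEVT_R4K_H

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
extern int cp0_timer_irq_installed;
extern struct irqaction c0_compare_irqaction;

extern void mips_event_handler(struct clock_event_device *dev);
extern void mips_set_clock_mode(enum clock_event_mode mode,
				struct clock_event_device *evt);
extern int c0_compare_int_usable(void);
extern irqreturn_t c0_compare_interrupt(int irq, void *dev_id);

#endif /* __ASM_CEVT_R4K_H */
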