Diffstat (limited to 'arch/mips/kernel/time.c')
-rw-r--r--  arch/mips/kernel/time.c  416
1 file changed, 222 insertions(+), 194 deletions(-)
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 9a5596b..5892491 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,6 +11,7 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -24,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/kallsyms.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
@@ -32,8 +34,11 @@
#include <asm/cpu-features.h>
#include <asm/div64.h>
#include <asm/sections.h>
+#include <asm/smtc_ipi.h>
#include <asm/time.h>
+#include <irq.h>
+
/*
* The integer part of the number of usecs per jiffy is taken from tick,
* but the fractional part is not recorded, so we calculate it using the
@@ -49,32 +54,27 @@
* forward reference
*/
DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
-/*
- * By default we provide the null RTC ops
- */
-static unsigned long null_rtc_get_time(void)
+int __weak rtc_mips_set_time(unsigned long sec)
{
- return mktime(2000, 1, 1, 0, 0, 0);
+ return 0;
}
+EXPORT_SYMBOL(rtc_mips_set_time);
-static int null_rtc_set_time(unsigned long sec)
+int __weak rtc_mips_set_mmss(unsigned long nowtime)
{
- return 0;
+ return rtc_mips_set_time(nowtime);
}
-unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
-int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
-int (*rtc_mips_set_mmss)(unsigned long);
-
+int update_persistent_clock(struct timespec now)
+{
+ return rtc_mips_set_mmss(now.tv_sec);
+}
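
The function-pointer RTC hooks are gone; rtc_mips_set_time() and rtc_mips_set_mmss() are now __weak stubs, so a platform overrides the RTC path simply by defining a non-weak function of the same name. A minimal sketch of such an override, where board_rtc_write() is a hypothetical board helper, not a real kernel API:

int rtc_mips_set_time(unsigned long sec)
{
	/* Non-weak definition; the linker prefers it over the __weak stub. */
	return board_rtc_write(sec);
}
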
/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy __read_mostly;
-/* expirelo is the count value for next CPU timer interrupt */
-static unsigned int expirelo;
-
-
/*
* Null timer ack for systems not needing one (e.g. i8254).
*/
@@ -93,18 +93,7 @@ static cycle_t null_hpt_read(void)
*/
static void c0_timer_ack(void)
{
- unsigned int count;
-
- /* Ack this timer interrupt and set the next one. */
- expirelo += cycles_per_jiffy;
- write_c0_compare(expirelo);
-
- /* Check to see if we have missed any timer interrupts. */
- while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
- /* missed_timer_count++; */
- expirelo = count + cycles_per_jiffy;
- write_c0_compare(expirelo);
- }
+ write_c0_compare(read_c0_compare());
}
/*
@@ -115,19 +104,9 @@ static cycle_t c0_hpt_read(void)
return read_c0_count();
}
-/* For use both as a high precision timer and an interrupt source. */
-static void __init c0_hpt_timer_init(void)
-{
- expirelo = read_c0_count() + cycles_per_jiffy;
- write_c0_compare(expirelo);
-}
-
int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
-/* last time when xtime and rtc are sync'ed up */
-static long last_rtc_update;
-
/*
* local_timer_interrupt() does profiling and process accounting
* on a per-CPU basis.
@@ -144,60 +123,15 @@ void local_timer_interrupt(int irq, void *dev_id)
update_process_times(user_mode(get_irq_regs()));
}
-/*
- * High-level timer interrupt service routines. This function
- * is set as irqaction->handler and is invoked through do_IRQ.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- write_seqlock(&xtime_lock);
-
- mips_timer_ack();
-
- /*
- * call the generic timer interrupt handling
- */
- do_timer(1);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
- last_rtc_update = xtime.tv_sec;
- } else {
- /* do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- }
-
- write_sequnlock(&xtime_lock);
-
- /*
- * In UP mode, we call local_timer_interrupt() to do profiling
- * and process accouting.
- *
- * In SMP mode, local_timer_interrupt() is invoked by appropriate
- * low-level local timer interrupt handler.
- */
- local_timer_interrupt(irq, dev_id);
-
- return IRQ_HANDLED;
-}
-
int null_perf_irq(void)
{
return 0;
}
+EXPORT_SYMBOL(null_perf_irq);
+
int (*perf_irq)(void) = null_perf_irq;
-EXPORT_SYMBOL(null_perf_irq);
EXPORT_SYMBOL(perf_irq);
/*
@@ -215,7 +149,7 @@ EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
* Possibly handle a performance counter interrupt.
* Return true if the timer interrupt should not be checked
*/
-static inline int handle_perf_irq (int r2)
+static inline int handle_perf_irq(int r2)
{
/*
* The performance counter overflow interrupt may be shared with the
@@ -229,63 +163,23 @@ static inline int handle_perf_irq (int r2)
!r2;
}
-asmlinkage void ll_timer_interrupt(int irq)
-{
- int r2 = cpu_has_mips_r2;
-
- irq_enter();
- kstat_this_cpu.irqs[irq]++;
-
- if (handle_perf_irq(r2))
- goto out;
-
- if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
- goto out;
-
- timer_interrupt(irq, NULL);
-
-out:
- irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
- irq_enter();
- if (smp_processor_id() != 0)
- kstat_this_cpu.irqs[irq]++;
-
- /* we keep interrupt disabled all the time */
- local_timer_interrupt(irq, NULL);
-
- irq_exit();
-}
-
/*
* time_init() - it does the following things.
*
- * 1) board_time_init() -
+ * 1) plat_time_init() -
* a) (optional) set up RTC routines,
* b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use cpu counter as timer interrupt
* source)
- * 2) setup xtime based on rtc_mips_get_time().
- * 3) calculate a couple of cached variables for later usage
- * 4) plat_timer_setup() -
+ * 2) calculate a couple of cached variables for later usage
+ * 3) plat_timer_setup() -
* a) (optional) over-write any choices made above by time_init().
* b) machine specific code should setup the timer irqaction.
* c) enable the timer interrupt
*/
-void (*board_time_init)(void);
-
unsigned int mips_hpt_frequency;
-static struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU,
- .name = "timer",
-};
-
static unsigned int __init calibrate_hpt(void)
{
cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
@@ -334,6 +228,84 @@ struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ {
+ unsigned long flags, vpflags;
+ local_irq_save(flags);
+ vpflags = dvpe();
+#endif
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ res = ((long)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
+#ifdef CONFIG_MIPS_MT_SMTC
+ evpe(vpflags);
+ local_irq_restore(flags);
+ }
+#endif
+ return res;
+}
+
+static void mips_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+static int cp0_timer_irq_installed;
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ const int r2 = cpu_has_mips_r2;
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run
+ * the performance counter interrupt handler anyway.
+ */
+ if (handle_perf_irq(r2))
+ goto out;
+
+ /*
+ * The same applies to performance counter interrupts. But with the
+ * above we now know that the reason we got here must be a timer
+ * interrupt. Being the paranoiacs we are we check anyway.
+ */
+ if (!r2 || (read_c0_cause() & (1 << 30))) {
+ c0_timer_ack();
+#ifdef CONFIG_MIPS_MT_SMTC
+ if (cpu_data[cpu].vpe_id)
+ goto out;
+ cpu = 0;
+#endif
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+#ifdef CONFIG_MIPS_MT_SMTC
+ .flags = IRQF_DISABLED,
+#else
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+#endif
+ .name = "timer",
+};
+
static void __init init_mips_clocksource(void)
{
u64 temp;
@@ -357,19 +329,127 @@ static void __init init_mips_clocksource(void)
clocksource_register(&clocksource_mips);
}
-void __init time_init(void)
+void __init __weak plat_time_init(void)
+{
+}
+
+void __init __weak plat_timer_setup(struct irqaction *irq)
+{
+}
+
+#ifdef CONFIG_MIPS_MT_SMTC
+DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
+static void smtc_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+}
+
+int dummycnt[NR_CPUS];
+
+static void mips_broadcast(cpumask_t mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+}
+
+static void setup_smtc_dummy_clockevent_device(void)
+{
+ //uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+
+ cd->name = "SMTC";
+ cd->features = CLOCK_EVT_FEAT_DUMMY;
+
+ /* Calculate the min / max delta */
+ cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 0; //32;
+ cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
+
+ cd->rating = 200;
+ cd->irq = 17; //-1;
+// if (cpu)
+// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
+// else
+ cd->cpumask = cpumask_of_cpu(cpu);
+
+ cd->set_mode = smtc_set_mode;
+
+ cd->broadcast = mips_broadcast;
+
+ clockevents_register_device(cd);
+}
+#endif
+
+static void mips_event_handler(struct clock_event_device *dev)
{
- if (board_time_init)
- board_time_init();
+}
- if (!rtc_mips_set_mmss)
- rtc_mips_set_mmss = rtc_mips_set_time;
+void __cpuinit mips_clockevent_init(void)
+{
+ uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq = MIPS_CPU_IRQ_BASE + 7;
- xtime.tv_sec = rtc_mips_get_time();
- xtime.tv_nsec = 0;
+ if (!cpu_has_counter)
+ return;
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+#ifdef CONFIG_MIPS_MT_SMTC
+ setup_smtc_dummy_clockevent_device();
+
+ /*
+ * On SMTC we only register VPE0's compare interrupt as clockevent
+ * device.
+ */
+ if (cpu)
+ return;
+#endif
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 32;
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x30, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+#ifdef CONFIG_MIPS_MT_SMTC
+ cd->cpumask = CPU_MASK_ALL;
+#else
+ cd->cpumask = cpumask_of_cpu(cpu);
+#endif
+ cd->set_next_event = mips_next_event;
+ cd->set_mode = mips_set_mode;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_register_device(cd);
+
+ if (!cp0_timer_irq_installed) {
+#ifdef CONFIG_MIPS_MT_SMTC
+#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
+ setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
+#else
+ setup_irq(irq, &timer_irqaction);
+#endif /* CONFIG_MIPS_MT_SMTC */
+ cp0_timer_irq_installed = 1;
+ }
+}
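
div_sc(freq, NSEC_PER_SEC, 32) is roughly (freq << 32) / 10^9, i.e. the counter rate expressed as cycles per nanosecond in 32.32 fixed point, and clockevent_delta2ns() is the inverse conversion. A standalone arithmetic check, assuming a hypothetical 100 MHz Count rate rather than any real board's mips_hpt_frequency:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq = 100000000ULL;				/* assumed 100 MHz counter */
	uint32_t mult = (uint32_t)((freq << 32) / 1000000000ULL);	/* ~429496729 */
	uint64_t max_ns = ((uint64_t)0x7fffffff << 32) / mult;		/* ~21.4 s max delta */

	printf("mult=%u max_delta_ns=%llu\n", mult, (unsigned long long)max_ns);
	return 0;
}
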
+
+void __init time_init(void)
+{
+ plat_time_init();
/* Choose appropriate high precision timer routines. */
if (!cpu_has_counter && !clocksource_mips.read)
@@ -392,11 +472,6 @@ void __init time_init(void)
/* Calculate cache parameters. */
cycles_per_jiffy =
(mips_hpt_frequency + HZ / 2) / HZ;
- /*
- * This sets up the high precision
- * timer for the first interrupt.
- */
- c0_hpt_timer_init();
}
}
if (!mips_hpt_frequency)
@@ -406,6 +481,10 @@ void __init time_init(void)
printk("Using %u.%03u MHz high precision timer.\n",
((mips_hpt_frequency + 500) / 1000) / 1000,
((mips_hpt_frequency + 500) / 1000) % 1000);
+
+#ifdef CONFIG_IRQ_CPU
+ setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
+#endif
}
if (!mips_timer_ack)
@@ -426,56 +505,5 @@ void __init time_init(void)
plat_timer_setup(&timer_irqaction);
init_mips_clocksource();
+ mips_clockevent_init();
}
-
-#define FEBRUARY 2
-#define STARTOFTIME 1970
-#define SECDAY 86400L
-#define SECYR (SECDAY * 365)
-#define leapyear(y) ((!((y) % 4) && ((y) % 100)) || !((y) % 400))
-#define days_in_year(y) (leapyear(y) ? 366 : 365)
-#define days_in_month(m) (month_days[(m) - 1])
-
-static int month_days[12] = {
- 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
-};
-
-void to_tm(unsigned long tim, struct rtc_time *tm)
-{
- long hms, day, gday;
- int i;
-
- gday = day = tim / SECDAY;
- hms = tim % SECDAY;
-
- /* Hours, minutes, seconds are easy */
- tm->tm_hour = hms / 3600;
- tm->tm_min = (hms % 3600) / 60;
- tm->tm_sec = (hms % 3600) % 60;
-
- /* Number of years in days */
- for (i = STARTOFTIME; day >= days_in_year(i); i++)
- day -= days_in_year(i);
- tm->tm_year = i;
-
- /* Number of months in days left */
- if (leapyear(tm->tm_year))
- days_in_month(FEBRUARY) = 29;
- for (i = 1; day >= days_in_month(i); i++)
- day -= days_in_month(i);
- days_in_month(FEBRUARY) = 28;
- tm->tm_mon = i - 1; /* tm_mon starts from 0 to 11 */
-
- /* Days are what is left over (+1) from all that. */
- tm->tm_mday = day + 1;
-
- /*
- * Determine the day of week
- */
- tm->tm_wday = (gday + 4) % 7; /* 1970/1/1 was Thursday */
-}
-
-EXPORT_SYMBOL(rtc_lock);
-EXPORT_SYMBOL(to_tm);
-EXPORT_SYMBOL(rtc_mips_set_time);
-EXPORT_SYMBOL(rtc_mips_get_time);