/*
 *  linux/arch/alpha/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
 *
 * This file contains the clocksource time handling.
 * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1997-01-09    Adrian Sun
 *      use interval timer if CONFIG_RTC=y
 * 1997-10-29    John Bowman (bowman@math.ualberta.ca)
 *      fixed tick loss calculation in timer_interrupt
 *      (round system clock to nearest tick instead of truncating)
 *      fixed algorithm in time_init for getting time from CMOS clock
 * 1999-04-16	Thorsten Kranzkowski (dl8bcu@gmx.net)
 *	fixed algorithm in do_gettimeofday() for calculating the precise time
 *	from processor cycle counter (now taking lost_ticks into account)
 * 2003-06-03	R. Scott Bailey <scott.bailey@eds.com>
 *	Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <linux/irq_work.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>

#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>

#include "proto.h"
#include "irq_impl.h"

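/* Serializes access to the CMOS/RTC registers; exported so that
   RTC drivers can take the same lock.  */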
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

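/* Estimated CPU cycle frequency in Hz; 0 means not yet known.
   Platform setup code may have filled this in before time_init runs.  */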
unsigned long est_cycle_freq;

#ifdef CONFIG_IRQ_WORK

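/* There is no way here to raise an interrupt directly from
   arch_irq_work_raise(), so we set a per-cpu flag and run any
   queued work from the next timer tick (see rtc_timer_interrupt).  */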
DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()  __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()      __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()     __this_cpu_write(irq_work_pending, 0)

void arch_irq_work_raise(void)
{
	set_irq_work_pending_flag();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()      0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */


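/* Read the low 32 bits of the processor cycle counter via the RPCC
   instruction; it free-runs at the CPU clock frequency.  */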
static inline __u32 rpcc(void)
{
	return __builtin_alpha_rpcc();
}



/*
 * The RTC as a clock_event_device primitive.
 */

static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);

irqreturn_t
rtc_timer_interrupt(int irq, void *dev)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	/* Don't run the hook for UNUSED or SHUTDOWN.  */
	if (likely(clockevent_state_periodic(ce)))
		ce->event_handler(ce);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	return IRQ_HANDLED;
}

static int
rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
	/* This hook is for oneshot mode, which we don't support.  */
	return -EINVAL;
}

static void __init
init_rtc_clockevent(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	*ce = (struct clock_event_device){
		.name = "rtc",
		.features = CLOCK_EVT_FEAT_PERIODIC,
		.rating = 100,
		.cpumask = cpumask_of(cpu),
		.set_next_event = rtc_ce_set_next_event,
	};

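	/* A periodic-only device ticking at CONFIG_HZ; the min/max
	   event deltas are unused and left zero.  */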
	clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
}


/*
 * The QEMU clock as a clocksource primitive.
 */

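/* time_init registers this clocksource at NSEC_PER_SEC, i.e. the
   QEMU vmtime counter ticks in nanoseconds.  */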
static cycle_t
qemu_cs_read(struct clocksource *cs)
{
	return qemu_get_vmtime();
}

static struct clocksource qemu_cs = {
	.name                   = "qemu",
	.rating                 = 400,
	.read                   = qemu_cs_read,
	.mask                   = CLOCKSOURCE_MASK(64),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS,
	.max_idle_ns		= LONG_MAX
};


/*
 * The QEMU alarm as a clock_event_device primitive.
 */

static int qemu_ce_shutdown(struct clock_event_device *ce)
{
	/* The mode member of CE is updated for us in generic code.
	   Just make sure that the event is disabled.  */
	qemu_set_alarm_abs(0);
	return 0;
}

static int
qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
	qemu_set_alarm_rel(evt);
	return 0;
}

static irqreturn_t
qemu_timer_interrupt(int irq, void *dev)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	ce->event_handler(ce);
	return IRQ_HANDLED;
}

static void __init
init_qemu_clockevent(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	*ce = (struct clock_event_device){
		.name = "qemu",
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 400,
		.cpumask = cpumask_of(cpu),
		.set_state_shutdown = qemu_ce_shutdown,
		.set_state_oneshot = qemu_ce_shutdown,
		.tick_resume = qemu_ce_shutdown,
		.set_next_event = qemu_ce_set_next_event,
	};

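	/* Events are programmed in nanoseconds, hence the "frequency"
	   of NSEC_PER_SEC, with deltas from 1000ns up to LONG_MAX.  */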
	clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
}


void __init
common_init_rtc(void)
{
	unsigned char x, sel = 0;

	/* Reset periodic interrupt frequency.  */
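	/* The MC146818 periodic interrupt rate for divider code N
	   (N >= 3) is 32768 >> (N - 1) Hz; e.g. code 6 gives 1024 Hz.  */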
#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
	/* Test includes known working values on various platforms
	   where 0x26 is wrong; we refuse to change those. */
	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
		sel = RTC_REF_CLCK_32KHZ + 6;
	}
#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
	sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
#else
# error "Unknown HZ from arch/alpha/Kconfig"
#endif
	if (sel) {
		printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
		       CONFIG_HZ, sel);
		CMOS_WRITE(sel, RTC_FREQ_SELECT);
	}

	/* Turn on periodic interrupts.  */
	x = CMOS_READ(RTC_CONTROL);
	if (!(x & RTC_PIE)) {
		printk("Turning on RTC interrupts.\n");
		x |= RTC_PIE;
		x &= ~(RTC_AIE | RTC_UIE);
		CMOS_WRITE(x, RTC_CONTROL);
	}
	(void) CMOS_READ(RTC_INTR_FLAGS);

	outb(0x36, 0x43);	/* pit counter 0: system timer */
	outb(0x00, 0x40);
	outb(0x00, 0x40);

	outb(0xb6, 0x43);	/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);

	init_rtc_irq();
}


#ifndef CONFIG_ALPHA_WTINT
/*
 * The RPCC as a clocksource primitive.
 *
 * While we have free-running timecounters running on all CPUs, and we make
 * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
 * with the wall clock, that initialization isn't kept up-to-date across
 * different time counters in SMP mode.  Therefore we can only use this
 * method when there's only one CPU enabled.
 *
 * When using the WTINT PALcall, the RPCC may shift to a lower frequency,
 * or stop altogether, while waiting for the interrupt.  Therefore we cannot
 * use this method when WTINT is in use.
 */

static cycle_t read_rpcc(struct clocksource *cs)
{
	return rpcc();
}

static struct clocksource clocksource_rpcc = {
	.name                   = "rpcc",
	.rating                 = 300,
	.read                   = read_rpcc,
	.mask                   = CLOCKSOURCE_MASK(32),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS
};
#endif /* ALPHA_WTINT */


/* Validate a computed cycle counter result against the known bounds for
   the given processor core.  There's too much brokenness in the way of
   timing hardware for any one method to work everywhere.  :-(

   Return 0 if the result cannot be trusted, otherwise return the argument.  */

static unsigned long __init
validate_cc_value(unsigned long cc)
{
	static struct bounds {
		unsigned int min, max;
	} cpu_hz[] __initdata = {
		[EV3_CPU]    = {   50000000,  200000000 },	/* guess */
		[EV4_CPU]    = {  100000000,  300000000 },
		[LCA4_CPU]   = {  100000000,  300000000 },	/* guess */
		[EV45_CPU]   = {  200000000,  300000000 },
		[EV5_CPU]    = {  250000000,  433000000 },
		[EV56_CPU]   = {  333000000,  667000000 },
		[PCA56_CPU]  = {  400000000,  600000000 },	/* guess */
		[PCA57_CPU]  = {  500000000,  600000000 },	/* guess */
		[EV6_CPU]    = {  466000000,  600000000 },
		[EV67_CPU]   = {  600000000,  750000000 },
		[EV68AL_CPU] = {  750000000,  940000000 },
		[EV68CB_CPU] = { 1000000000, 1333333333 },
		/* None of the following are shipping as of 2001-11-01.  */
		[EV68CX_CPU] = { 1000000000, 1700000000 },	/* guess */
		[EV69_CPU]   = { 1000000000, 1700000000 },	/* guess */
		[EV7_CPU]    = {  800000000, 1400000000 },	/* guess */
		[EV79_CPU]   = { 1000000000, 2000000000 },	/* guess */
	};

	/* Allow for some drift in the crystal.  10MHz is more than enough.  */
	const unsigned int deviation = 10000000;

	struct percpu_struct *cpu;
	unsigned int index;

	cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
	index = cpu->type & 0xffffffff;

	/* If index out of bounds, no way to validate.  */
	if (index >= ARRAY_SIZE(cpu_hz))
		return cc;

	/* If index contains no data, no way to validate.  */
	if (cpu_hz[index].max == 0)
		return cc;

	if (cc < cpu_hz[index].min - deviation
	    || cc > cpu_hz[index].max + deviation)
		return 0;

	return cc;
}


/*
 * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
 * arch/i386/time.c.
 */

#define CALIBRATE_LATCH	0xffff
#define TIMEOUT_COUNT	0x100000
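/* Counting down CALIBRATE_LATCH + 1 = 65536 PIT ticks at
   PIT_TICK_RATE (1.19318 MHz) takes about 54.9 ms.  */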

static unsigned long __init
calibrate_cc_with_pit(void)
{
	int cc, count = 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Now let's take care of CTC channel 2
	 *
	 * Set the Gate high, program CTC channel 2 for mode 0,
	 * (interrupt on terminal count mode), binary count,
	 * load CALIBRATE_LATCH count (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);		/* binary, mode 0, LSB/MSB, Ch 2 */
	outb(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
	outb(CALIBRATE_LATCH >> 8, 0x42);	/* MSB of count */

	cc = rpcc();
	do {
		count++;
	} while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
	cc = rpcc() - cc;

	/* Error: ECTCNEVERSET or ECPUTOOFAST.  */
	if (count <= 1 || count == TIMEOUT_COUNT)
		return 0;

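	/* cc cycles elapsed while the PIT counted down CALIBRATE_LATCH + 1
	   ticks at PIT_TICK_RATE Hz, so the CPU frequency in Hz is
	   cc * PIT_TICK_RATE / (CALIBRATE_LATCH + 1).  */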
	return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}

/* The Linux interpretation of the CMOS clock register contents:
   When the Update-In-Progress (UIP) flag goes from 1 to 0, the
   RTC registers show the second which has precisely just started.
   Let's hope other operating systems interpret the RTC the same way.  */

static unsigned long __init
rpcc_after_update_in_progress(void)
{
	do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
	do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

	return rpcc();
}

void __init
time_init(void)
{
	unsigned int cc1, cc2;
	unsigned long cycle_freq, tolerance;
	long diff;

	if (alpha_using_qemu) {
		clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
		init_qemu_clockevent();

		timer_irqaction.handler = qemu_timer_interrupt;
		init_rtc_irq();
		return;
	}

	/* Calibrate CPU clock -- attempt #1.  */
	if (!est_cycle_freq)
		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

	cc1 = rpcc();

	/* Calibrate CPU clock -- attempt #2.  */
	if (!est_cycle_freq) {
		cc1 = rpcc_after_update_in_progress();
		cc2 = rpcc_after_update_in_progress();
		est_cycle_freq = validate_cc_value(cc2 - cc1);
		cc1 = cc2;
	}

	cycle_freq = hwrpb->cycle_freq;
	if (est_cycle_freq) {
		/* If the given value is within 250 PPM of what we calculated,
		   accept it.  Otherwise, use what we found.  */
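		/* 250 PPM is 1/4000 of the nominal frequency.  */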
		tolerance = cycle_freq / 4000;
		diff = cycle_freq - est_cycle_freq;
		if (diff < 0)
			diff = -diff;
		if ((unsigned long)diff > tolerance) {
			cycle_freq = est_cycle_freq;
			printk("HWRPB cycle frequency bogus.  "
			       "Estimated %lu Hz\n", cycle_freq);
		} else {
			est_cycle_freq = 0;
		}
	} else if (! validate_cc_value (cycle_freq)) {
		printk("HWRPB cycle frequency bogus, "
		       "and unable to estimate a proper value!\n");
	}

	/* See above for restrictions on using clocksource_rpcc.  */
#ifndef CONFIG_ALPHA_WTINT
	if (hwrpb->nr_processors == 1)
		clocksource_register_hz(&clocksource_rpcc, cycle_freq);
#endif

	/* Startup the timer source. */
	alpha_mv.init_rtc();
	init_rtc_clockevent();
}

/* Initialize the clock_event_device for secondary cpus.  */
#ifdef CONFIG_SMP
void __init
init_clockevent(void)
{
	if (alpha_using_qemu)
		init_qemu_clockevent();
	else
		init_rtc_clockevent();
}
#endif