 -rw-r--r--  sys/sparc64/include/clock.h      |   8
 -rw-r--r--  sys/sparc64/include/cpufunc.h    |   9
 -rw-r--r--  sys/sparc64/include/pcpu.h       |   1
 -rw-r--r--  sys/sparc64/include/smp.h        |   8
 -rw-r--r--  sys/sparc64/include/tick.h       |   2
 -rw-r--r--  sys/sparc64/include/ver.h        |   4
 -rw-r--r--  sys/sparc64/sparc64/clock.c      |  51
 -rw-r--r--  sys/sparc64/sparc64/genassym.c   |   5
 -rw-r--r--  sys/sparc64/sparc64/locore.S     |   1
 -rw-r--r--  sys/sparc64/sparc64/machdep.c    |  83
 -rw-r--r--  sys/sparc64/sparc64/mp_locore.S  |  34
 -rw-r--r--  sys/sparc64/sparc64/mp_machdep.c |  14
 -rw-r--r--  sys/sparc64/sparc64/tick.c       | 236
 13 files changed, 329 insertions(+), 127 deletions(-)
diff --git a/sys/sparc64/include/clock.h b/sys/sparc64/include/clock.h
index b0e4c0b..fd57731 100644
--- a/sys/sparc64/include/clock.h
+++ b/sys/sparc64/include/clock.h
@@ -29,8 +29,10 @@
#ifndef _MACHINE_CLOCK_H_
#define _MACHINE_CLOCK_H_
-extern u_long tick_increment;
-extern u_long tick_freq;
-extern u_long tick_MHz;
+extern void (*delay_func)(int usec);
+extern u_long clock_boot;
+
+void delay_boot(int usec);
+void delay_tick(int usec);
#endif /* !_MACHINE_CLOCK_H_ */
diff --git a/sys/sparc64/include/cpufunc.h b/sys/sparc64/include/cpufunc.h
index fca5984..60533f8 100644
--- a/sys/sparc64/include/cpufunc.h
+++ b/sys/sparc64/include/cpufunc.h
@@ -174,6 +174,15 @@ int fasword32(u_long asi, void *addr, uint32_t *val);
} while (0)
/*
+ * Trick GAS/GCC into compiling access to STICK/STICK_COMPARE independently
+ * of the selected instruction set.
+ */
+#define rdstick() rd(asr24)
+#define rdstickcmpr() rd(asr25)
+#define wrstick(val, xor) wr(asr24, (val), (xor))
+#define wrstickcmpr(val, xor) wr(asr25, (val), (xor))
+
+/*
* Macro intended to be used instead of wr(asr23, val, xor) for writing to
* the TICK_COMPARE register in order to avoid a bug in BlackBird CPUs that
can cause these writes to fail under certain conditions which in turn
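For reference, a minimal sketch of the kind of inline assembly such rd()/wr() wrappers boil down to (illustrative only; the real macros live in the unchanged part of cpufunc.h, and the function names below are made up). Spelling the register by its numeric ASR name is what lets GAS assemble the access regardless of the instruction set the kernel is otherwise built for, which is the trick the comment above refers to:

static __inline unsigned long
rdstick_sketch(void)
{
	unsigned long v;

	/* "rd %asr24, %0" names STICK numerically, so any -mcpu accepts it. */
	__asm__ __volatile__("rd %%asr24, %0" : "=r" (v));
	return (v);
}

static __inline void
wrstick_sketch(unsigned long v)
{

	/* "wr rs1, imm, %asr24" writes the STICK register the same way. */
	__asm__ __volatile__("wr %0, 0, %%asr24" : : "r" (v));
}

(This only compiles for a sparc64 target, of course.)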
diff --git a/sys/sparc64/include/pcpu.h b/sys/sparc64/include/pcpu.h
index 2ccdbd9..efb5174 100644
--- a/sys/sparc64/include/pcpu.h
+++ b/sys/sparc64/include/pcpu.h
@@ -53,6 +53,7 @@ struct pmap;
vm_offset_t pc_addr; \
u_long pc_tickref; \
u_long pc_tickadj; \
+ u_int pc_clock; \
u_int pc_mid; \
u_int pc_node; \
u_int pc_tlb_ctx; \
diff --git a/sys/sparc64/include/smp.h b/sys/sparc64/include/smp.h
index 3e1d9ac..23076ef 100644
--- a/sys/sparc64/include/smp.h
+++ b/sys/sparc64/include/smp.h
@@ -29,9 +29,10 @@
#ifndef _MACHINE_SMP_H_
#define _MACHINE_SMP_H_
-#define CPU_CLKSYNC 1
-#define CPU_INIT 2
-#define CPU_BOOTSTRAP 3
+#define CPU_TICKSYNC 1
+#define CPU_STICKSYNC 2
+#define CPU_INIT 3
+#define CPU_BOOTSTRAP 4
#ifndef LOCORE
@@ -62,6 +63,7 @@ struct cpu_start_args {
u_int csa_state;
vm_offset_t csa_pcpu;
u_long csa_tick;
+ u_long csa_stick;
u_long csa_ver;
struct tte csa_ttes[PCPU_PAGES];
};
diff --git a/sys/sparc64/include/tick.h b/sys/sparc64/include/tick.h
index 8586a0d..456e9f0 100644
--- a/sys/sparc64/include/tick.h
+++ b/sys/sparc64/include/tick.h
@@ -29,7 +29,7 @@
#ifndef _MACHINE_TICK_H_
#define _MACHINE_TICK_H_
-void tick_init(u_long clock);
+void tick_clear(void);
void tick_start(void);
void tick_stop(void);
diff --git a/sys/sparc64/include/ver.h b/sys/sparc64/include/ver.h
index 78e57d8..0fb7933 100644
--- a/sys/sparc64/include/ver.h
+++ b/sys/sparc64/include/ver.h
@@ -41,6 +41,8 @@
#define VER_MAXTL_SIZE (8)
#define VER_MAXWIN_SIZE (5)
+#ifndef LOCORE
+
#define VER_MANUF_MASK (((1L<<VER_MANUF_SIZE)-1)<<VER_MANUF_SHIFT)
#define VER_IMPL_MASK (((1L<<VER_IMPL_SIZE)-1)<<VER_IMPL_SHIFT)
#define VER_MASK_MASK (((1L<<VER_MASK_SIZE)-1)<<VER_MASK_SHIFT)
@@ -61,6 +63,8 @@
extern int cpu_impl;
extern char sparc64_model[];
+#endif /* !LOCORE */
+
/* Known implementations. */
#define CPU_IMPL_SPARC64 0x01
#define CPU_IMPL_ULTRASPARCI 0x10
diff --git a/sys/sparc64/sparc64/clock.c b/sys/sparc64/sparc64/clock.c
index 146c2a2..abf3274 100644
--- a/sys/sparc64/sparc64/clock.c
+++ b/sys/sparc64/sparc64/clock.c
@@ -29,23 +29,56 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+
#include <machine/clock.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
-u_long tick_increment;
-u_long tick_freq;
-u_long tick_MHz;
+void (*delay_func)(int usec);
+u_long clock_boot;
void
-DELAY(int n)
+DELAY(int usec)
{
- u_long start, end;
- start = rd(tick);
- if (n < 0)
+ (*delay_func)(usec);
+}
+
+void
+delay_boot(int usec)
+{
+ u_long end;
+
+ if (usec < 0)
return;
- end = start + (u_long)n * tick_MHz;
+
+ end = rd(tick) + (u_long)usec * clock_boot / 1000000;
while (rd(tick) < end)
- ;
+ cpu_spinwait();
+}
+
+void
+delay_tick(int usec)
+{
+ u_long end;
+
+ if (usec < 0)
+ return;
+
+ /*
+ * We avoid being migrated to another CPU with a possibly
+ * unsynchronized TICK timer while spinning.
+ */
+ sched_pin();
+
+ end = rd(tick) + (u_long)usec * PCPU_GET(clock) / 1000000;
+ while (rd(tick) < end)
+ cpu_spinwait();
+
+ sched_unpin();
}
void
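Both delay_boot() and delay_tick() derive their spin target from the same integer expression; a worked example with an assumed clock value (standalone, host-compilable):

#include <stdio.h>

int
main(void)
{
	/* Assumed 167 MHz boot clock; any "clock-frequency" value works alike. */
	unsigned long long clock = 167000000ULL;
	int usec = 1000;		/* i.e. DELAY(1000) */
	unsigned long long ticks = (unsigned long long)usec * clock / 1000000;

	/* Prints 167000: one millisecond worth of TICK increments to spin for. */
	printf("end = rd(tick) + %llu\n", ticks);
	return (0);
}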
diff --git a/sys/sparc64/sparc64/genassym.c b/sys/sparc64/sparc64/genassym.c
index d31c84d..eb133e2 100644
--- a/sys/sparc64/sparc64/genassym.c
+++ b/sys/sparc64/sparc64/genassym.c
@@ -83,10 +83,11 @@ ASSYM(PAGE_SIZE_4M, PAGE_SIZE_4M);
ASSYM(CSA_PCPU, offsetof(struct cpu_start_args, csa_pcpu));
ASSYM(CSA_STATE, offsetof(struct cpu_start_args, csa_state));
#ifdef SUN4U
-ASSYM(CSA_TICK, offsetof(struct cpu_start_args, csa_tick));
-ASSYM(CSA_VER, offsetof(struct cpu_start_args, csa_ver));
ASSYM(CSA_MID, offsetof(struct cpu_start_args, csa_mid));
+ASSYM(CSA_STICK, offsetof(struct cpu_start_args, csa_stick));
+ASSYM(CSA_TICK, offsetof(struct cpu_start_args, csa_tick));
ASSYM(CSA_TTES, offsetof(struct cpu_start_args, csa_ttes));
+ASSYM(CSA_VER, offsetof(struct cpu_start_args, csa_ver));
#endif
#ifdef SUN4V
ASSYM(CSA_CPUID, offsetof(struct cpu_start_args, csa_cpuid));
diff --git a/sys/sparc64/sparc64/locore.S b/sys/sparc64/sparc64/locore.S
index ace549e..8c1043e 100644
--- a/sys/sparc64/sparc64/locore.S
+++ b/sys/sparc64/sparc64/locore.S
@@ -54,7 +54,6 @@ ENTRY(_start)
wrpr %g0, 0, %cleanwin
wrpr %g0, 0, %pil
wr %g0, 0, %fprs
- wrpr %g0, 0, %tick
/*
* Get onto our per-CPU panic stack, which precedes the struct pcpu in
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index f5d63cd..f0c5a16 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -133,18 +133,6 @@ struct kva_md_info kmi;
u_long ofw_vec;
u_long ofw_tba;
-/*
- * Note: timer quality for CPU's is set low to try and prevent them from
- * being chosen as the primary timecounter. The CPU counters are not
- * synchronized among the CPU's so in MP machines this causes problems
- * when calculating the time. With this value the CPU's should only be
- * chosen as the primary timecounter as a last resort.
- */
-
-#define UP_TICK_QUALITY 1000
-#define MP_TICK_QUALITY -100
-static struct timecounter tick_tc;
-
char sparc64_model[32];
static int cpu_use_vis = 1;
@@ -152,7 +140,6 @@ static int cpu_use_vis = 1;
cpu_block_copy_t *cpu_block_copy;
cpu_block_zero_t *cpu_block_zero;
-static timecounter_get_t tick_get_timecount;
void sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3,
ofw_vec_t *vec);
void sparc64_shutdown_final(void *dummy, int howto);
@@ -180,22 +167,6 @@ cpu_startup(void *arg)
vm_paddr_t physsz;
int i;
- tick_tc.tc_get_timecount = tick_get_timecount;
- tick_tc.tc_poll_pps = NULL;
- tick_tc.tc_counter_mask = ~0u;
- tick_tc.tc_frequency = tick_freq;
- tick_tc.tc_name = "tick";
- tick_tc.tc_quality = UP_TICK_QUALITY;
-#ifdef SMP
- /*
- * We do not know if each CPU's tick counter is synchronized.
- */
- if (cpu_mp_probe())
- tick_tc.tc_quality = MP_TICK_QUALITY;
-#endif
-
- tc_init(&tick_tc);
-
physsz = 0;
for (i = 0; i < sparc64_nmemreg; i++)
physsz += sparc64_memreg[i].mr_size;
@@ -217,7 +188,7 @@ cpu_startup(void *arg)
if (bootverbose)
printf("machine: %s\n", sparc64_model);
- cpu_identify(rdpr(ver), tick_freq, PCPU_GET(cpuid));
+ cpu_identify(rdpr(ver), PCPU_GET(clock), curcpu);
}
void
@@ -262,12 +233,6 @@ spinlock_exit(void)
wrpr(pil, td->td_md.md_saved_pil, 0);
}
-unsigned
-tick_get_timecount(struct timecounter *tc)
-{
- return ((unsigned)rd(tick));
-}
-
void
sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
{
@@ -278,7 +243,6 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
caddr_t kmdp;
phandle_t child;
phandle_t root;
- u_int clock;
uint32_t portid;
end = 0;
@@ -291,6 +255,21 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
cpu_impl = VER_IMPL(rdpr(ver));
/*
+ * Clear (S)TICK timer (including NPT).
+ */
+ tick_clear();
+
+ /*
+ * UltraSparc II[e,i] based systems come up with the tick interrupt
+ * enabled and a handler that resets the tick counter, causing DELAY()
+ * to not work properly when used early in boot.
+ * UltraSPARC III based systems come up with the system tick interrupt
+ * enabled, causing an interrupt storm on startup since they are not
+ * handled.
+ */
+ tick_stop();
+
+ /*
* Initialize Open Firmware (needed for console).
*/
OF_init(vec);
@@ -329,7 +308,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
pc->pc_tlb_ctx_max = TLB_CTX_USER_MAX;
/*
- * Determine the OFW node (and ensure the
+ * Determine the OFW node and frequency of the BSP (and ensure the
* BSP is in the device tree in the first place).
*/
pc->pc_node = 0;
@@ -349,17 +328,23 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
}
if (pc->pc_node == 0)
OF_exit();
+ if (OF_getprop(child, "clock-frequency", &pc->pc_clock,
+ sizeof(pc->pc_clock)) <= 0)
+ OF_exit();
/*
- * Initialize the tick counter. Must be before the console is inited
- * in order to provide the low-level console drivers with a working
- * DELAY().
+ * Provide a DELAY() that works before PCPU_REG is set. We can't
+ * set PCPU_REG without also taking over the trap table or the
+ * firmware will overwrite it. Unfortunately, it's way too early
+ * to also take over the trap table at this point.
*/
- OF_getprop(child, "clock-frequency", &clock, sizeof(clock));
- tick_init(clock);
+ clock_boot = pc->pc_clock;
+ delay_func = delay_boot;
/*
* Initialize the console before printing anything.
+ * NB: the low-level console drivers require a working DELAY() at
+ * this point.
*/
cninit();
@@ -445,6 +430,11 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
cpu_setregs(pc);
/*
+ * It's now safe to use the real DELAY().
+ */
+ delay_func = delay_tick;
+
+ /*
* Initialize the message buffer (after setting trap table).
*/
msgbufinit(msgbufp, MSGBUF_SIZE);
@@ -719,8 +709,13 @@ cpu_shutdown(void *args)
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
+ struct pcpu *pc;
- return (ENXIO);
+ pc = pcpu_find(cpu_id);
+ if (pc == NULL || rate == NULL)
+ return (EINVAL);
+ *rate = pc->pc_clock;
+ return (0);
}
/*
diff --git a/sys/sparc64/sparc64/mp_locore.S b/sys/sparc64/sparc64/mp_locore.S
index 7a81637..c17c68e 100644
--- a/sys/sparc64/sparc64/mp_locore.S
+++ b/sys/sparc64/sparc64/mp_locore.S
@@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$");
#include <machine/pstate.h>
#include <machine/smp.h>
#include <machine/upa.h>
+#include <machine/ver.h>
#include "assym.s"
@@ -92,7 +93,7 @@ ENTRY(mp_startup)
SET(cpu_start_args, %l1, %l0)
- mov CPU_CLKSYNC, %l1
+ mov CPU_TICKSYNC, %l1
membar #StoreLoad
stw %l1, [%l0 + CSA_STATE]
@@ -101,7 +102,25 @@ ENTRY(mp_startup)
nop
wrpr %l1, 0, %tick
- UPA_GET_MID(%o0)
+ rdpr %ver, %l1
+ stx %l1, [%l0 + CSA_VER]
+
+ srlx %l1, VER_IMPL_SHIFT, %l1
+ sll %l1, VER_IMPL_SIZE, %l1
+ srl %l1, VER_IMPL_SIZE, %l1
+ cmp %l1, CPU_IMPL_ULTRASPARCIII
+ bl %icc, 3f
+ nop
+ mov CPU_STICKSYNC, %l1
+ membar #StoreLoad
+ stw %l1, [%l0 + CSA_STATE]
+
+2: ldx [%l0 + CSA_STICK], %l1
+ brz %l1, 2b
+ nop
+ wr %l1, 0, %asr24
+
+3: UPA_GET_MID(%o0)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "mp_start: CPU %d entered kernel"
@@ -110,9 +129,6 @@ ENTRY(mp_startup)
9:
#endif
- rdpr %ver, %l1
- stx %l1, [%l0 + CSA_VER]
-
/*
* Inform the boot processor we have inited.
*/
@@ -123,9 +139,9 @@ ENTRY(mp_startup)
/*
* Wait till it's our turn to bootstrap.
*/
-2: lduw [%l0 + CSA_MID], %l1
+4: lduw [%l0 + CSA_MID], %l1
cmp %l1, %o0
- bne %xcc, 2b
+ bne %xcc, 4b
nop
#if KTR_COMPILE & KTR_SMP
@@ -141,7 +157,7 @@ ENTRY(mp_startup)
/*
* Map the per-CPU pages.
*/
-3: sllx %l2, TTE_SHIFT, %l3
+5: sllx %l2, TTE_SHIFT, %l3
add %l1, %l3, %l3
ldx [%l3 + TTE_VPN], %l4
@@ -156,7 +172,7 @@ ENTRY(mp_startup)
add %l2, 1, %l2
cmp %l2, PCPU_PAGES
- bne %xcc, 3b
+ bne %xcc, 5b
nop
/*
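In C terms, the AP side of the (S)TICK synchronization added to mp_startup above does roughly the following. This is a standalone sketch: the structure, the register-write stubs and the has_stick flag stand in for the real assembly environment, and only the state values mirror the machine/smp.h definitions from this patch.

#include <stdint.h>

/* Stand-ins for the privileged register writes (wrpr %tick / wr %asr24). */
static uint64_t tick_reg, stick_reg;
static void write_tick(uint64_t v)  { tick_reg = v; }
static void write_stick(uint64_t v) { stick_reg = v; }

struct start_args_sketch {
	volatile unsigned int	csa_state;
	volatile uint64_t	csa_tick;
	volatile uint64_t	csa_stick;
};

enum { CPU_TICKSYNC = 1, CPU_STICKSYNC = 2 };	/* as in <machine/smp.h> above */

static void
ap_clock_sync(struct start_args_sketch *csa, int has_stick)
{

	/* Announce the TICK phase, spin until the BSP publishes rd(tick). */
	csa->csa_state = CPU_TICKSYNC;
	while (csa->csa_tick == 0)
		;
	write_tick(csa->csa_tick);

	/* Only USIII and later have a STICK register; run a second round. */
	if (has_stick) {
		csa->csa_state = CPU_STICKSYNC;
		while (csa->csa_stick == 0)
			;
		write_stick(csa->csa_stick);
	}
}

The BSP-side counterpart of this handshake is the loop added to cpu_mp_start() in mp_machdep.c below.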
diff --git a/sys/sparc64/sparc64/mp_machdep.c b/sys/sparc64/sparc64/mp_machdep.c
index 214143a..e40b5ea 100644
--- a/sys/sparc64/sparc64/mp_machdep.c
+++ b/sys/sparc64/sparc64/mp_machdep.c
@@ -81,6 +81,7 @@ __FBSDID("$FreeBSD$");
#include <machine/asi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
+#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/ofw_machdep.h>
@@ -101,7 +102,7 @@ static ih_func_t cpu_ipi_stop;
* since the other processors will use it before the boot CPU enters the
* kernel.
*/
-struct cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0 };
+struct cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0, 0 };
struct ipi_cache_args ipi_cache_args;
struct ipi_tlb_args ipi_tlb_args;
struct pcb stoppcbs[MAXCPU];
@@ -281,13 +282,19 @@ cpu_mp_start(void)
csa->csa_state = 0;
sun4u_startcpu(child, (void *)mp_tramp, 0);
s = intr_disable();
- while (csa->csa_state != CPU_CLKSYNC)
+ while (csa->csa_state != CPU_TICKSYNC)
;
membar(StoreLoad);
csa->csa_tick = rd(tick);
+ if (cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
+ while (csa->csa_state != CPU_STICKSYNC)
+ ;
+ membar(StoreLoad);
+ csa->csa_stick = rdstick();
+ }
while (csa->csa_state != CPU_INIT)
;
- csa->csa_tick = 0;
+ csa->csa_tick = csa->csa_stick = 0;
intr_restore(s);
cpuid = mp_ncpus++;
@@ -298,6 +305,7 @@ cpu_mp_start(void)
pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
pcpu_init(pc, cpuid, sizeof(*pc));
pc->pc_addr = va;
+ pc->pc_clock = clock;
pc->pc_mid = mid;
pc->pc_node = child;
diff --git a/sys/sparc64/sparc64/tick.c b/sys/sparc64/sparc64/tick.c
index 2cc722c..612ff34 100644
--- a/sys/sparc64/sparc64/tick.c
+++ b/sys/sparc64/sparc64/tick.c
@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
+ * Copyright (c) 2005, 2008 Marius Strobl <marius@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,22 +29,30 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
-#include <sys/kernel.h>
#include <sys/systm.h>
-#include <sys/bus.h>
-#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>
-#include <machine/clock.h>
+#include <dev/ofw/openfirm.h>
+
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/tick.h>
#include <machine/ver.h>
-#define TICK_GRACE 10000
+/* 10000 ticks proved okay for 500MHz. */
+#define TICK_GRACE(clock) ((clock) / 1000000 * 2 * 10)
+
+#define TICK_QUALITY_MP 10
+#define TICK_QUALITY_UP 1000
SYSCTL_NODE(_machdep, OID_AUTO, tick, CTLFLAG_RD, 0, "tick statistics");
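TICK_GRACE(clock) works out to 20 ticks per MHz, which gives the 10000 ticks mentioned in the comment for the 500 MHz case and, via the panic added to cpu_initclocks() further down, caps HZ at clock / TICK_GRACE(clock). A standalone check of that arithmetic:

#include <stdio.h>

#define TICK_GRACE(clock)	((clock) / 1000000 * 2 * 10)

int
main(void)
{
	unsigned long clock = 500000000UL;	/* the 500 MHz case from the comment */

	printf("TICK_GRACE = %lu ticks\n", TICK_GRACE(clock));		/* 10000 */
	printf("maximum usable HZ = %lu\n", clock / TICK_GRACE(clock));	/* 50000 */
	return (0);
}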
@@ -63,7 +72,20 @@ static int adjust_ticks = 0;
SYSCTL_INT(_machdep_tick, OID_AUTO, adjust_ticks, CTLFLAG_RD, &adjust_ticks,
0, "total number of tick interrupts with adjustment");
+static struct timecounter tick_tc;
+static u_long tick_increment;
+static u_int hardclock_use_stick;
+
+static uint64_t tick_cputicks(void);
+static timecounter_get_t tick_get_timecount_up;
+#ifdef SMP
+static timecounter_get_t tick_get_timecount_mp;
+#endif
static void tick_hardclock(struct trapframe *tf);
+static inline void tick_hardclock_common(struct trapframe *tf, u_long tick,
+ u_long adj);
+static inline void tick_process(struct trapframe *tf);
+static void stick_hardclock(struct trapframe *tf);
static uint64_t
tick_cputicks(void)
@@ -75,9 +97,85 @@ tick_cputicks(void)
void
cpu_initclocks(void)
{
+ uint32_t clock;
stathz = hz;
+
+ /*
+ * On USIII and later we use the STICK timer instead of the TICK
+ * one if possible in order to ensure hardclock is driven by the same
+ * frequency on all CPUs (besides, we no longer need to apply the
+ * workaround for the BlackBird erratum #1 there). Similarly, we
+ * don't provide a CPU ticker in that case as long as we can't
+ * specify the ticker frequency per CPU.
+ * XXX we don't use the STICK timer with all CPUs beyond USIII
+ * unconditionally, yet, due to unsolved problems with USIIIi APs
+ * causing a hang when using it.
+ */
+ switch (cpu_impl) {
+ case CPU_IMPL_ULTRASPARCIII: /* mandatory */
+ case CPU_IMPL_ULTRASPARCIIIp:
+ hardclock_use_stick = 1;
+ break;
+ case CPU_IMPL_ULTRASPARCIIIi:
+#ifdef SMP
+ if (cpu_mp_probe() == 0) {
+#endif
+ hardclock_use_stick = 1;
+ break;
+#ifdef SMP
+ }
+ /* FALLTHROUGH */
+#endif
+ default:
+ hardclock_use_stick = 0;
+ }
+ if (hardclock_use_stick != 0) {
+ if (OF_getprop(OF_parent(PCPU_GET(node)), "stick-frequency",
+ &clock, sizeof(clock)) == -1)
+ panic("%s: could not determine STICK frequency", __func__);
+ intr_setup(PIL_TICK, stick_hardclock, -1, NULL, NULL);
+ } else {
+ clock = PCPU_GET(clock);
+ intr_setup(PIL_TICK, tick_hardclock, -1, NULL, NULL);
+ set_cputicker(tick_cputicks, clock, 0);
+ }
+ tick_increment = clock / hz;
+ /*
+ * Avoid hardclock stalling due to a lost (S)TICK interrupt by
+ * ensuring that the (S)TICK period is at least TICK_GRACE ticks.
+ */
+ if (tick_increment < TICK_GRACE(clock))
+ panic("%s: HZ too high, decrease to at least %d",
+ __func__, clock / TICK_GRACE(clock));
tick_start();
+
+ /*
+ * Initialize the TICK-based timecounter. This must not happen
+ * before SI_SUB_INTRINSIC for tick_get_timecount_mp() to work.
+ */
+ tick_tc.tc_get_timecount = tick_get_timecount_up;
+ tick_tc.tc_poll_pps = NULL;
+ tick_tc.tc_counter_mask = ~0u;
+ tick_tc.tc_frequency = PCPU_GET(clock);
+ tick_tc.tc_name = "tick";
+ tick_tc.tc_quality = TICK_QUALITY_UP;
+ tick_tc.tc_priv = NULL;
+#ifdef SMP
+ /*
+ * We (try to) sync the (S)TICK timers of APs with the BSP during
+ * their startup but not afterwards. The resulting drift can
+ * cause problems when the time is calculated based on (S)TICK
+ * values read on different CPUs. Thus we bind to the BSP for
+ * reading the register and use a low quality for the otherwise
+ * high quality (S)TICK timers in the MP case.
+ */
+ if (cpu_mp_probe()) {
+ tick_tc.tc_get_timecount = tick_get_timecount_mp;
+ tick_tc.tc_quality = TICK_QUALITY_MP;
+ }
+#endif
+ tc_init(&tick_tc);
}
static inline void
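Since tc_counter_mask is ~0u, the timecounter code only ever sees the low 32 bits of the TICK register, which is fine as long as tc_windup() (driven from hardclock) runs before that 32-bit window wraps. A quick check with an assumed BSP clock:

#include <stdio.h>

int
main(void)
{
	double tick_hz = 500e6;		/* assumed 500 MHz TICK clock */

	/* Roughly 8.59 s; hardclock winds the timecounter every 1/HZ s. */
	printf("32-bit window wraps every %.2f s\n", 4294967296.0 / tick_hz);
	return (0);
}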
@@ -93,27 +191,52 @@ tick_process(struct trapframe *tf)
statclock(TRAPF_USERMODE(tf));
}
+/*
+ * NB: the sequence of reading the (S)TICK register, calculating the value
+ * of the next tick and writing it to the (S)TICK_COMPARE register must not
+ * be interrupted, not even by an IPI, otherwise a value that is in the past
+ * could be written in the worst case, causing hardclock to stop.
+ */
+
static void
tick_hardclock(struct trapframe *tf)
{
- u_long adj, ref, tick;
- long delta;
+ u_long adj, tick;
register_t s;
- int count;
- /*
- * The sequence of reading the TICK register, calculating the value
- * of the next tick and writing it to the TICK_CMPR register must not
- * be interrupted, not even by an IPI, otherwise a value that is in
- * the past could be written in the worst case, causing hardclock to
- * stop.
- */
critical_enter();
adj = PCPU_GET(tickadj);
s = intr_disable();
tick = rd(tick);
wrtickcmpr(tick + tick_increment - adj, 0);
intr_restore(s);
+ tick_hardclock_common(tf, tick, adj);
+ critical_exit();
+}
+
+static void
+stick_hardclock(struct trapframe *tf)
+{
+ u_long adj, stick;
+ register_t s;
+
+ critical_enter();
+ adj = PCPU_GET(tickadj);
+ s = intr_disable();
+ stick = rdstick();
+ wrstickcmpr(stick + tick_increment - adj, 0);
+ intr_restore(s);
+ tick_hardclock_common(tf, stick, adj);
+ critical_exit();
+}
+
+static inline void
+tick_hardclock_common(struct trapframe *tf, u_long tick, u_long adj)
+{
+ u_long ref;
+ long delta;
+ int count;
+
ref = PCPU_GET(tickref);
delta = tick - ref;
count = 0;
@@ -139,29 +262,36 @@ tick_hardclock(struct trapframe *tf)
}
PCPU_SET(tickref, ref);
PCPU_SET(tickadj, adj);
- critical_exit();
}
-void
-tick_init(u_long clock)
+static u_int
+tick_get_timecount_up(struct timecounter *tc)
{
- tick_freq = clock;
- tick_MHz = clock / 1000000;
- tick_increment = clock / hz;
+ return ((u_int)rd(tick));
+}
- /*
- * UltraSparc II[e,i] based systems come up with the tick interrupt
- * enabled and a handler that resets the tick counter, causing DELAY()
- * to not work properly when used early in boot.
- * UltraSPARC III based systems come up with the system tick interrupt
- * enabled, causing an interrupt storm on startup since they are not
- * handled.
- */
- tick_stop();
+#ifdef SMP
+static u_int
+tick_get_timecount_mp(struct timecounter *tc)
+{
+ struct thread *td;
+ u_int tick;
+
+ td = curthread;
+ thread_lock(td);
+ sched_bind(td, 0);
+ thread_unlock(td);
- set_cputicker(tick_cputicks, tick_freq, 0);
+ tick = tick_get_timecount_up(tc);
+
+ thread_lock(td);
+ sched_unbind(td);
+ thread_unlock(td);
+
+ return (tick);
}
+#endif
void
tick_start(void)
@@ -170,32 +300,34 @@ tick_start(void)
register_t s;
/*
- * Avoid stopping of hardclock in terms of a lost tick interrupt
- * by ensuring that the tick period is at least TICK_GRACE ticks.
- * This check would be better placed in tick_init(), however we
- * have to call tick_init() before cninit() in order to provide
- * the low-level console drivers with a working DELAY() which in
- * turn means we cannot use panic() in tick_init().
- */
- if (tick_increment < TICK_GRACE)
- panic("%s: HZ too high, decrease to at least %ld", __func__,
- tick_freq / TICK_GRACE);
-
- if (curcpu == 0)
- intr_setup(PIL_TICK, tick_hardclock, -1, NULL, NULL);
-
- /*
- * Try to make the tick interrupts as synchronously as possible on
- * all CPUs to avoid inaccuracies for migrating processes. Leave out
- * one tick to make sure that it is not missed.
+ * Try to make the (S)TICK interrupts fire as synchronously as possible
+ * on all CPUs to avoid inaccuracies for migrating processes. Leave
+ * out one tick to make sure that it is not missed.
*/
+ critical_enter();
PCPU_SET(tickadj, 0);
s = intr_disable();
- base = rd(tick);
+ if (hardclock_use_stick != 0)
+ base = rdstick();
+ else
+ base = rd(tick);
base = roundup(base, tick_increment);
PCPU_SET(tickref, base);
- wrtickcmpr(base + tick_increment, 0);
+ if (hardclock_use_stick != 0)
+ wrstickcmpr(base + tick_increment, 0);
+ else
+ wrtickcmpr(base + tick_increment, 0);
intr_restore(s);
+ critical_exit();
+}
+
+void
+tick_clear(void)
+{
+
+ if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
+ wrstick(0, 0);
+ wrpr(tick, 0, 0);
}
void
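The roundup() in tick_start() above is what keeps the per-CPU compare interrupts phase-aligned: every CPU rounds its own counter reading up to a multiple of tick_increment, records that as tickref, and arms the first interrupt one full period later so it cannot already be in the past. A worked example with assumed numbers:

#include <stdio.h>

/* Same effect as the kernel's roundup(): round v up to a multiple of m. */
#define ROUNDUP(v, m)	((((v) + (m) - 1) / (m)) * (m))

int
main(void)
{
	unsigned long clock = 500000000UL;	/* 500 MHz, assumed */
	unsigned long hz = 1000;		/* HZ, assumed */
	unsigned long tick_increment = clock / hz;	/* 500000 */
	unsigned long base = 1234567UL;		/* rd(tick) on this CPU, assumed */

	base = ROUNDUP(base, tick_increment);
	/* Prints: tickref = 1500000, first compare at 2000000. */
	printf("tickref = %lu, first compare at %lu\n",
	    base, base + tick_increment);
	return (0);
}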
@@ -203,6 +335,6 @@ tick_stop(void)
{
if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
- wr(asr25, 1L << 63, 0);
+ wrstickcmpr(1L << 63, 0);
wrtickcmpr(1L << 63, 0);
}