path: root/sys/kern/kern_clock.c
author     jake <jake@FreeBSD.org>  2000-11-19 06:02:32 +0000
committer  jake <jake@FreeBSD.org>  2000-11-19 06:02:32 +0000
commit     f265931038bd5d4784d95756bac9482fea5c237a (patch)
tree       011a8ccbfdc24eb4d3ad466439c9241fc3bd25f7 /sys/kern/kern_clock.c
parent     9c6b4db333692ded21cd0e118ef680cefb97d0d4 (diff)
download   FreeBSD-src-f265931038bd5d4784d95756bac9482fea5c237a.zip
           FreeBSD-src-f265931038bd5d4784d95756bac9482fea5c237a.tar.gz
- Protect the callout wheel with a separate spin mutex, callout_lock.
- Use the mutex in hardclock to ensure no races between it and softclock.
- Make softclock be INTR_MPSAFE and provide a flag, CALLOUT_MPSAFE, which
  specifies that a callout handler does not need Giant. There is still no
  way to set this flag when registering a callout.

Reviewed by:	-smp@, jlemon
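The CALLOUT_MPSAFE idea from the message above can be pictured with a small
user-space sketch, assuming pthreads; the names here (run_callout, the
"giant" mutex, the struct layout) are illustrative stand-ins, not the
FreeBSD callout(9) or Giant interfaces.

	/*
	 * Sketch: a dispatcher that takes a big "giant" lock around a
	 * handler unless the handler is flagged as MPSAFE.
	 */
	#include <pthread.h>
	#include <stdio.h>

	#define CALLOUT_MPSAFE	0x0001	/* handler does not need the big lock */

	static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;

	struct callout {
		void	(*c_func)(void *);
		void	*c_arg;
		int	c_flags;
	};

	/* Run one handler, taking the big lock only when the handler needs it. */
	static void
	run_callout(struct callout *c)
	{
		int need_giant = (c->c_flags & CALLOUT_MPSAFE) == 0;

		if (need_giant)
			pthread_mutex_lock(&giant);
		c->c_func(c->c_arg);
		if (need_giant)
			pthread_mutex_unlock(&giant);
	}

	static void
	hello(void *arg)
	{
		printf("callout fired: %s\n", (const char *)arg);
	}

	int
	main(void)
	{
		struct callout mpsafe = { hello, (void *)"MPSAFE handler", CALLOUT_MPSAFE };
		struct callout legacy = { hello, (void *)"handler still under the big lock", 0 };

		run_callout(&mpsafe);
		run_callout(&legacy);
		return (0);
	}

As the commit notes, the flag exists but cannot yet be requested when a
callout is registered, so in practice every handler still runs under Giant
at this point.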
Diffstat (limited to 'sys/kern/kern_clock.c')
-rw-r--r--  sys/kern/kern_clock.c  14
1 file changed, 12 insertions, 2 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 74f346e..b021580 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -154,6 +154,7 @@ hardclock(frame)
register struct clockframe *frame;
{
register struct proc *p;
+ int need_softclock = 0;
p = curproc;
if (p != idleproc) {
@@ -187,16 +188,25 @@ hardclock(frame)
statclock(frame);
tc_windup();
- ticks++;
/*
* Process callouts at a very low cpu priority, so we don't keep the
* relatively high clock interrupt priority any longer than necessary.
*/
+ mtx_enter(&callout_lock, MTX_SPIN);
+ ticks++;
if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
- sched_swi(softclock_ih, SWI_NOSWITCH);
+ need_softclock = 1;
} else if (softticks + 1 == ticks)
++softticks;
+ mtx_exit(&callout_lock, MTX_SPIN);
+
+ /*
+ * sched_swi acquires sched_lock, so we don't want to call it with
+ * callout_lock held; incorrect locking order.
+ */
+ if (need_softclock)
+ sched_swi(softclock_ih, SWI_NOSWITCH);
}
/*
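The comment added in the second hunk carries the key reasoning: the wakeup
that needs sched_lock is deferred until callout_lock has been dropped, so the
two locks are never held in the wrong order. A rough user-space analogue of
that discipline, assuming pthreads; callwheel_is_pending() and
wake_softclock() are invented stand-ins for the callwheel bucket check and
sched_swi(), not real FreeBSD interfaces.

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t callout_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t softclock_cv = PTHREAD_COND_INITIALIZER;
	static unsigned long ticks;
	static bool softclock_wanted;

	/* Hypothetical stand-in for the callwheel bucket check. */
	static bool
	callwheel_is_pending(unsigned long t)
	{
		return ((t & 0xff) == 0);
	}

	/* Acquires sched_lock, so it must not be called with callout_lock held. */
	static void
	wake_softclock(void)
	{
		pthread_mutex_lock(&sched_lock);
		softclock_wanted = true;
		pthread_cond_signal(&softclock_cv);
		pthread_mutex_unlock(&sched_lock);
	}

	void
	hardclock_tick(void)
	{
		bool need_softclock = false;

		/* Under callout_lock, only note that work is pending. */
		pthread_mutex_lock(&callout_lock);
		ticks++;
		if (callwheel_is_pending(ticks))
			need_softclock = true;
		pthread_mutex_unlock(&callout_lock);

		/* Wake the consumer only after callout_lock is dropped. */
		if (need_softclock)
			wake_softclock();
	}

	int
	main(void)
	{
		for (int i = 0; i < 1024; i++)
			hardclock_tick();
		return (0);
	}

The design choice mirrors the diff: recording a flag inside the critical
section and acting on it afterwards keeps the hot path short and avoids ever
nesting the scheduler lock inside the callout lock.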