author		jake <jake@FreeBSD.org>	2000-11-19 06:02:32 +0000
committer	jake <jake@FreeBSD.org>	2000-11-19 06:02:32 +0000
commit		f265931038bd5d4784d95756bac9482fea5c237a (patch)
tree		011a8ccbfdc24eb4d3ad466439c9241fc3bd25f7 /sys
parent		9c6b4db333692ded21cd0e118ef680cefb97d0d4 (diff)
- Protect the callout wheel with a separate spin mutex, callout_lock.
- Use the mutex in hardclock to ensure no races between it and softclock.
- Make softclock be INTR_MPSAFE and provide a flag, CALLOUT_MPSAFE, which
  specifies that a callout handler does not need Giant. There is still no
  way to set this flag when registering a callout.

Reviewed by:	-smp@, jlemon
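The core of the change is easiest to see end to end as a small userspace
analogue. In the hedged sketch below, pthread mutexes stand in for the
kernel's callout_lock spin mutex and for Giant; the struct fields and the
CALLOUT_MPSAFE value mirror the sys/sys/callout.h hunk further down, while
everything else (run_one(), hello(), main()) is illustrative only and not
part of this commit:

/*
 * Userspace sketch of the dispatch pattern this commit gives softclock():
 * drop the queue lock before running a handler, and wrap the handler in
 * "Giant" unless it is flagged CALLOUT_MPSAFE.  Build with: cc -pthread.
 */
#include <pthread.h>
#include <stdio.h>

#define CALLOUT_MPSAFE	0x0008		/* same value as sys/sys/callout.h */

struct callout {
	void	(*c_func)(void *);	/* handler */
	void	*c_arg;			/* handler argument */
	int	c_flags;		/* CALLOUT_* flags */
};

static pthread_mutex_t callout_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;

static void
run_one(struct callout *c)
{
	void (*func)(void *);
	void *arg;
	int flags;

	/*
	 * Copy out what we need, then drop the queue lock: the handler
	 * must never run with a spin mutex held.
	 */
	pthread_mutex_lock(&callout_lock);
	func = c->c_func;
	arg = c->c_arg;
	flags = c->c_flags;
	pthread_mutex_unlock(&callout_lock);

	if (!(flags & CALLOUT_MPSAFE))
		pthread_mutex_lock(&giant);
	func(arg);
	if (!(flags & CALLOUT_MPSAFE))
		pthread_mutex_unlock(&giant);
}

static void
hello(void *arg)
{
	printf("callout ran: %s\n", (const char *)arg);
}

int
main(void)
{
	struct callout c = { hello, "needs Giant", 0 };

	run_one(&c);			/* runs under the stand-in Giant */
	c.c_flags = CALLOUT_MPSAFE;
	c.c_arg = "MP-safe";
	run_one(&c);			/* runs without it */
	return (0);
}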
Diffstat (limited to 'sys')
-rw-r--r--	sys/alpha/alpha/machdep.c	| 2
-rw-r--r--	sys/amd64/amd64/machdep.c	| 2
-rw-r--r--	sys/i386/i386/machdep.c		| 2
-rw-r--r--	sys/ia64/ia64/machdep.c		| 2
-rw-r--r--	sys/kern/kern_clock.c		| 14
-rw-r--r--	sys/kern/kern_intr.c		| 2
-rw-r--r--	sys/kern/kern_mutex.c		| 1
-rw-r--r--	sys/kern/kern_timeout.c		| 41
-rw-r--r--	sys/kern/subr_turnstile.c	| 1
-rw-r--r--	sys/kern/subr_witness.c		| 1
-rw-r--r--	sys/sys/callout.h		| 2
11 files changed, 50 insertions(+), 20 deletions(-)
diff --git a/sys/alpha/alpha/machdep.c b/sys/alpha/alpha/machdep.c
index 043b903..39f5b34 100644
--- a/sys/alpha/alpha/machdep.c
+++ b/sys/alpha/alpha/machdep.c
@@ -387,6 +387,8 @@ again:
TAILQ_INIT(&callwheel[i]);
}
+ mtx_init(&callout_lock, "callout", MTX_SPIN);
+
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
if (1)
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 1d15d39..1ea7360 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -409,6 +409,8 @@ again:
TAILQ_INIT(&callwheel[i]);
}
+ mtx_init(&callout_lock, "callout", MTX_SPIN);
+
#if defined(USERCONFIG)
userconfig();
cninit(); /* the preferred console may have changed */
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 1d15d39..1ea7360 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -409,6 +409,8 @@ again:
TAILQ_INIT(&callwheel[i]);
}
+ mtx_init(&callout_lock, "callout", MTX_SPIN);
+
#if defined(USERCONFIG)
userconfig();
cninit(); /* the preferred console may have changed */
diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c
index 0fae551..0cc6f28 100644
--- a/sys/ia64/ia64/machdep.c
+++ b/sys/ia64/ia64/machdep.c
@@ -319,6 +319,8 @@ again:
TAILQ_INIT(&callwheel[i]);
}
+ mtx_init(&callout_lock, "callout", MTX_SPIN);
+
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
if (1)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 74f346e..b021580 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -154,6 +154,7 @@ hardclock(frame)
register struct clockframe *frame;
{
register struct proc *p;
+ int need_softclock = 0;
p = curproc;
if (p != idleproc) {
@@ -187,16 +188,25 @@ hardclock(frame)
statclock(frame);
tc_windup();
- ticks++;
/*
* Process callouts at a very low cpu priority, so we don't keep the
* relatively high clock interrupt priority any longer than necessary.
*/
+ mtx_enter(&callout_lock, MTX_SPIN);
+ ticks++;
if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
- sched_swi(softclock_ih, SWI_NOSWITCH);
+ need_softclock = 1;
} else if (softticks + 1 == ticks)
++softticks;
+ mtx_exit(&callout_lock, MTX_SPIN);
+
+ /*
+ * sched_swi acquires sched_lock, so we don't want to call it with
+ * callout_lock held; incorrect locking order.
+ */
+ if (need_softclock)
+ sched_swi(softclock_ih, SWI_NOSWITCH);
}
/*
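The comment added in the hunk above records the lock-ordering rule that
shapes hardclock(): sched_swi() acquires sched_lock internally, so calling
it while callout_lock is held would invert the lock order. A minimal
standalone sketch of that deferral idiom follows; only need_softclock and
the ordering rationale come from the diff, and every other name here is an
illustrative stand-in:

/*
 * Decide under the inner lock, act after dropping it.
 * Build with: cc -pthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t callout_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static bool wheel_nonempty = true;	/* pretend a callout is pending */

static void
sched_swi(void)
{
	/* Takes sched_lock, so the caller must not hold callout_lock. */
	pthread_mutex_lock(&sched_lock);
	puts("softclock scheduled");
	pthread_mutex_unlock(&sched_lock);
}

static void
hardclock_tick(void)
{
	int need_softclock = 0;

	pthread_mutex_lock(&callout_lock);
	if (wheel_nonempty)		/* inspect the wheel while stable */
		need_softclock = 1;
	pthread_mutex_unlock(&callout_lock);

	if (need_softclock)		/* safe: callout_lock is dropped */
		sched_swi();
}

int
main(void)
{
	hardclock_tick();
	return (0);
}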
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 81a66a8..30192b5 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -258,7 +258,7 @@ start_softintr(dummy)
{
net_ih = sinthand_add("net", NULL, swi_net, NULL, SWI_NET, 0);
softclock_ih =
- sinthand_add("clock", &clk_ithd, softclock, NULL, SWI_CLOCK, 0);
+ sinthand_add("clock", &clk_ithd, softclock, NULL, SWI_CLOCK, INTR_MPSAFE);
vm_ih = sinthand_add("vm", NULL, swi_vm, NULL, SWI_VM, 0);
}
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 58a98d1..f8d3e9e 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -703,6 +703,7 @@ static char *spin_order_list[] = {
#ifdef __i386__
"clk",
#endif
+ "callout",
/*
* leaf locks
*/
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 58619a7..57be762 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -56,6 +56,7 @@ struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks; /* Like ticks, but for softclock(). */
+struct mtx callout_lock;
static struct callout *nextsoftcheck; /* Next callout to be checked. */
@@ -90,7 +91,7 @@ softclock(void *dummy)
steps = 0;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_enter(&callout_lock, MTX_SPIN);
while (softticks != ticks) {
softticks++;
/*
@@ -107,21 +108,23 @@ softclock(void *dummy)
if (steps >= MAX_SOFTCLOCK_STEPS) {
nextsoftcheck = c;
/* Give interrupts a chance. */
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_enter(&callout_lock, MTX_SPIN);
c = nextsoftcheck;
steps = 0;
}
} else {
void (*c_func)(void *);
void *c_arg;
+ int c_flags;
nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
TAILQ_REMOVE(bucket, c, c_links.tqe);
c_func = c->c_func;
c_arg = c->c_arg;
+ c_flags = c->c_flags;
c->c_func = NULL;
if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
c->c_flags = CALLOUT_LOCAL_ALLOC;
@@ -131,18 +134,22 @@ softclock(void *dummy)
c->c_flags =
(c->c_flags & ~CALLOUT_PENDING);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
+ if (!(c_flags & CALLOUT_MPSAFE))
+ mtx_enter(&Giant, MTX_DEF);
splx(s);
c_func(c_arg);
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ if (!(c_flags & CALLOUT_MPSAFE))
+ mtx_exit(&Giant, MTX_DEF);
+ mtx_enter(&callout_lock, MTX_SPIN);
steps = 0;
c = nextsoftcheck;
}
}
}
nextsoftcheck = NULL;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
}
@@ -173,7 +180,7 @@ timeout(ftn, arg, to_ticks)
struct callout_handle handle;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_enter(&callout_lock, MTX_SPIN);
/* Fill in the next free callout structure. */
new = SLIST_FIRST(&callfree);
@@ -185,7 +192,7 @@ timeout(ftn, arg, to_ticks)
callout_reset(new, to_ticks, ftn, arg);
handle.callout = new;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
return (handle);
}
@@ -207,10 +214,10 @@ untimeout(ftn, arg, handle)
return;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_enter(&callout_lock, MTX_SPIN);
if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
callout_stop(handle.callout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
}
@@ -244,7 +251,7 @@ callout_reset(c, to_ticks, ftn, arg)
int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_enter(&callout_lock, MTX_SPIN);
if (c->c_flags & CALLOUT_PENDING)
callout_stop(c);
@@ -262,7 +269,7 @@ callout_reset(c, to_ticks, ftn, arg)
c->c_time = ticks + to_ticks;
TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
c, c_links.tqe);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
}
@@ -273,13 +280,13 @@ callout_stop(c)
int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_enter(&callout_lock, MTX_SPIN);
/*
* Don't attempt to delete a callout that's not on the queue.
*/
if (!(c->c_flags & CALLOUT_PENDING)) {
c->c_flags &= ~CALLOUT_ACTIVE;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
return;
}
@@ -294,7 +301,7 @@ callout_stop(c)
if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
}
@@ -356,7 +363,7 @@ adjust_timeout_calltodo(time_change)
/* don't collide with softclock() */
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_enter(&callout_lock, MTX_SPIN);
for (p = calltodo.c_next; p != NULL; p = p->c_next) {
p->c_time -= delta_ticks;
@@ -367,7 +374,7 @@ adjust_timeout_calltodo(time_change)
/* take back the ticks the timer didn't use (p->c_time <= 0) */
delta_ticks = -p->c_time;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
return;
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 58a98d1..f8d3e9e 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -703,6 +703,7 @@ static char *spin_order_list[] = {
#ifdef __i386__
"clk",
#endif
+ "callout",
/*
* leaf locks
*/
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 58a98d1..f8d3e9e 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -703,6 +703,7 @@ static char *spin_order_list[] = {
#ifdef __i386__
"clk",
#endif
+ "callout",
/*
* leaf locks
*/
diff --git a/sys/sys/callout.h b/sys/sys/callout.h
index c83bf44..a90285f 100644
--- a/sys/sys/callout.h
+++ b/sys/sys/callout.h
@@ -61,6 +61,7 @@ struct callout {
#define CALLOUT_LOCAL_ALLOC 0x0001 /* was allocated from callfree */
#define CALLOUT_ACTIVE 0x0002 /* callout is currently active */
#define CALLOUT_PENDING 0x0004 /* callout is waiting for timeout */
+#define CALLOUT_MPSAFE 0x0008 /* callout handler is mp safe */
struct callout_handle {
struct callout *callout;
@@ -72,6 +73,7 @@ extern struct callout *callout;
extern int ncallout;
extern struct callout_tailq *callwheel;
extern int callwheelsize, callwheelbits, callwheelmask, softticks;
+extern struct mtx callout_lock;
#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE)
#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE)
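As the commit message notes, nothing yet sets CALLOUT_MPSAFE at
registration time, so any early consumer would have to set the flag by
hand. The fragment below is hypothetical and is not part of this commit:
only CALLOUT_MPSAFE, the c_flags field, and callout_reset() appear in the
diff, while callout_init(), my_handler, and the direct flag assignment are
assumptions for illustration:

/*
 * Hypothetical kernel fragment, not from this commit: mark a callout
 * MP-safe by setting the flag directly, since no registration
 * interface exposes CALLOUT_MPSAFE yet.
 */
struct callout my_callout;

callout_init(&my_callout);		/* assumed helper, not in this diff */
my_callout.c_flags |= CALLOUT_MPSAFE;	/* handler runs without Giant */
callout_reset(&my_callout, hz, my_handler, NULL);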