author		jhb <jhb@FreeBSD.org>	2000-11-16 21:20:52 +0000
committer	jhb <jhb@FreeBSD.org>	2000-11-16 21:20:52 +0000
commit		c87c13134f58e8c43d88548733164d19b7bf0fcb (patch)
tree		54eafdd49caf6b37d020d4a381011df9725d6b53 /sys
parent		4b6731649b5bd2ad944b4db901b34f53a4ebb6e4 (diff)
The recent changes to msleep() and mawait() resulted in timeout() and
untimeout() not being called with Giant in those functions.  For now, use
the sched_lock to protect the callout wheel in softclock() and in the
various timeout and callout functions.

Noticed by:	tegge
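
In every function the diff below touches, the change keeps the existing
splhigh()/splx() bracket and additionally holds sched_lock around the
callout-wheel manipulation.  A minimal sketch of that pattern, modeled on
the untimeout() hunk below (the headers and the mtx_enter()/mtx_exit()
spin-mutex calls are the ones this diff uses; the wrapper name
guarded_untimeout() is hypothetical and exists only for illustration):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mutex.h>

static void
guarded_untimeout(void (*ftn)(void *), void *arg, struct callout_handle handle)
{
	int s;

	s = splhigh();				/* existing spl protection stays */
	mtx_enter(&sched_lock, MTX_SPIN);	/* new: lock the callout wheel */
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_exit(&sched_lock, MTX_SPIN);	/* unlock before lowering the spl */
	splx(s);
}

timeout(), callout_reset(), callout_stop(), and adjust_timeout_calltodo()
get the same enter/exit pair inside their splhigh()/splx() sections, as the
hunks below show.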
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_timeout.c	17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index fb6cda3..831fbac 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -43,6 +43,7 @@
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
+#include <sys/mutex.h>
/*
* TODO:
@@ -89,6 +90,7 @@ softclock(void *dummy)
steps = 0;
s = splhigh();
+ mtx_enter(&sched_lock, MTX_SPIN);
while (softticks != ticks) {
softticks++;
/*
@@ -127,15 +129,18 @@ softclock(void *dummy)
c->c_flags =
(c->c_flags & ~CALLOUT_PENDING);
}
+ mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
c_func(c_arg);
s = splhigh();
+ mtx_enter(&sched_lock, MTX_SPIN);
steps = 0;
c = nextsoftcheck;
}
}
}
nextsoftcheck = NULL;
+ mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
}
@@ -166,6 +171,7 @@ timeout(ftn, arg, to_ticks)
struct callout_handle handle;
s = splhigh();
+ mtx_enter(&sched_lock, MTX_SPIN);
/* Fill in the next free callout structure. */
new = SLIST_FIRST(&callfree);
@@ -177,6 +183,7 @@ timeout(ftn, arg, to_ticks)
callout_reset(new, to_ticks, ftn, arg);
handle.callout = new;
+ mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
return (handle);
}
@@ -198,8 +205,10 @@ untimeout(ftn, arg, handle)
return;
s = splhigh();
+ mtx_enter(&sched_lock, MTX_SPIN);
if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
callout_stop(handle.callout);
+ mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
}
@@ -233,6 +242,7 @@ callout_reset(c, to_ticks, ftn, arg)
int s;
s = splhigh();
+ mtx_enter(&sched_lock, MTX_SPIN);
if (c->c_flags & CALLOUT_PENDING)
callout_stop(c);
@@ -250,8 +260,8 @@ callout_reset(c, to_ticks, ftn, arg)
c->c_time = ticks + to_ticks;
TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
c, c_links.tqe);
+ mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
-
}
void
@@ -261,11 +271,13 @@ callout_stop(c)
int s;
s = splhigh();
+ mtx_enter(&sched_lock, MTX_SPIN);
/*
* Don't attempt to delete a callout that's not on the queue.
*/
if (!(c->c_flags & CALLOUT_PENDING)) {
c->c_flags &= ~CALLOUT_ACTIVE;
+ mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
return;
}
@@ -280,6 +292,7 @@ callout_stop(c)
if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
}
+ mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
}
@@ -341,6 +354,7 @@ adjust_timeout_calltodo(time_change)
/* don't collide with softclock() */
s = splhigh();
+ mtx_enter(&sched_lock, MTX_SPIN);
for (p = calltodo.c_next; p != NULL; p = p->c_next) {
p->c_time -= delta_ticks;
@@ -351,6 +365,7 @@ adjust_timeout_calltodo(time_change)
/* take back the ticks the timer didn't use (p->c_time <= 0) */
delta_ticks = -p->c_time;
}
+ mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
return;
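
The one place the lock cannot simply bracket the whole section is
softclock(): so that the handler does not run with sched_lock held, the
hunk above drops both the lock and the spl around c_func(c_arg) and retakes
them before rescanning the wheel.  A sketch of that inner step, assuming
the same headers as the sketch above (run_one_callout() is a hypothetical
helper, not part of this commit):

static void
run_one_callout(struct callout *c, int *sp)
{
	void (*c_func)(void *);
	void *c_arg;

	c_func = c->c_func;
	c_arg = c->c_arg;
	mtx_exit(&sched_lock, MTX_SPIN);	/* drop the wheel lock ... */
	splx(*sp);				/* ... and the spl ... */
	c_func(c_arg);				/* ... run the handler unlocked */
	*sp = splhigh();			/* reprotect before touching */
	mtx_enter(&sched_lock, MTX_SPIN);	/* the wheel again */
}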