summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_timeout.c
diff options
context:
space:
mode:
author: jhb <jhb@FreeBSD.org> 2007-08-31 19:01:30 +0000
committer: jhb <jhb@FreeBSD.org> 2007-08-31 19:01:30 +0000
commitc54b68ab60eecb5c06e27dc1c38bd56f61b5a8fc (patch)
treeb8d8e434794ffd20b69e404fb79c0f23f5d23953 /sys/kern/kern_timeout.c
parent01348838463a02c468390e817a9507e889103c83 (diff)
downloadFreeBSD-src-c54b68ab60eecb5c06e27dc1c38bd56f61b5a8fc.zip
FreeBSD-src-c54b68ab60eecb5c06e27dc1c38bd56f61b5a8fc.tar.gz
Close a race that snuck in with the recent changes to fix a LOR between
the callout_lock spin lock and the sleepqueue spin locks. In the fix, callout_drain() has to drop the callout_lock so it can acquire the sleepqueue lock. The state of the callout can change while the callout_lock is held however (for example, it can be rescheduled via callout_reset()). The previous code assumed that the only state change that could happen is that the callout could finish executing. This change alters callout_drain() to effectively restart and recheck everything after it acquires the sleepqueue lock, thus handling all the possible states that the callout could be in after any changes while callout_lock was dropped.

Approved by: re (kensmith)
Tested by: kris
Diffstat (limited to 'sys/kern/kern_timeout.c')
-rw-r--r-- sys/kern/kern_timeout.c | 40
1 files changed, 27 insertions, 13 deletions
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 2e67ece1f..62ec575 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -491,7 +491,7 @@ _callout_stop_safe(c, safe)
struct callout *c;
int safe;
{
- int use_mtx;
+ int use_mtx, sq_locked;
if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
@@ -504,6 +504,8 @@ _callout_stop_safe(c, safe)
use_mtx = 0;
}
+ sq_locked = 0;
+again:
mtx_lock_spin(&callout_lock);
/*
* If the callout isn't pending, it's not on the queue, so
@@ -521,6 +523,8 @@ _callout_stop_safe(c, safe)
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
c, c->c_func, c->c_arg);
mtx_unlock_spin(&callout_lock);
+ if (sq_locked)
+ sleepq_release(&callout_wait);
return (0);
}
@@ -539,20 +543,24 @@ _callout_stop_safe(c, safe)
* a LOR between callout_lock and sleepqueue
* chain spinlocks. This piece of code
* emulates a msleep_spin() call actually.
+ *
+ * If we already have the sleepqueue chain
+ * locked, then we can safely block. If we
+ * don't already have it locked, however,
+ * we have to drop the callout_lock to lock
+ * it. This opens several races, so we
+ * restart at the beginning once we have
+ * both locks. If nothing has changed, then
+ * we will end up back here with sq_locked
+ * set.
*/
- mtx_unlock_spin(&callout_lock);
- sleepq_lock(&callout_wait);
-
- /*
- * Check again the state of curr_callout
- * because curthread could have lost the
- * race previously won.
- */
- mtx_lock_spin(&callout_lock);
- if (c != curr_callout) {
- sleepq_release(&callout_wait);
- break;
+ if (!sq_locked) {
+ mtx_unlock_spin(&callout_lock);
+ sleepq_lock(&callout_wait);
+ sq_locked = 1;
+ goto again;
}
+
callout_wait = 1;
DROP_GIANT();
mtx_unlock_spin(&callout_lock);
@@ -560,6 +568,7 @@ _callout_stop_safe(c, safe)
&callout_lock.lock_object, "codrain",
SLEEPQ_SLEEP, 0);
sleepq_wait(&callout_wait);
+ sq_locked = 0;
/* Reacquire locks previously released. */
PICKUP_GIANT();
@@ -577,13 +586,18 @@ _callout_stop_safe(c, safe)
CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
c, c->c_func, c->c_arg);
mtx_unlock_spin(&callout_lock);
+ KASSERT(!sq_locked, ("sleepqueue chain locked"));
return (1);
}
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
c, c->c_func, c->c_arg);
mtx_unlock_spin(&callout_lock);
+ KASSERT(!sq_locked, ("sleepqueue chain still locked"));
return (0);
}
+ if (sq_locked)
+ sleepq_release(&callout_wait);
+
c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
if (nextsoftcheck == c) {
OpenPOWER on IntegriCloud