summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_timeout.c
diff options
context:
space:
mode:
authorattilio <attilio@FreeBSD.org>2007-06-26 21:42:01 +0000
committerattilio <attilio@FreeBSD.org>2007-06-26 21:42:01 +0000
commit6b276781fad7a061344301ba071e88e9822c50ba (patch)
treedfed61b9a1bfce96b75b75f2b621da64cccf888a /sys/kern/kern_timeout.c
parent15389329a0e4e6702f7710bd904131886e2a6d1d (diff)
downloadFreeBSD-src-6b276781fad7a061344301ba071e88e9822c50ba.zip
FreeBSD-src-6b276781fad7a061344301ba071e88e9822c50ba.tar.gz
Fix an old standing LOR between callout_lock and sleepqueues chain (which
could lead to a deadlock):
- sleepq_set_timeout acquires callout_lock (via callout_reset()) only with
  the sleepq chain lock held
- msleep_spin in _callout_stop_safe locks the sleepqueue chain with
  callout_lock held
In order to solve this, don't use msleep_spin in _callout_stop_safe() but
use the sleepqueues directly, as inlined msleep_spin code. Rearrange the
wakeup path in order to have it consistent too.
Reported by: kris (via stress2 test suite)
Tested by: Timothy Redaelli <drizzt@gufi.org>
Reviewed by: jhb
Approved by: jeff (mentor)
Approved by: re
Diffstat (limited to 'sys/kern/kern_timeout.c')
-rw-r--r--sys/kern/kern_timeout.c38
1 files changed, 35 insertions, 3 deletions
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index e3441c2..2e67ece1f 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
static int avg_depth;
@@ -308,8 +309,10 @@ softclock(void *dummy)
* There is someone waiting
* for the callout to complete.
*/
- wakeup(&callout_wait);
callout_wait = 0;
+ mtx_unlock_spin(&callout_lock);
+ wakeup(&callout_wait);
+ mtx_lock_spin(&callout_lock);
}
steps = 0;
c = nextsoftcheck;
@@ -529,9 +532,38 @@ _callout_stop_safe(c, safe)
* finish.
*/
while (c == curr_callout) {
+
+ /*
+ * Use direct calls to sleepqueue interface
+ * instead of cv/msleep in order to avoid
+ * a LOR between callout_lock and sleepqueue
+ * chain spinlocks. This piece of code
+ * emulates a msleep_spin() call actually.
+ */
+ mtx_unlock_spin(&callout_lock);
+ sleepq_lock(&callout_wait);
+
+ /*
+ * Check again the state of curr_callout
+ * because curthread could have lost the
+ * race previously won.
+ */
+ mtx_lock_spin(&callout_lock);
+ if (c != curr_callout) {
+ sleepq_release(&callout_wait);
+ break;
+ }
callout_wait = 1;
- msleep_spin(&callout_wait, &callout_lock,
- "codrain", 0);
+ DROP_GIANT();
+ mtx_unlock_spin(&callout_lock);
+ sleepq_add(&callout_wait,
+ &callout_lock.lock_object, "codrain",
+ SLEEPQ_SLEEP, 0);
+ sleepq_wait(&callout_wait);
+
+ /* Reacquire locks previously released. */
+ PICKUP_GIANT();
+ mtx_lock_spin(&callout_lock);
}
} else if (use_mtx && !curr_cancelled) {
/*
OpenPOWER on IntegriCloud