author     jhb <jhb@FreeBSD.org>    2004-07-09 17:46:27 +0000
committer  jhb <jhb@FreeBSD.org>    2004-07-09 17:46:27 +0000
commit     761713b2ffa4ede44e97aa2cb23d397021370e0d (patch)
tree       2acf612e7e42d10ffb269f3c7c2c56bf72ac2d33 /sys
parent     68fab2852b5abf742193e13a1858e2b7d4d30e89 (diff)
Check the lock lists to see if they are empty directly rather than
assigning a pointer to the list and then dereferencing the pointer as a
second step.  When the first spin lock is acquired, curthread is not in
a critical section, so it may be preempted and would end up using another
CPU's lock list instead of its own.

When this code was in witness_lock() this sequence was safe, as curthread
was already in a critical section since witness_lock() is called after
the lock is acquired.

Tested by:	Daniel Lang dl at leo.org
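In outline, the fix replaces a two-step "take the address of the per-CPU
list, then dereference it" sequence with a single PCPU_GET() emptiness
test.  The fragment below is only a rough before/after sketch of that
pattern, paraphrased from the diff that follows; it is not a compilable
excerpt of witness_checkorder().

	/* Before: pointer captured first, dereferenced second. */
	lock_list = PCPU_PTR(spinlocks);	/* runs on, say, CPU 0 */
	/*
	 * No spin lock is held yet, so no critical section protects us;
	 * a preemption here can migrate the thread to another CPU ...
	 */
	if (*lock_list == NULL)			/* ... making this read CPU 0's
						   list while running on CPU 1 */
		return;

	/* After: the emptiness check is a single per-CPU read. */
	if (PCPU_GET(spinlocks) == NULL)
		return;
	/*
	 * A non-empty list means this thread already holds a spin lock,
	 * so it is in a critical section and cannot migrate; taking the
	 * pointer to its own CPU's list is now safe.
	 */
	lock_list = PCPU_PTR(spinlocks);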
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/subr_witness.c | 30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 2e5527a..3557cf6 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -690,22 +690,34 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
if (class->lc_flags & LC_SLEEPLOCK) {
/*
* Since spin locks include a critical section, this check
- * impliclty enforces a lock order of all sleep locks before
+ * implicitly enforces a lock order of all sleep locks before
* all spin locks.
*/
if (td->td_critnest != 0)
panic("blockable sleep lock (%s) %s @ %s:%d",
class->lc_name, lock->lo_name, file, line);
+
+ /*
+ * If this is the first lock acquired then just return as
+ * no order checking is needed.
+ */
+ if (td->td_sleeplocks == NULL)
+ return;
lock_list = &td->td_sleeplocks;
- } else
+ } else {
+ /*
+ * If this is the first lock, just return as no order
+ * checking is needed. We check this in both if clauses
+ * here as unifying the check would require us to use a
+ * critical section to ensure we don't migrate while doing
+ * the check. Note that if this is not the first lock, we
+ * are already in a critical section and are safe for the
+ * rest of the check.
+ */
+ if (PCPU_GET(spinlocks) == NULL)
+ return;
lock_list = PCPU_PTR(spinlocks);
-
- /*
- * Is this the first lock acquired? If so, then no order checking
- * is needed.
- */
- if (*lock_list == NULL)
- return;
+ }
/*
* Check to see if we are recursing on a lock we already own. If