author      jhb <jhb@FreeBSD.org>  2004-01-28 20:39:57 +0000
committer   jhb <jhb@FreeBSD.org>  2004-01-28 20:39:57 +0000
commit      7c38a96e26ea6ac5011842d1d51cd51ed70eb958 (patch)
tree        d3c375a1a6a32f2ee6a61b1e0480fe131cb8ae7f
parent      3678d31f1762251640dd504e43143335331df1d0 (diff)
Rework witness_lock() to make it slightly more useful and flexible:
- witness_lock() is split into two pieces: witness_checkorder() and
  witness_lock().  witness_checkorder() determines whether acquiring a
  specified lock at the time it is called would result in a lock order
  violation.  It optionally adds a new lock order relationship as well.
  witness_lock() updates witness's data structures to assume that a lock
  has been acquired by sticking a new lock instance in the appropriate
  lock instance list.
- The mutex and sx lock functions now call witness_checkorder() prior to
  trying to acquire a lock and continue to call witness_lock() after the
  acquire has completed.  This lets witness catch a deadlock before it
  happens rather than trying to do so after the threads have already
  deadlocked (i.e. never actually reporting it).
- A new function witness_defineorder() has been added that establishes a
  lock order between two locks at runtime without having to acquire the
  locks.  If the lock order cannot be added it returns an error.  This
  function is available to programmers via the WITNESS_DEFINEORDER()
  macro, which accepts either two mutexes or two sx locks as its
  arguments.
- A few simple wrapper macros were added to allow developers to call
  witness_checkorder() anywhere as a way of enforcing locking assertions
  in code that might acquire a certain lock in some situations.  The
  macros are witness_check_{mutex,shared_sx,exclusive_sx} and take the
  appropriate lock as their sole argument.
- The code to remove a lock instance from a lock list in witness_unlock()
  was unnested by using a goto, vastly improving the readability of this
  function.
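
As a rough usage sketch of the new runtime-order interface (not part of
this commit; foo_mtx, bar_mtx, and foo_modinit() are invented for
illustration), a subsystem could pin down the ordering between two of its
mutexes at initialization time without ever acquiring them:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    /* Hypothetical locks; any two mutexes (or two sx locks) would do. */
    static struct mtx foo_mtx, bar_mtx;

    static void
    foo_modinit(void)
    {
            int error;

            mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
            mtx_init(&bar_mtx, "bar", NULL, MTX_DEF);

            /*
             * Record that foo_mtx must always be acquired before
             * bar_mtx.  This fails if witness already knows an order
             * the other way around, or if it cannot allocate the new
             * relationship.
             */
            error = WITNESS_DEFINEORDER(&foo_mtx, &bar_mtx);
            if (error != 0)
                    printf("foo: foo->bar lock order rejected (%d)\n",
                        error);
    }

On kernels compiled without WITNESS the macro simply evaluates to 0, so
the check costs nothing in production builds.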
-rw-r--r--    sys/kern/kern_mutex.c      4
-rw-r--r--    sys/kern/kern_sx.c         3
-rw-r--r--    sys/kern/subr_witness.c    288
-rw-r--r--    sys/sys/lock.h             27
4 files changed, 214 insertions, 108 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 0ab4fb6..df01eb6 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -219,6 +219,8 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
file, line));
+ WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
+ file, line);
_get_sleep_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
@@ -321,6 +323,8 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
("mtx_lock_spin() of sleep mutex %s @ %s:%d",
m->mtx_object.lo_name, file, line));
+ WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
+ file, line);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
_get_spin_lock(m, curthread, opts, file, line);
#else
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 4256b0a..fd723ed 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -112,6 +112,7 @@ _sx_slock(struct sx *sx, const char *file, int line)
KASSERT(sx->sx_xholder != curthread,
("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
sx->sx_object.lo_name, file, line));
+ WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER, file, line);
/*
* Loop in case we lose the race for lock acquisition.
@@ -165,6 +166,8 @@ _sx_xlock(struct sx *sx, const char *file, int line)
KASSERT(sx->sx_xholder != curthread,
("%s (%s): xlock already held @ %s:%d", __func__,
sx->sx_object.lo_name, file, line));
+ WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
+ line);
/* Loop in case we lose the race for lock acquisition. */
while (sx->sx_cnt != 0) {
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 140dafa..db3c55d 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -579,8 +579,42 @@ fixup_filename(const char *file)
return (file);
}
+int
+witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
+{
+
+ if (witness_watch == 0 || panicstr != NULL)
+ return (0);
+
+ /* Require locks that witness knows about. */
+ if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
+ lock2->lo_witness == NULL)
+ return (EINVAL);
+
+ MPASS(!mtx_owned(&w_mtx));
+ mtx_lock_spin(&w_mtx);
+
+ /*
+ * If we already have either an explicit or implied lock order that
+ * is the other way around, then return an error.
+ */
+ if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
+ mtx_unlock_spin(&w_mtx);
+ return (EDOOFUS);
+ }
+
+ /* Try to add the new order. */
+ CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
+ lock2->lo_type, lock1->lo_type);
+ if (!itismychild(lock1->lo_witness, lock2->lo_witness))
+ return (ENOMEM);
+ mtx_unlock_spin(&w_mtx);
+ return (0);
+}
+
void
-witness_lock(struct lock_object *lock, int flags, const char *file, int line)
+witness_checkorder(struct lock_object *lock, int flags, const char *file,
+ int line)
{
struct lock_list_entry **lock_list, *lle;
struct lock_instance *lock1, *lock2;
@@ -588,13 +622,22 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
struct witness *w, *w1;
struct thread *td;
int i, j;
-#ifdef DDB
- int go_into_ddb = 0;
-#endif
if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
panicstr != NULL)
return;
+
+ /*
+ * Try locks do not block if they fail to acquire the lock, thus
+ * there is no danger of deadlocks or of switching while holding a
+ * spin lock if we acquire a lock via a try operation. This
+ * function shouldn't even be called for try locks, so panic if
+ * that happens.
+ */
+ if (flags & LOP_TRYLOCK)
+ panic("%s should not be called for try lock operations",
+ __func__);
+
w = lock->lo_witness;
class = lock->lo_class;
td = curthread;
@@ -606,7 +649,7 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
* implicitly enforces a lock order of all sleep locks before
* all spin locks.
*/
- if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
+ if (td->td_critnest != 0)
panic("blockable sleep lock (%s) %s @ %s:%d",
class->lc_name, lock->lo_name, file, line);
lock_list = &td->td_sleeplocks;
@@ -618,10 +661,12 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
* is needed.
*/
if (*lock_list == NULL)
- goto out;
+ return;
/*
- * Check to see if we are recursing on a lock we already own.
+ * Check to see if we are recursing on a lock we already own. If
+ * so, make sure that we don't mismatch exclusive and shared lock
+ * acquires.
*/
lock1 = find_instance(*lock_list, lock);
if (lock1 != NULL) {
@@ -641,20 +686,6 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
lock1->li_file, lock1->li_line);
panic("excl->share");
}
- lock1->li_flags++;
- if ((lock->lo_flags & LO_RECURSABLE) == 0) {
- printf(
- "recursed on non-recursive lock (%s) %s @ %s:%d\n",
- class->lc_name, lock->lo_name, file, line);
- printf("first acquired @ %s:%d\n", lock1->li_file,
- lock1->li_line);
- panic("recurse");
- }
- CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
- td->td_proc->p_pid, lock->lo_name,
- lock1->li_flags & LI_RECURSEMASK);
- lock1->li_file = file;
- lock1->li_line = line;
return;
}
@@ -664,7 +695,7 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
* spin lock if we acquire a lock via a try operation.
*/
if (flags & LOP_TRYLOCK)
- goto out;
+ return;
/*
* Check for duplicate locks of the same type. Note that we only
@@ -675,7 +706,7 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
w1 = lock1->li_lock->lo_witness;
if (w1 == w) {
if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
- goto out;
+ return;
w->w_same_squawked = 1;
printf("acquiring duplicate lock of same type: \"%s\"\n",
lock->lo_type);
@@ -683,9 +714,10 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
lock1->li_file, lock1->li_line);
printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef DDB
- go_into_ddb = 1;
+ goto debugger;
+#else
+ return;
#endif
- goto out;
}
MPASS(!mtx_owned(&w_mtx));
mtx_lock_spin(&w_mtx);
@@ -694,7 +726,7 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
*/
if (witness_watch > 1 && w->w_level > w1->w_level) {
mtx_unlock_spin(&w_mtx);
- goto out;
+ return;
}
/*
* If we know that the lock we are acquiring comes after
@@ -703,7 +735,7 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
*/
if (isitmydescendant(w1, w)) {
mtx_unlock_spin(&w_mtx);
- goto out;
+ return;
}
for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
for (i = lle->ll_count - 1; i >= 0; i--, j++) {
@@ -754,17 +786,22 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
*/
mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
+ /*
+ * If the lock order is blessed, just bail. We don't
+ * look for other lock order violations though, which
+ * may be a bug.
+ */
if (blessed(w, w1))
- goto out;
+ return;
#endif
if (lock1->li_lock == &Giant.mtx_object) {
if (w1->w_Giant_squawked)
- goto out;
+ return;
else
w1->w_Giant_squawked = 1;
} else {
if (w1->w_other_squawked)
- goto out;
+ return;
else
w1->w_other_squawked = 1;
}
@@ -781,12 +818,12 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
MPASS(lock2->li_lock != NULL);
if (lock2->li_lock->lo_witness == w)
break;
- i--;
if (i == 0 && lle->ll_next != NULL) {
lle = lle->ll_next;
i = lle->ll_count - 1;
MPASS(i >= 0 && i < LOCK_NCHILDREN);
- }
+ } else
+ i--;
} while (i >= 0);
if (i < 0) {
printf(" 1st %p %s (%s) @ %s:%d\n",
@@ -808,18 +845,21 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
lock->lo_name, lock->lo_type, file, line);
}
#ifdef DDB
- go_into_ddb = 1;
+ goto debugger;
+#else
+ return;
#endif
- goto out;
}
}
lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
/*
- * Don't build a new relationship between a sleepable lock and
- * Giant if it is the wrong direction. The real lock order is that
- * sleepable locks come before Giant.
+ * If requested, build a new lock order. However, don't build a new
+ * relationship between a sleepable lock and Giant if it is in the
+ * wrong direction. The correct lock order is that sleepable locks
+ * always come before Giant.
*/
- if (!(lock1->li_lock == &Giant.mtx_object &&
+ if (flags & LOP_NEWORDER &&
+ !(lock1->li_lock == &Giant.mtx_object &&
(lock->lo_flags & LO_SLEEPABLE) != 0)) {
CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
lock->lo_type, lock1->li_lock->lo_type);
@@ -828,19 +868,55 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
return;
}
mtx_unlock_spin(&w_mtx);
+ return;
-out:
#ifdef DDB
- if (go_into_ddb) {
- if (witness_trace)
- backtrace();
- if (witness_ddb)
- Debugger(__func__);
- }
+debugger:
+ if (witness_trace)
+ backtrace();
+ if (witness_ddb)
+ Debugger(__func__);
#endif
+}
+
+void
+witness_lock(struct lock_object *lock, int flags, const char *file, int line)
+{
+ struct lock_list_entry **lock_list, *lle;
+ struct lock_instance *instance;
+ struct witness *w;
+ struct thread *td;
+
+ if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
+ panicstr != NULL)
+ return;
+ w = lock->lo_witness;
+ td = curthread;
+ file = fixup_filename(file);
+
+ /* Determine lock list for this lock. */
+ if (lock->lo_class->lc_flags & LC_SLEEPLOCK)
+ lock_list = &td->td_sleeplocks;
+ else
+ lock_list = PCPU_PTR(spinlocks);
+
+ /* Check to see if we are recursing on a lock we already own. */
+ instance = find_instance(*lock_list, lock);
+ if (instance != NULL) {
+ instance->li_flags++;
+ CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
+ td->td_proc->p_pid, lock->lo_name,
+ instance->li_flags & LI_RECURSEMASK);
+ instance->li_file = file;
+ instance->li_line = line;
+ return;
+ }
+
+ /* Update per-witness last file and line acquire. */
w->w_file = file;
w->w_line = line;
-
+
+ /* Find the next open lock instance in the list and fill it. */
lle = *lock_list;
if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
lle = witness_lock_list_get();
@@ -851,14 +927,14 @@ out:
td->td_proc->p_pid, lle);
*lock_list = lle;
}
- lock1 = &lle->ll_children[lle->ll_count++];
- lock1->li_lock = lock;
- lock1->li_line = line;
- lock1->li_file = file;
+ instance = &lle->ll_children[lle->ll_count++];
+ instance->li_lock = lock;
+ instance->li_line = line;
+ instance->li_file = file;
if ((flags & LOP_EXCLUSIVE) != 0)
- lock1->li_flags = LI_EXCLUSIVE;
+ instance->li_flags = LI_EXCLUSIVE;
else
- lock1->li_flags = 0;
+ instance->li_flags = 0;
CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}
@@ -945,6 +1021,8 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
td = curthread;
class = lock->lo_class;
file = fixup_filename(file);
+
+ /* Find lock instance associated with this lock. */
if (class->lc_flags & LC_SLEEPLOCK)
lock_list = &td->td_sleeplocks;
else
@@ -952,65 +1030,59 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
for (i = 0; i < (*lock_list)->ll_count; i++) {
instance = &(*lock_list)->ll_children[i];
- if (instance->li_lock == lock) {
- if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
- (flags & LOP_EXCLUSIVE) == 0) {
- printf(
- "shared unlock of (%s) %s @ %s:%d\n",
- class->lc_name, lock->lo_name,
- file, line);
- printf(
- "while exclusively locked from %s:%d\n",
- instance->li_file,
- instance->li_line);
- panic("excl->ushare");
- }
- if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
- (flags & LOP_EXCLUSIVE) != 0) {
- printf(
- "exclusive unlock of (%s) %s @ %s:%d\n",
- class->lc_name, lock->lo_name,
- file, line);
- printf(
- "while share locked from %s:%d\n",
- instance->li_file,
- instance->li_line);
- panic("share->uexcl");
- }
- /* If we are recursed, unrecurse. */
- if ((instance->li_flags & LI_RECURSEMASK) > 0) {
- CTR4(KTR_WITNESS,
- "%s: pid %d unrecursed on %s r=%d", __func__,
- td->td_proc->p_pid,
- instance->li_lock->lo_name,
- instance->li_flags);
- instance->li_flags--;
- return;
- }
- s = intr_disable();
- CTR4(KTR_WITNESS,
- "%s: pid %d removed %s from lle[%d]", __func__,
- td->td_proc->p_pid,
- instance->li_lock->lo_name,
- (*lock_list)->ll_count - 1);
- for (j = i; j < (*lock_list)->ll_count - 1; j++)
- (*lock_list)->ll_children[j] =
- (*lock_list)->ll_children[j + 1];
- (*lock_list)->ll_count--;
- intr_restore(s);
- if ((*lock_list)->ll_count == 0) {
- lle = *lock_list;
- *lock_list = lle->ll_next;
- CTR3(KTR_WITNESS,
- "%s: pid %d removed lle %p", __func__,
- td->td_proc->p_pid, lle);
- witness_lock_list_free(lle);
- }
- return;
- }
+ if (instance->li_lock == lock)
+ goto found;
}
panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
file, line);
+found:
+
+ /* First, check for shared/exclusive mismatches. */
+ if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
+ (flags & LOP_EXCLUSIVE) == 0) {
+ printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
+ lock->lo_name, file, line);
+ printf("while exclusively locked from %s:%d\n",
+ instance->li_file, instance->li_line);
+ panic("excl->ushare");
+ }
+ if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
+ (flags & LOP_EXCLUSIVE) != 0) {
+ printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
+ lock->lo_name, file, line);
+ printf("while share locked from %s:%d\n", instance->li_file,
+ instance->li_line);
+ panic("share->uexcl");
+ }
+
+ /* If we are recursed, unrecurse. */
+ if ((instance->li_flags & LI_RECURSEMASK) > 0) {
+ CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
+ td->td_proc->p_pid, instance->li_lock->lo_name,
+ instance->li_flags);
+ instance->li_flags--;
+ return;
+ }
+
+ /* Otherwise, remove this item from the list. */
+ s = intr_disable();
+ CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
+ td->td_proc->p_pid, instance->li_lock->lo_name,
+ (*lock_list)->ll_count - 1);
+ for (j = i; j < (*lock_list)->ll_count - 1; j++)
+ (*lock_list)->ll_children[j] =
+ (*lock_list)->ll_children[j + 1];
+ (*lock_list)->ll_count--;
+ intr_restore(s);
+
+ /* If this lock list entry is now empty, free it. */
+ if ((*lock_list)->ll_count == 0) {
+ lle = *lock_list;
+ *lock_list = lle->ll_next;
+ CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
+ td->td_proc->p_pid, lle);
+ witness_lock_list_free(lle);
+ }
}
/*
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index c2359f6..34a22e2 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -74,6 +74,7 @@ struct lock_class {
* Option flags passed to lock operations that witness also needs to know
* about or that are generic across all locks.
*/
+#define LOP_NEWORDER 0x00000001 /* Define a new lock order. */
#define LOP_QUIET 0x00000002 /* Don't log locking operations. */
#define LOP_TRYLOCK 0x00000004 /* Don't check lock order. */
#define LOP_EXCLUSIVE 0x00000008 /* Exclusive lock. */
@@ -197,6 +198,8 @@ extern struct lock_class lock_class_sx;
void witness_init(struct lock_object *);
void witness_destroy(struct lock_object *);
+int witness_defineorder(struct lock_object *, struct lock_object *);
+void witness_checkorder(struct lock_object *, int, const char *, int);
void witness_lock(struct lock_object *, int, const char *, int);
void witness_upgrade(struct lock_object *, int, const char *, int);
void witness_downgrade(struct lock_object *, int, const char *, int);
@@ -223,6 +226,13 @@ const char *witness_file(struct lock_object *);
#define WITNESS_DESTROY(lock) \
witness_destroy(lock)
+#define WITNESS_CHECKORDER(lock, flags, file, line) \
+ witness_checkorder((lock), (flags), (file), (line))
+
+#define WITNESS_DEFINEORDER(lock1, lock2) \
+ witness_defineorder((struct lock_object *)(lock1), \
+ (struct lock_object *)(lock2))
+
#define WITNESS_LOCK(lock, flags, file, line) \
witness_lock((lock), (flags), (file), (line))
@@ -257,6 +267,8 @@ const char *witness_file(struct lock_object *);
#else /* WITNESS */
#define WITNESS_INIT(lock) ((lock)->lo_flags |= LO_INITIALIZED)
#define WITNESS_DESTROY(lock) ((lock)->lo_flags &= ~LO_INITIALIZED)
+#define WITNESS_DEFINEORDER(lock1, lock2) 0
+#define WITNESS_CHECKORDER(lock, flags, file, line)
#define WITNESS_LOCK(lock, flags, file, line)
#define WITNESS_UPGRADE(lock, flags, file, line)
#define WITNESS_DOWNGRADE(lock, flags, file, line)
@@ -269,5 +281,20 @@ const char *witness_file(struct lock_object *);
#define WITNESS_LINE(lock) (0)
#endif /* WITNESS */
+/*
+ * Helper macros to allow developers to add explicit lock order checks
+ * wherever they please without having to actually grab a lock to do so.
+ */
+#define witness_check_mutex(m) \
+ WITNESS_CHECKORDER(&(m)->mtx_object, LOP_EXCLUSIVE, LOCK_FILE, \
+ LOCK_LINE)
+
+#define witness_check_shared_sx(sx) \
+ WITNESS_CHECKORDER(&(sx)->sx_object, 0, LOCK_FILE, LOCK_LINE)
+
+#define witness_check_exclusive_sx(sx) \
+ WITNESS_CHECKORDER(&(sx)->sx_object, LOP_EXCLUSIVE, LOCK_FILE, \
+ LOCK_LINE)
+
#endif /* _KERNEL */
#endif /* _SYS_LOCK_H_ */
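
For illustration only (foo_mtx, foo_count, and foo_bump() are
hypothetical), the witness_check_mutex() helper defined above lets a
function assert a lock ordering on every code path, including those that
never actually take the lock:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    static struct mtx foo_mtx;
    static int foo_count;

    static void
    foo_bump(int already_locked)
    {
            /*
             * Assert that acquiring foo_mtx here would not violate
             * the lock order, even on the branch that never locks it.
             */
            witness_check_mutex(&foo_mtx);
            if (already_locked) {
                    mtx_assert(&foo_mtx, MA_OWNED);
                    foo_count++;
            } else {
                    mtx_lock(&foo_mtx);
                    foo_count++;
                    mtx_unlock(&foo_mtx);
            }
    }

As with WITNESS_CHECKORDER() itself, the wrapper compiles away entirely
on kernels built without WITNESS.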