summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2007-06-04 23:55:45 +0000
committerjeff <jeff@FreeBSD.org>2007-06-04 23:55:45 +0000
commit3a4cdc52dcc2679fea1c55cd0fac1ee071c4dac7 (patch)
tree8450686eb3eb2340548ab4e0c94be99ae3fb4e10 /sys
parent0e873af7bd7d13718d9f28b5184cdec1d247eded (diff)
downloadFreeBSD-src-3a4cdc52dcc2679fea1c55cd0fac1ee071c4dac7.zip
FreeBSD-src-3a4cdc52dcc2679fea1c55cd0fac1ee071c4dac7.tar.gz
Commit 10/14 of sched_lock decomposition.
- Add new spinlocks to support thread_lock() and adjust ordering. Tested by: kris, current@ Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc. Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/subr_witness.c17
1 file changed, 11 insertions, 6 deletions
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 6069870..2310016 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -404,9 +404,12 @@ static struct witness_order_list_entry order_lists[] = {
#ifdef HWPMC_HOOKS
{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
+ { "process slock", &lock_class_mtx_spin },
{ "sleepq chain", &lock_class_mtx_spin },
- { "sched lock", &lock_class_mtx_spin },
+ { "umtx lock", &lock_class_mtx_spin },
{ "turnstile chain", &lock_class_mtx_spin },
+ { "turnstile lock", &lock_class_mtx_spin },
+ { "sched lock", &lock_class_mtx_spin },
{ "td_contested", &lock_class_mtx_spin },
{ "callout", &lock_class_mtx_spin },
{ "entropy harvest mutex", &lock_class_mtx_spin },
@@ -429,7 +432,8 @@ static struct witness_order_list_entry order_lists[] = {
#endif
{ "clk", &lock_class_mtx_spin },
{ "mutex profiling lock", &lock_class_mtx_spin },
- { "kse zombie lock", &lock_class_mtx_spin },
+ { "kse lock", &lock_class_mtx_spin },
+ { "zombie lock", &lock_class_mtx_spin },
{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
{ "MCA spin lock", &lock_class_mtx_spin },
@@ -446,6 +450,7 @@ static struct witness_order_list_entry order_lists[] = {
#ifdef HWPMC_HOOKS
{ "pmc-leaf", &lock_class_mtx_spin },
#endif
+ { "blocked lock", &lock_class_mtx_spin },
{ NULL, NULL },
{ NULL, NULL }
};
@@ -1961,10 +1966,10 @@ witness_list(struct thread *td)
* td->td_oncpu to get the list of spinlocks for this thread
* and "fix" this.
*
- * That still wouldn't really fix this unless we locked sched_lock
- * or stopped the other CPU to make sure it wasn't changing the list
- * out from under us. It is probably best to just not try to handle
- * threads on other CPU's for now.
+ * That still wouldn't really fix this unless we locked the scheduler
+ * lock or stopped the other CPU to make sure it wasn't changing the
+ * list out from under us. It is probably best to just not try to
+ * handle threads on other CPU's for now.
*/
if (td == curthread && PCPU_GET(spinlocks) != NULL)
witness_list_locks(PCPU_PTR(spinlocks));
OpenPOWER on IntegriCloud