author		attilio <attilio@FreeBSD.org>	2012-09-12 22:10:53 +0000
committer	attilio <attilio@FreeBSD.org>	2012-09-12 22:10:53 +0000
commit		a634dfc320608f9955ab0350a7f78ee73064b909 (patch)
tree		139a9f57a8c17d9b87a87568fbf9be1819e671f0 /sys
parent		ac29dac65b70ec7f821f36ac07f086e6a514fe1a (diff)
Improve check coverage about idle threads.
Idle threads are not allowed to acquire any lock but spinlocks. Deny
any attempt to do so by panicking at the locking operation when
INVARIANTS is on. Then, remove the check on blocking on a turnstile.
The check in sleepqueues is left because they are not allowed to use
tsleep() either, which could still happen.

Reviewed by:	bde, jhb, kib
MFC after:	1 week
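The guard added to each locking path is identical. As a minimal sketch, here is how it reads in a hypothetical lock entry point; the function foo_lock() and struct foo are illustrative only and not part of this commit, whose real checks land in the lockmgr, mutex, rmlock, rwlock and sx code shown in the diff below:

#include <sys/param.h>
#include <sys/systm.h>	/* KASSERT() */
#include <sys/proc.h>	/* curthread, TD_IS_IDLETHREAD() */

/*
 * Hypothetical sleepable-lock entry point. With INVARIANTS compiled
 * in, KASSERT() panics if an idle thread reaches this point; without
 * INVARIANTS the assertion compiles away entirely, so production
 * kernels pay no cost on the fast path.
 */
void
foo_lock(struct foo *f, const char *file, int line)
{

	KASSERT(!TD_IS_IDLETHREAD(curthread),
	    ("foo_lock() by idle thread %p on %s @ %s:%d",
	    curthread, f->foo_name, file, line));
	/* ...normal acquisition path continues here... */
}

Spinlocks deliberately carry no such assertion: idle threads may still take them, since acquiring a spinlock never puts the thread to sleep.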
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_lock.c	3
-rw-r--r--	sys/kern/kern_mutex.c	6
-rw-r--r--	sys/kern/kern_rmlock.c	6
-rw-r--r--	sys/kern/kern_rwlock.c	13
-rw-r--r--	sys/kern/kern_sx.c	13
-rw-r--r--	sys/kern/subr_turnstile.c	1
6 files changed, 41 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 24526b0..8b428bd 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -477,6 +477,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
__func__, file, line));
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
+ lk->lock_object.lo_name, file, line));
class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
if (panicstr != NULL) {
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index f718ca0..9827a9f 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -201,6 +201,9 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
MPASS(curthread != NULL);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
+ curthread, m->lock_object.lo_name, file, line));
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
@@ -301,6 +304,9 @@ mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
return (1);
MPASS(curthread != NULL);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
+ curthread, m->lock_object.lo_name, file, line));
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 27d0462..ef1920b 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -498,6 +498,9 @@ void _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
+ curthread, rm->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
file, line, NULL);
@@ -540,6 +543,9 @@ _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
if (SCHEDULER_STOPPED())
return (1);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
+ curthread, rm->lock_object.lo_name, file, line));
if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
file, line, NULL);
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index c337041..e0be154 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -242,6 +242,9 @@ _rw_wlock(struct rwlock *rw, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
MPASS(curthread != NULL);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
+ curthread, rw->lock_object.lo_name, file, line));
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -260,6 +263,9 @@ _rw_try_wlock(struct rwlock *rw, const char *file, int line)
if (SCHEDULER_STOPPED())
return (1);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
+ curthread, rw->lock_object.lo_name, file, line));
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
@@ -333,6 +339,9 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
+ curthread, rw->lock_object.lo_name, file, line));
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
KASSERT(rw_wowner(rw) != curthread,
@@ -521,6 +530,10 @@ _rw_try_rlock(struct rwlock *rw, const char *file, int line)
if (SCHEDULER_STOPPED())
return (1);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
+ curthread, rw->lock_object.lo_name, file, line));
+
for (;;) {
x = rw->rw_lock;
KASSERT(rw->rw_lock != RW_DESTROYED,
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index bcd7acd..487a324 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -250,6 +250,9 @@ _sx_slock(struct sx *sx, int opts, const char *file, int line)
if (SCHEDULER_STOPPED())
return (0);
MPASS(curthread != NULL);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("sx_slock() by idle thread %p on sx %s @ %s:%d",
+ curthread, sx->lock_object.lo_name, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_slock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
@@ -271,6 +274,10 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
if (SCHEDULER_STOPPED())
return (1);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
+ curthread, sx->lock_object.lo_name, file, line));
+
for (;;) {
x = sx->sx_lock;
KASSERT(x != SX_LOCK_DESTROYED,
@@ -297,6 +304,9 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
if (SCHEDULER_STOPPED())
return (0);
MPASS(curthread != NULL);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
+ curthread, sx->lock_object.lo_name, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_xlock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -321,6 +331,9 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
return (1);
MPASS(curthread != NULL);
+ KASSERT(!TD_IS_IDLETHREAD(curthread),
+ ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
+ curthread, sx->lock_object.lo_name, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 31d16fe..76fb964 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -684,7 +684,6 @@ turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
if (owner)
MPASS(owner->td_proc->p_magic == P_MAGIC);
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
- KASSERT(!TD_IS_IDLETHREAD(td), ("idle threads cannot block on locks"));
/*
* If the lock does not already have a turnstile, use this thread's