author      jhb <jhb@FreeBSD.org>    2005-09-15 19:05:37 +0000
committer   jhb <jhb@FreeBSD.org>    2005-09-15 19:05:37 +0000
commit      e535e11c9f1e82c6daa8b6af94249d2f8cc719c7 (patch)
tree        1a97be74d31fd11ae0c4179f921efa5d4a11fa7a
parent      4fbd998dffe50e8632d7713e024a655da034de2f (diff)
- Add a new simple facility for marking the current thread as being in a
  state where sleeping on a sleep queue is not allowed.  The facility
  doesn't support recursion but uses a simple private per-thread flag
  (TDP_NOSLEEPING).  The sleepq_add() function will panic if the flag is
  set and INVARIANTS is enabled.
- Use this new facility to replace the g_xup and g_xdown mutexes that
  were (ab)used to achieve similar behavior.
- Disallow sleeping in interrupt threads when invoking interrupt handlers.

MFC after:	1 week
Reviewed by:	phk
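As a rough illustration (not part of this commit), the new macros are meant to bracket
a code path that must not block; the function and handler names below are hypothetical:

	/*
	 * Minimal sketch of the intended usage of the facility added by
	 * this commit.  While TDP_NOSLEEPING is set on curthread, any code
	 * that reaches sleepq_add() trips the new KASSERT when the kernel
	 * is built with INVARIANTS.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/proc.h>

	static void
	example_run_nonsleepable(void (*handler)(void *), void *arg)
	{
		THREAD_NO_SLEEPING();	/* set TDP_NOSLEEPING; asserts it was clear */
		handler(arg);		/* must not sleep on a sleep queue */
		THREAD_SLEEPING_OK();	/* clear TDP_NOSLEEPING; asserts it was set */
	}

Because the flag does not support recursion, nesting two such regions would fire the
"nested no sleeping" assertion rather than silently stacking.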
-rw-r--r--   sys/geom/geom_io.c          | 37
-rw-r--r--   sys/kern/kern_intr.c        |  2
-rw-r--r--   sys/kern/subr_sleepqueue.c  |  4
-rw-r--r--   sys/sys/proc.h              | 15
4 files changed, 27 insertions, 31 deletions
diff --git a/sys/geom/geom_io.c b/sys/geom/geom_io.c
index f50dad9..7ee1d25 100644
--- a/sys/geom/geom_io.c
+++ b/sys/geom/geom_io.c
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
+#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/errno.h>
@@ -400,12 +401,6 @@ g_io_schedule_down(struct thread *tp __unused)
struct bio *bp;
off_t excess;
int error;
-#ifdef WITNESS
- struct mtx mymutex;
-
- bzero(&mymutex, sizeof mymutex);
- mtx_init(&mymutex, "g_xdown", NULL, MTX_DEF);
-#endif
for(;;) {
g_bioq_lock(&g_bio_run_down);
@@ -461,16 +456,12 @@ g_io_schedule_down(struct thread *tp __unused)
default:
break;
}
-#ifdef WITNESS
- mtx_lock(&mymutex);
-#endif
+ THREAD_NO_SLEEPING();
CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
"len %ld", bp, bp->bio_to->name, bp->bio_offset,
bp->bio_length);
bp->bio_to->geom->start(bp);
-#ifdef WITNESS
- mtx_unlock(&mymutex);
-#endif
+ THREAD_SLEEPING_OK();
}
}
@@ -498,40 +489,26 @@ void
g_io_schedule_up(struct thread *tp __unused)
{
struct bio *bp;
-#ifdef WITNESS
- struct mtx mymutex;
-
- bzero(&mymutex, sizeof mymutex);
- mtx_init(&mymutex, "g_xup", NULL, MTX_DEF);
-#endif
for(;;) {
g_bioq_lock(&g_bio_run_up);
bp = g_bioq_first(&g_bio_run_task);
if (bp != NULL) {
g_bioq_unlock(&g_bio_run_up);
-#ifdef WITNESS
- mtx_lock(&mymutex);
-#endif
+ THREAD_NO_SLEEPING();
CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
bp->bio_task(bp->bio_task_arg);
-#ifdef WITNESS
- mtx_unlock(&mymutex);
-#endif
+ THREAD_SLEEPING_OK();
continue;
}
bp = g_bioq_first(&g_bio_run_up);
if (bp != NULL) {
g_bioq_unlock(&g_bio_run_up);
-#ifdef WITNESS
- mtx_lock(&mymutex);
-#endif
+ THREAD_NO_SLEEPING();
CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
"%ld len %ld", bp, bp->bio_to->name,
bp->bio_offset, bp->bio_length);
biodone(bp);
-#ifdef WITNESS
- mtx_unlock(&mymutex);
-#endif
+ THREAD_SLEEPING_OK();
continue;
}
CTR0(KTR_GEOM, "g_up going to sleep");
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 83ea2b1..d3c876d 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -521,6 +521,7 @@ ithread_loop(void *arg)
* another pass.
*/
atomic_store_rel_int(&ithd->it_need, 0);
+ THREAD_NO_SLEEPING();
restart:
TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
if (ithd->it_flags & IT_SOFT && !ih->ih_need)
@@ -546,6 +547,7 @@ restart:
if ((ih->ih_flags & IH_MPSAFE) == 0)
mtx_unlock(&Giant);
}
+ THREAD_SLEEPING_OK();
/*
* Interrupt storm handling:
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index 44a2596..3def263 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -269,6 +269,10 @@ sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags)
MPASS(td->td_sleepqueue != NULL);
MPASS(wchan != NULL);
+ /* If this thread is not allowed to sleep, die a horrible death. */
+ KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
+ ("trying to sleep while sleeping is prohibited"));
+
/* Look up the sleep queue associated with the wait channel 'wchan'. */
sq = sleepq_lookup(wchan);
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 7f7de04..0bbc867 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -368,7 +368,7 @@ struct thread {
#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
#define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */
#define TDP_SA 0x00000080 /* A scheduler activation based thread. */
-#define TDP_UNUSED8 0x00000100 /* --available -- */
+#define TDP_NOSLEEPING 0x00000100 /* Thread is not allowed to sleep on a sq. */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_UNUSED10 0x00000400 /* --available -- */
#define TDP_CAN_UNBIND 0x00000800 /* Only temporarily bound. */
@@ -793,6 +793,19 @@ MALLOC_DECLARE(M_ZOMBIE);
/* Check whether a thread is safe to be swapped out. */
#define thread_safetoswapout(td) (TD_IS_SLEEPING(td) || TD_IS_SUSPENDED(td))
+/* Control whether or not it is safe for curthread to sleep. */
+#define THREAD_NO_SLEEPING() do { \
+ KASSERT(!(curthread->td_pflags & TDP_NOSLEEPING), \
+ ("nested no sleeping")); \
+ curthread->td_pflags |= TDP_NOSLEEPING; \
+} while (0)
+
+#define THREAD_SLEEPING_OK() do { \
+ KASSERT((curthread->td_pflags & TDP_NOSLEEPING), \
+ ("nested sleeping ok")); \
+ curthread->td_pflags &= ~TDP_NOSLEEPING; \
+} while (0)
+
/* Lock and unlock process arguments. */
#define PARGS_LOCK(p) mtx_lock(&pargs_ref_lock)
#define PARGS_UNLOCK(p) mtx_unlock(&pargs_ref_lock)