author     jhb <jhb@FreeBSD.org>  2005-12-29 20:57:45 +0000
committer  jhb <jhb@FreeBSD.org>  2005-12-29 20:57:45 +0000
commit     dc2b7b5f5dfe89873b4d3a409ccc2ebecd9526e9 (patch)
tree       406a3f78937d90f222773af787bb938c8b72230d /sys
parent     efb6208d84afd810181637220f225dbd024da565 (diff)
download   FreeBSD-src-dc2b7b5f5dfe89873b4d3a409ccc2ebecd9526e9.zip
           FreeBSD-src-dc2b7b5f5dfe89873b4d3a409ccc2ebecd9526e9.tar.gz
Add a new function msleep_spin() which is a slightly stripped-down version
of msleep().  msleep_spin() doesn't support changing the priority of the
thread while it is asleep, nor does it support interruptible sleeps (PCATCH)
or the PDROP flag.  It does support timeouts, however.  It differs from
msleep() in that the passed-in mutex is a spin mutex.  This means one can
use msleep_spin() and wakeup() with a spin mutex similar to msleep() and
wakeup() with a regular mutex.  Note that the spin mutex in question needs
to come before sched_lock and the sleepq locks in lock order.
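
As an illustration only (not part of this commit; the foo_lock, foo_ready,
foo_wait() and foo_signal() names are hypothetical), a minimal sketch of how
a caller might pair msleep_spin() with wakeup() under a spin mutex:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/kernel.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	/* Spin mutex protecting the condition; assumed to have been set up
	 * with mtx_init(&foo_lock, "foo lock", NULL, MTX_SPIN). */
	static struct mtx foo_lock;
	static int foo_ready;

	/* Sleep until foo_ready is set, waiting at most hz ticks per pass. */
	static int
	foo_wait(void)
	{
		int error;

		error = 0;
		mtx_lock_spin(&foo_lock);
		while (!foo_ready && error == 0)
			error = msleep_spin(&foo_ready, &foo_lock, "foowt", hz);
		mtx_unlock_spin(&foo_lock);
		return (error);		/* 0 on wakeup, EWOULDBLOCK on timeout */
	}

	/* Satisfy the condition and wake any sleepers.  Calling wakeup()
	 * while foo_lock is held is consistent with the requirement that
	 * the spin mutex come before the sleepq locks in lock order. */
	static void
	foo_signal(void)
	{
		mtx_lock_spin(&foo_lock);
		foo_ready = 1;
		wakeup(&foo_ready);
		mtx_unlock_spin(&foo_lock);
	}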
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_synch.c |  82
-rw-r--r--  sys/sys/systm.h       |   1
2 files changed, 83 insertions(+), 0 deletions(-)
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index f4ff2b0..b49cb0a 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -241,6 +241,88 @@ msleep(ident, mtx, priority, wmesg, timo)
return (rval);
}
+int
+msleep_spin(ident, mtx, wmesg, timo)
+ void *ident;
+ struct mtx *mtx;
+ const char *wmesg;
+ int timo;
+{
+ struct thread *td;
+ struct proc *p;
+ int rval;
+ WITNESS_SAVE_DECL(mtx);
+
+ td = curthread;
+ p = td->td_proc;
+ KASSERT(mtx != NULL, ("sleeping without a mutex"));
+ KASSERT(p != NULL, ("msleep1"));
+ KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
+
+ if (cold) {
+ /*
+ * During autoconfiguration, just return;
+ * don't run any other threads or panic below,
+ * in case this is the idle thread and already asleep.
+ * XXX: this used to do "s = splhigh(); splx(safepri);
+ * splx(s);" to give interrupts a chance, but there is
+ * no way to give interrupts a chance now.
+ */
+ return (0);
+ }
+
+ sleepq_lock(ident);
+ CTR5(KTR_PROC, "msleep_spin: thread %p (pid %ld, %s) on %s (%p)",
+ (void *)td, (long)p->p_pid, p->p_comm, wmesg, ident);
+
+ DROP_GIANT();
+ mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
+ WITNESS_SAVE(&mtx->mtx_object, mtx);
+ mtx_unlock_spin(mtx);
+
+ /*
+ * We put ourselves on the sleep queue and start our timeout.
+ */
+ sleepq_add(ident, mtx, wmesg, SLEEPQ_MSLEEP);
+ if (timo)
+ sleepq_set_timeout(ident, timo);
+
+ /*
+ * Can't call ktrace with any spin locks held so it can lock the
+ * ktrace_mtx lock, and WITNESS_WARN considers it an error to hold
+ * any spin lock. Thus, we have to drop the sleepq spin lock while
+ * we handle those requests. This is safe since we have placed our
+ * thread on the sleep queue already.
+ */
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CSW)) {
+ sleepq_release(ident);
+ ktrcsw(1, 0);
+ sleepq_lock(ident);
+ }
+#endif
+#ifdef WITNESS
+ sleepq_release(ident);
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
+ wmesg);
+ sleepq_lock(ident);
+#endif
+ if (timo)
+ rval = sleepq_timedwait(ident);
+ else {
+ sleepq_wait(ident);
+ rval = 0;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CSW))
+ ktrcsw(0, 0);
+#endif
+ PICKUP_GIANT();
+ mtx_lock_spin(mtx);
+ WITNESS_RESTORE(&mtx->mtx_object, mtx);
+ return (rval);
+}
+
/*
* Make all threads sleeping on the specified identifier runnable.
*/
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 400758c..80ec09d 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -294,6 +294,7 @@ static __inline void splx(intrmask_t ipl __unused) { return; }
*/
int msleep(void *chan, struct mtx *mtx, int pri, const char *wmesg,
int timo);
+int msleep_spin(void *chan, struct mtx *mtx, const char *wmesg, int timo);
#define tsleep(chan, pri, wmesg, timo) msleep(chan, NULL, pri, wmesg, timo)
void wakeup(void *chan) __nonnull(1);
void wakeup_one(void *chan) __nonnull(1);
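
For comparison (again illustrative only, with hypothetical foo_sleep_lock and
foo_spin_lock locks), the existing msleep() takes a sleep mutex plus a
priority argument that may include PCATCH, while the new msleep_spin() takes
a spin mutex and has no priority argument:

	/* foo_sleep_lock: a default (sleep) mutex; foo_spin_lock: MTX_SPIN. */
	error = msleep(&foo_ready, &foo_sleep_lock, PRIBIO, "foowt", hz);
	error = msleep_spin(&foo_ready, &foo_spin_lock, "foowt", hz);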