author    julian <julian@FreeBSD.org>    2002-09-11 08:13:56 +0000
committer julian <julian@FreeBSD.org>    2002-09-11 08:13:56 +0000
commit    5702a380a53c99a00275cb7e2836033a7497bef8 (patch)
tree      10514ece7d621a24c034e4c778c793b9ea2d9675
parent    58f594ebe3d58d1f0905309bd8494e4e9549d121 (diff)
Completely redo thread states.
Reviewed by: davidxu@freebsd.org
-rw-r--r--  lib/libkvm/kvm_proc.c            |  45
-rw-r--r--  sys/alpha/linux/linux_machdep.c  |   1
-rw-r--r--  sys/compat/linprocfs/linprocfs.c |   7
-rw-r--r--  sys/ddb/db_ps.c                  |  73
-rw-r--r--  sys/fs/procfs/procfs_ctl.c       |   2
-rw-r--r--  sys/fs/procfs/procfs_ioctl.c     |   3
-rw-r--r--  sys/i386/linux/linux_machdep.c   |   1
-rw-r--r--  sys/kern/init_main.c             |   1
-rw-r--r--  sys/kern/kern_condvar.c          | 102
-rw-r--r--  sys/kern/kern_fork.c             |   1
-rw-r--r--  sys/kern/kern_idle.c             |   4
-rw-r--r--  sys/kern/kern_intr.c             |  16
-rw-r--r--  sys/kern/kern_kse.c              |  95
-rw-r--r--  sys/kern/kern_kthread.c          |   7
-rw-r--r--  sys/kern/kern_mutex.c            |  13
-rw-r--r--  sys/kern/kern_proc.c             |  10
-rw-r--r--  sys/kern/kern_sig.c              |  24
-rw-r--r--  sys/kern/kern_switch.c           |  14
-rw-r--r--  sys/kern/kern_synch.c            | 188
-rw-r--r--  sys/kern/kern_thread.c           |  95
-rw-r--r--  sys/kern/ksched.c                |   8
-rw-r--r--  sys/kern/subr_smp.c              |   2
-rw-r--r--  sys/kern/subr_turnstile.c        |  13
-rw-r--r--  sys/kern/sys_generic.c           |   7
-rw-r--r--  sys/kern/sys_process.c           |   1
-rw-r--r--  sys/kern/tty.c                   |  13
-rw-r--r--  sys/kern/vfs_subr.c              |   8
-rw-r--r--  sys/posix4/ksched.c              |   8
-rw-r--r--  sys/sys/proc.h                   | 106
-rw-r--r--  sys/vm/vm_glue.c                 |  25
-rw-r--r--  sys/vm/vm_meter.c                |  37
-rw-r--r--  sys/vm/vm_pageout.c              |  12
32 files changed, 443 insertions(+), 499 deletions(-)
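[Editorial note] The heart of this commit is visible in the sys/sys/proc.h hunk below: the flat TDS_* enumeration (TDS_SLP, TDS_MTX, TDS_SUSPENDED, TDS_SWAPPED, ...) collapses into five core states, and everything that merely prevents a thread from running becomes a bit in the new td_inhibitors mask under the single state TDS_INHIBITED. A thread can now be, say, both sleeping and suspended at once, and it only becomes runnable when the last inhibitor is cleared. Below is a minimal userland sketch of that mechanism; the macro bodies mirror the proc.h hunk, while the cut-down struct thread and the main() scaffolding are hypothetical demo code.

#include <stdio.h>

enum tds { TDS_INACTIVE, TDS_INHIBITED, TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING };

#define TDI_SUSPENDED 0x01 /* on suspension queue */
#define TDI_SLEEPING  0x02 /* actually asleep */

struct thread { enum tds td_state; int td_inhibitors; };

/* Set one inhibitor; the thread is inhibited while any bit is set. */
#define TD_SET_INHIB(td, inhib) do { \
	(td)->td_state = TDS_INHIBITED; \
	(td)->td_inhibitors |= (inhib); \
} while (0)

/* Clear one inhibitor; only clearing the last one makes the thread runnable. */
#define TD_CLR_INHIB(td, inhib) do { \
	if (((td)->td_inhibitors & (inhib)) && \
	    (((td)->td_inhibitors &= ~(inhib)) == 0)) \
		(td)->td_state = TDS_CAN_RUN; \
} while (0)

int
main(void)
{
	struct thread td = { TDS_RUNNING, 0 };

	TD_SET_INHIB(&td, TDI_SLEEPING);	/* e.g. msleep() */
	TD_SET_INHIB(&td, TDI_SUSPENDED);	/* e.g. thread_suspend_one() */
	TD_CLR_INHIB(&td, TDI_SLEEPING);	/* wakeup(): still inhibited */
	printf("after wakeup: state=%d inhibitors=%#x\n",
	    td.td_state, td.td_inhibitors);	/* TDS_INHIBITED, 0x1 */
	TD_CLR_INHIB(&td, TDI_SUSPENDED);	/* last inhibitor gone */
	printf("after unsuspend: state=%d\n", td.td_state);	/* TDS_CAN_RUN */
	return (0);
}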
diff --git a/lib/libkvm/kvm_proc.c b/lib/libkvm/kvm_proc.c
index 5029373..19c28e0 100644
--- a/lib/libkvm/kvm_proc.c
+++ b/lib/libkvm/kvm_proc.c
@@ -119,7 +119,7 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
struct procsig procsig;
struct pstats pstats;
struct ucred ucred;
- struct thread mainthread;
+ struct thread mtd;
struct proc proc;
struct proc pproc;
struct timeval tv;
@@ -134,7 +134,7 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
}
if (proc.p_state != PRS_ZOMBIE) {
if (KREAD(kd, (u_long)TAILQ_FIRST(&proc.p_threads),
- &mainthread)) {
+ &mtd)) {
_kvm_err(kd, kd->program,
"can't read thread at %x",
TAILQ_FIRST(&proc.p_threads));
@@ -273,8 +273,8 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
nopgrp:
kp->ki_tdev = NODEV;
}
- if ((proc.p_state != PRS_ZOMBIE) && mainthread.td_wmesg)
- (void)kvm_read(kd, (u_long)mainthread.td_wmesg,
+ if ((proc.p_state != PRS_ZOMBIE) && mtd.td_wmesg)
+ (void)kvm_read(kd, (u_long)mtd.td_wmesg,
kp->ki_wmesg, WMESGLEN);
#ifdef sparc
@@ -313,11 +313,11 @@ nopgrp:
kp->ki_comm[MAXCOMLEN] = 0;
}
if ((proc.p_state != PRS_ZOMBIE) &&
- (mainthread.td_blocked != 0)) {
+ (mtd.td_blocked != 0)) {
kp->ki_kiflag |= KI_MTXBLOCK;
- if (mainthread.td_mtxname)
+ if (mtd.td_mtxname)
(void)kvm_read(kd,
- (u_long)mainthread.td_mtxname,
+ (u_long)mtd.td_mtxname,
kp->ki_mtxname, MTXNAMELEN);
kp->ki_mtxname[MTXNAMELEN] = 0;
}
@@ -335,33 +335,36 @@ nopgrp:
kp->ki_swtime = proc.p_swtime;
kp->ki_flag = proc.p_flag;
kp->ki_sflag = proc.p_sflag;
- kp->ki_wchan = mainthread.td_wchan;
+ kp->ki_wchan = mtd.td_wchan;
kp->ki_traceflag = proc.p_traceflag;
if (proc.p_state == PRS_NORMAL) {
- if ((mainthread.td_state == TDS_RUNQ) ||
- (mainthread.td_state == TDS_RUNNING)) {
+ if (TD_ON_RUNQ(&mtd) ||
+ TD_CAN_RUN(&mtd) ||
+ TD_IS_RUNNING(&mtd)) {
kp->ki_stat = SRUN;
- } else if (mainthread.td_state == TDS_SLP) {
- kp->ki_stat = SSLEEP;
- } else if (P_SHOULDSTOP(&proc)) {
- kp->ki_stat = SSTOP;
- } else if (mainthread.td_state == TDS_MTX) {
- kp->ki_stat = SMTX;
- } else {
- kp->ki_stat = SWAIT;
+ } else if (mtd.td_state == TDS_INHIBITED) {
+ if (P_SHOULDSTOP(&proc)) {
+ kp->ki_stat = SSTOP;
+ } else if (TD_IS_SLEEPING(&mtd)) {
+ kp->ki_stat = SSLEEP;
+ } else if (TD_ON_MUTEX(&mtd)) {
+ kp->ki_stat = SMTX;
+ } else {
+ kp->ki_stat = SWAIT;
+ }
}
} else {
kp->ki_stat = SIDL;
}
kp->ki_pri.pri_class = proc.p_ksegrp.kg_pri_class;
kp->ki_pri.pri_user = proc.p_ksegrp.kg_user_pri;
- kp->ki_pri.pri_level = mainthread.td_priority;
- kp->ki_pri.pri_native = mainthread.td_base_pri;
+ kp->ki_pri.pri_level = mtd.td_priority;
+ kp->ki_pri.pri_native = mtd.td_base_pri;
kp->ki_nice = proc.p_ksegrp.kg_nice;
kp->ki_lock = proc.p_lock;
kp->ki_rqindex = proc.p_kse.ke_rqindex;
kp->ki_oncpu = proc.p_kse.ke_oncpu;
- kp->ki_lastcpu = mainthread.td_lastcpu;
+ kp->ki_lastcpu = mtd.td_lastcpu;
} else {
kp->ki_stat = SZOMB;
}
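[Editorial note] For userland consumers like ps(1), the ki_stat mapping above becomes a two-level test: first the core td_state, then the inhibitor bits. A hedged, self-contained restatement of that decision tree follows; the flag parameters stand in for the real P_SHOULDSTOP() and TD_* macro tests, and SRUN etc. shadow the kinfo constants from sys/sys/proc.h.

enum kistat { SIDL, SRUN, SSLEEP, SSTOP, SMTX, SWAIT, SZOMB };

/* Sketch of the ki_stat derivation in the hunk above (PRS_NORMAL case). */
enum kistat
td_to_kistat(int on_runq, int can_run, int running, int inhibited,
    int shouldstop, int sleeping, int on_mutex)
{
	if (on_runq || can_run || running)
		return (SRUN);
	if (inhibited) {
		if (shouldstop)
			return (SSTOP);
		if (sleeping)
			return (SSLEEP);
		if (on_mutex)
			return (SMTX);
		return (SWAIT);		/* e.g. TDI_IWAIT */
	}
	return (SIDL);			/* process not yet PRS_NORMAL */
}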
diff --git a/sys/alpha/linux/linux_machdep.c b/sys/alpha/linux/linux_machdep.c
index 5f33c80..ecec7a1 100644
--- a/sys/alpha/linux/linux_machdep.c
+++ b/sys/alpha/linux/linux_machdep.c
@@ -180,6 +180,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
* Make this runnable after we are finished with it.
*/
mtx_lock_spin(&sched_lock);
+ TD_SET_CAN_RUN(td2);
setrunqueue(FIRST_THREAD_IN_PROC(p2));
mtx_unlock_spin(&sched_lock);
diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c
index 5129746..74280d6 100644
--- a/sys/compat/linprocfs/linprocfs.c
+++ b/sys/compat/linprocfs/linprocfs.c
@@ -566,18 +566,13 @@ linprocfs_doprocstatus(PFS_FILL_ARGS)
break;
}
switch(td2->td_state) {
- case TDS_SLP:
- case TDS_MTX:
+ case TDS_INHIBITED:
state = "S (sleeping)";
break;
case TDS_RUNQ:
case TDS_RUNNING:
state = "R (running)";
break;
- case TDS_NEW:
- case TDS_UNQUEUED:
- case TDS_IWAIT:
- case TDS_SURPLUS:
default:
state = "? (unknown)";
break;
diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c
index 7025ee3..cb9bfc9 100644
--- a/sys/ddb/db_ps.c
+++ b/sys/ddb/db_ps.c
@@ -112,7 +112,7 @@ db_ps(dummy1, dummy2, dummy3, dummy4)
state = "wait";
break;
case PRS_ZOMBIE:
- state = "zomp";
+ state = "zomb";
break;
default:
state = "Unkn";
@@ -123,33 +123,56 @@ db_ps(dummy1, dummy2, dummy3, dummy4)
p->p_ucred != NULL ? p->p_ucred->cr_ruid : 0, pp->p_pid,
p->p_pgrp != NULL ? p->p_pgrp->pg_id : 0, p->p_flag,
state);
- if (p->p_flag & P_KSES) {
+ if (p->p_flag & P_KSES)
db_printf("(threaded) %s\n", p->p_comm);
- FOREACH_THREAD_IN_PROC(p, td) {
- db_printf( ". . . . . . . "
- ". thread %p . . . ", td);
- if (td->td_wchan != NULL) {
- db_printf("SLP %6s %8p\n", td->td_wmesg,
- (void *)td->td_wchan);
- } else if (td->td_state == TDS_MTX) {
- db_printf("MTX %6s %8p\n", td->td_mtxname,
- (void *)td->td_blocked);
- } else {
- db_printf("--not blocked--\n");
- }
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if (p->p_flag & P_KSES)
+ db_printf( " thread %p ", td);
+ if (TD_ON_SLEEPQ(td)) {
+ if (td->td_flags & TDF_CVWAITQ)
+ db_printf("[CVQ ");
+ else
+ db_printf("[SLPQ ");
+ db_printf(" %6s %8p]", td->td_wmesg,
+ (void *)td->td_wchan);
}
- } else {
- td = FIRST_THREAD_IN_PROC(p);
- if (td != NULL && td->td_wchan != NULL) {
- db_printf(" %-6s %8p", td->td_wmesg,
- (void *)td->td_wchan);
- } else if (td != NULL && td->td_state == TDS_MTX) {
- db_printf(" %6s %8p", td->td_mtxname,
- (void *)td->td_blocked);
- } else {
- db_printf(" ");
+ switch (td->td_state) {
+ case TDS_INHIBITED:
+ if (TD_ON_MUTEX(td)) {
+ db_printf("[MTX %6s %8p]",
+ td->td_mtxname,
+ (void *)td->td_blocked);
+ }
+ if (TD_IS_SLEEPING(td)) {
+ db_printf("[SLP]");
+ }
+ if (TD_IS_SWAPPED(td)) {
+ db_printf("[SWAP]");
+ }
+ if (TD_IS_SUSPENDED(td)) {
+ db_printf("[SUSP]");
+ }
+ if (TD_AWAITING_INTR(td)) {
+ db_printf("[IWAIT]");
+ }
+ break;
+ case TDS_CAN_RUN:
+ db_printf("[Can run]");
+ break;
+ case TDS_RUNQ:
+ db_printf("[RUNQ]");
+ break;
+ case TDS_RUNNING:
+ db_printf("[CPU %d]", td->td_kse->ke_oncpu);
+ break;
+ default:
+ panic("unknown thread state");
}
- db_printf(" %s\n", p->p_comm);
+ if (p->p_flag & P_KSES)
+ db_printf("\n");
+ else
+ db_printf(" %s\n", p->p_comm);
+
}
/* PROC_UNLOCK(p); */
diff --git a/sys/fs/procfs/procfs_ctl.c b/sys/fs/procfs/procfs_ctl.c
index 19758d8..c9572b6 100644
--- a/sys/fs/procfs/procfs_ctl.c
+++ b/sys/fs/procfs/procfs_ctl.c
@@ -352,7 +352,7 @@ procfs_doprocctl(PFS_FILL_ARGS)
mtx_lock_spin(&sched_lock);
/* XXXKSE: */
p->p_flag &= ~P_STOPPED_SIG;
- setrunnable(FIRST_THREAD_IN_PROC(p));
+ thread_unsuspend(p);
mtx_unlock_spin(&sched_lock);
} else
psignal(p, nm->nm_val);
diff --git a/sys/fs/procfs/procfs_ioctl.c b/sys/fs/procfs/procfs_ioctl.c
index 1dd0b6d..170a810 100644
--- a/sys/fs/procfs/procfs_ioctl.c
+++ b/sys/fs/procfs/procfs_ioctl.c
@@ -97,8 +97,7 @@ procfs_ioctl(PFS_IOCTL_ARGS)
if (P_SHOULDSTOP(p)) {
p->p_xstat = sig;
p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
- FOREACH_THREAD_IN_PROC(p, td)
- setrunnable(td); /* XXX Totally bogus */
+ thread_unsuspend(p);
mtx_unlock_spin(&sched_lock);
} else {
mtx_unlock_spin(&sched_lock);
diff --git a/sys/i386/linux/linux_machdep.c b/sys/i386/linux/linux_machdep.c
index d435768..56952cb 100644
--- a/sys/i386/linux/linux_machdep.c
+++ b/sys/i386/linux/linux_machdep.c
@@ -362,6 +362,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
* Make this runnable after we are finished with it.
*/
mtx_lock_spin(&sched_lock);
+ TD_SET_CAN_RUN(FIRST_THREAD_IN_PROC(p2));
setrunqueue(FIRST_THREAD_IN_PROC(p2));
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p2);
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 8c4e8de..70dfa24 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -716,6 +716,7 @@ kick_init(const void *udata __unused)
td = FIRST_THREAD_IN_PROC(initproc);
mtx_lock_spin(&sched_lock);
+ TD_SET_CAN_RUN(td);
setrunqueue(td); /* XXXKSE */
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 5d73049..26f5376 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -48,7 +48,7 @@
*/
#define CV_ASSERT(cvp, mp, td) do { \
KASSERT((td) != NULL, ("%s: curthread NULL", __func__)); \
- KASSERT((td)->td_state == TDS_RUNNING, ("%s: not TDS_RUNNING", __func__)); \
+ KASSERT(TD_IS_RUNNING(td), ("%s: not TDS_RUNNING", __func__)); \
KASSERT((cvp) != NULL, ("%s: cvp NULL", __func__)); \
KASSERT((mp) != NULL, ("%s: mp NULL", __func__)); \
mtx_assert((mp), MA_OWNED | MA_NOTRECURSED); \
@@ -68,12 +68,14 @@
("%s: Multiple mutexes", __func__)); \
} \
} while (0)
+
#define CV_SIGNAL_VALIDATE(cvp) do { \
if (!TAILQ_EMPTY(&(cvp)->cv_waitq)) { \
KASSERT(mtx_owned((cvp)->cv_mtx), \
- ("%s: Mutex not owned", __func__)); \
+ ("%s: Mutex not owned", __func__)); \
} \
} while (0)
+
#else
#define CV_WAIT_VALIDATE(cvp, mp)
#define CV_SIGNAL_VALIDATE(cvp)
@@ -148,9 +150,9 @@ static __inline void
cv_switch(struct thread *td)
{
- td->td_state = TDS_SLP;
- td->td_proc->p_stats->p_ru.ru_nvcsw++;
cv_check_upcall(td);
+ TD_SET_SLEEPING(td);
+ td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)", td,
td->td_proc->p_pid, td->td_proc->p_comm);
@@ -171,22 +173,23 @@ cv_switch_catch(struct thread *td)
* both) could occur while we were stopped. A SIGCONT would cause us to
* be marked as TDS_SLP without resuming us, thus we must be ready for
* sleep when cursig is called. If the wakeup happens while we're
- * stopped, td->td_wchan will be 0 upon return from cursig.
+ * stopped, td->td_wchan will be 0 upon return from cursig,
+ * and TD_ON_SLEEPQ() will return false.
*/
td->td_flags |= TDF_SINTR;
mtx_unlock_spin(&sched_lock);
p = td->td_proc;
PROC_LOCK(p);
- sig = cursig(td); /* XXXKSE */
+ sig = cursig(td);
if (thread_suspend_check(1))
sig = SIGSTOP;
mtx_lock_spin(&sched_lock);
PROC_UNLOCK(p);
if (sig != 0) {
- if (td->td_wchan != NULL)
+ if (TD_ON_SLEEPQ(td))
cv_waitq_remove(td);
- td->td_state = TDS_RUNNING; /* XXXKSE */
- } else if (td->td_wchan != NULL) {
+ TD_SET_RUNNING(td);
+ } else if (TD_ON_SLEEPQ(td)) {
cv_switch(td);
}
td->td_flags &= ~TDF_SINTR;
@@ -202,6 +205,7 @@ cv_waitq_add(struct cv *cvp, struct thread *td)
{
td->td_flags |= TDF_CVWAITQ;
+ TD_SET_ON_SLEEPQ(td);
td->td_wchan = cvp;
td->td_wmesg = cvp->cv_description;
td->td_ksegrp->kg_slptime = 0; /* XXXKSE */
@@ -389,10 +393,10 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
* between msleep and endtsleep.
* Go back to sleep.
*/
- td->td_flags |= TDF_TIMEOUT;
- td->td_state = TDS_SLP;
+ TD_SET_SLEEPING(td);
td->td_proc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
+ td->td_flags &= ~TDF_TIMOFAIL;
}
if (td->td_proc->p_flag & P_WEXIT)
@@ -467,10 +471,10 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
* between msleep and endtsleep.
* Go back to sleep.
*/
- td->td_flags |= TDF_TIMEOUT;
- td->td_state = TDS_SLP;
+ TD_SET_SLEEPING(td);
td->td_proc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
+ td->td_flags &= ~TDF_TIMOFAIL;
}
mtx_unlock_spin(&sched_lock);
@@ -507,35 +511,14 @@ static __inline void
cv_wakeup(struct cv *cvp)
{
struct thread *td;
- struct ksegrp *kg;
mtx_assert(&sched_lock, MA_OWNED);
td = TAILQ_FIRST(&cvp->cv_waitq);
KASSERT(td->td_wchan == cvp, ("%s: bogus wchan", __func__));
KASSERT(td->td_flags & TDF_CVWAITQ, ("%s: not on waitq", __func__));
- TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
- td->td_flags &= ~TDF_CVWAITQ;
- td->td_wchan = 0;
- if (td->td_state == TDS_SLP) {
- /* OPTIMIZED EXPANSION OF setrunnable(td); */
- CTR3(KTR_PROC, "cv_wakeup: thread %p (pid %d, %s)",
- td, td->td_proc->p_pid, td->td_proc->p_comm);
- kg = td->td_ksegrp;
- if (kg->kg_slptime > 1) /* XXXKSE */
- updatepri(kg);
- kg->kg_slptime = 0;
- if (td->td_proc->p_sflag & PS_INMEM) {
- setrunqueue(td);
- maybe_resched(td);
- } else {
- td->td_state = TDS_SWAPPED;
- if ((td->td_proc->p_sflag & PS_SWAPPINGIN) == 0) {
- td->td_proc->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
- }
- }
- /* END INLINE EXPANSION */
- }
+ cv_waitq_remove(td);
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
}
/*
@@ -583,13 +566,12 @@ cv_waitq_remove(struct thread *td)
{
struct cv *cvp;
- mtx_lock_spin(&sched_lock);
+ mtx_assert(&sched_lock, MA_OWNED);
if ((cvp = td->td_wchan) != NULL && td->td_flags & TDF_CVWAITQ) {
TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
td->td_flags &= ~TDF_CVWAITQ;
- td->td_wchan = NULL;
+ TD_CLR_ON_SLEEPQ(td);
}
- mtx_unlock_spin(&sched_lock);
}
/*
@@ -602,29 +584,17 @@ cv_timedwait_end(void *arg)
struct thread *td;
td = arg;
- CTR3(KTR_PROC, "cv_timedwait_end: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
- td->td_proc->p_comm);
+ CTR3(KTR_PROC, "cv_timedwait_end: thread %p (pid %d, %s)",
+ td, td->td_proc->p_pid, td->td_proc->p_comm);
mtx_lock_spin(&sched_lock);
- if (td->td_flags & TDF_TIMEOUT) {
- td->td_flags &= ~TDF_TIMEOUT;
- if (td->td_proc->p_sflag & PS_INMEM) {
- setrunqueue(td);
- maybe_resched(td);
- } else {
- td->td_state = TDS_SWAPPED;
- if ((td->td_proc->p_sflag & PS_SWAPPINGIN) == 0) {
- td->td_proc->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
- }
- }
- } else if (td->td_wchan != NULL) {
- if (td->td_state == TDS_SLP) /* XXXKSE */
- setrunnable(td);
- else
- cv_waitq_remove(td);
+ if (TD_ON_SLEEPQ(td)) {
+ cv_waitq_remove(td);
td->td_flags |= TDF_TIMEOUT;
- } else
+ } else {
td->td_flags |= TDF_TIMOFAIL;
+ }
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
mtx_unlock_spin(&sched_lock);
}
@@ -637,16 +607,14 @@ cv_abort(struct thread *td)
{
CTR3(KTR_PROC, "cv_abort: thread %p (pid %d, %s)", td,
- td->td_proc->p_pid,
- td->td_proc->p_comm);
+ td->td_proc->p_pid, td->td_proc->p_comm);
mtx_lock_spin(&sched_lock);
if ((td->td_flags & (TDF_SINTR|TDF_TIMEOUT)) == TDF_SINTR) {
- if (td->td_wchan != NULL) {
- if (td->td_state == TDS_SLP)
- setrunnable(td);
- else
- cv_waitq_remove(td);
+ if (TD_ON_SLEEPQ(td)) {
+ cv_waitq_remove(td);
}
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
}
mtx_unlock_spin(&sched_lock);
}
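[Editorial note] Across this file the hand-rolled "OPTIMIZED EXPANSION OF setrunnable()" blocks disappear, and every wakeup path (cv_wakeup(), cv_timedwait_end(), cv_abort(), and selwakeup() later in the diff) converges on one three-step sequence. A sketch of that invariant as it stands after this commit; it reuses the primitives from the hunks above and is not meant to compile outside the tree.

static void
cv_wakeup_sketch(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_SLEEPQ(td))
		cv_waitq_remove(td);	/* detach from the cv wait queue */
	TD_CLR_SLEEPING(td);		/* may leave other inhibitors set */
	setrunnable(td);		/* returns early if still TDS_INHIBITED */
}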
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index cb991d9..d23f47e 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -711,6 +711,7 @@ again:
if ((flags & RFSTOPPED) == 0) {
mtx_lock_spin(&sched_lock);
p2->p_state = PRS_NORMAL;
+ TD_SET_CAN_RUN(td2);
setrunqueue(td2);
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index 49aacf8..b0f4fda 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -63,7 +63,7 @@ idle_setup(void *dummy)
p->p_flag |= P_NOLOAD;
p->p_state = PRS_NORMAL;
td = FIRST_THREAD_IN_PROC(p);
- td->td_state = TDS_UNQUEUED;
+ td->td_state = TDS_CAN_RUN;
td->td_kse->ke_flags |= KEF_IDLEKSE;
#ifdef SMP
}
@@ -112,7 +112,7 @@ idle_proc(void *dummy)
mtx_lock_spin(&sched_lock);
p->p_stats->p_ru.ru_nvcsw++;
- td->td_state = TDS_UNQUEUED;
+ td->td_state = TDS_CAN_RUN;
mi_switch();
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 796c34b..4eb5c1f 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -201,7 +201,7 @@ ithread_create(struct ithd **ithread, int vector, int flags,
td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
td->td_ksegrp->kg_pri_class = PRI_ITHD;
td->td_priority = PRI_MAX_ITHD;
- td->td_state = TDS_IWAIT;
+ TD_SET_IWAIT(td);
ithd->it_td = td;
td->td_ithd = ithd;
if (ithread != NULL)
@@ -229,7 +229,8 @@ ithread_destroy(struct ithd *ithread)
}
ithread->it_flags |= IT_DEAD;
mtx_lock_spin(&sched_lock);
- if (td->td_state == TDS_IWAIT) {
+ if (TD_AWAITING_INTR(td)) {
+ TD_CLR_IWAIT(td);
setrunqueue(td);
}
mtx_unlock_spin(&sched_lock);
@@ -326,7 +327,7 @@ ok:
* handler as being dead and let the ithread do the actual removal.
*/
mtx_lock_spin(&sched_lock);
- if (ithread->it_td->td_state != TDS_IWAIT) {
+ if (!TD_AWAITING_INTR(ithread->it_td)) {
handler->ih_flags |= IH_DEAD;
/*
@@ -388,16 +389,17 @@ ithread_schedule(struct ithd *ithread, int do_switch)
*/
ithread->it_need = 1;
mtx_lock_spin(&sched_lock);
- if (td->td_state == TDS_IWAIT) {
+ if (TD_AWAITING_INTR(td)) {
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
+ TD_CLR_IWAIT(td);
setrunqueue(td);
if (do_switch &&
(ctd->td_critnest == 1) ) {
- KASSERT((ctd->td_state == TDS_RUNNING),
+ KASSERT((TD_IS_RUNNING(ctd)),
("ithread_schedule: Bad state for curthread."));
ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
if (ctd->td_kse->ke_flags & KEF_IDLEKSE)
- ctd->td_state = TDS_UNQUEUED;
+ ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
mi_switch();
} else {
curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
@@ -552,7 +554,7 @@ restart:
*/
if (ithd->it_enable != NULL)
ithd->it_enable(ithd->it_vector);
- td->td_state = TDS_IWAIT; /* we're idle */
+ TD_SET_IWAIT(td); /* we're idle */
p->p_stats->p_ru.ru_nvcsw++;
CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
mi_switch();
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 0f9c7dd..0716d9b 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -95,7 +95,7 @@ thread_ctor(void *mem, int size, void *arg)
("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
td = (struct thread *)mem;
- td->td_state = TDS_NEW;
+ td->td_state = TDS_INACTIVE;
td->td_flags |= TDF_UNBOUND;
cached_threads--; /* XXXSMP */
active_threads++; /* XXXSMP */
@@ -117,8 +117,9 @@ thread_dtor(void *mem, int size, void *arg)
#ifdef INVARIANTS
/* Verify that this thread is in a safe state to free. */
switch (td->td_state) {
- case TDS_SLP:
- case TDS_MTX:
+ case TDS_INHIBITED:
+ case TDS_RUNNING:
+ case TDS_CAN_RUN:
case TDS_RUNQ:
/*
* We must never unlink a thread that is in one of
@@ -126,10 +127,7 @@ thread_dtor(void *mem, int size, void *arg)
*/
panic("bad state for thread unlinking");
/* NOTREACHED */
- case TDS_UNQUEUED:
- case TDS_NEW:
- case TDS_RUNNING:
- case TDS_SURPLUS:
+ case TDS_INACTIVE:
break;
default:
panic("bad thread state");
@@ -316,7 +314,7 @@ thread_exit(void)
KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
if (ke->ke_tdspare != NULL) {
- thread_free(ke->ke_tdspare);
+ thread_stash(ke->ke_tdspare);
ke->ke_tdspare = NULL;
}
cpu_thread_exit(td); /* XXXSMP */
@@ -345,14 +343,11 @@ thread_exit(void)
*/
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount) {
- TAILQ_REMOVE(&p->p_suspended,
- p->p_singlethread, td_runq);
- setrunqueue(p->p_singlethread);
- p->p_suspcount--;
+ thread_unsuspend_one(p->p_singlethread);
}
}
PROC_UNLOCK(p);
- td->td_state = TDS_SURPLUS;
+ td->td_state = TDS_INACTIVE;
td->td_proc = NULL;
td->td_ksegrp = NULL;
td->td_last_kse = NULL;
@@ -379,7 +374,7 @@ thread_link(struct thread *td, struct ksegrp *kg)
struct proc *p;
p = kg->kg_proc;
- td->td_state = TDS_NEW;
+ td->td_state = TDS_INACTIVE;
td->td_proc = p;
td->td_ksegrp = kg;
td->td_last_kse = NULL;
@@ -427,6 +422,7 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
cpu_set_upcall(td2, ke->ke_pcb);
td2->td_ucred = crhold(td->td_ucred);
td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
+ TD_SET_CAN_RUN(td2);
setrunqueue(td2);
return (td2);
}
@@ -607,38 +603,32 @@ thread_single(int force_exit)
p->p_flag |= P_STOPPED_SINGLE;
p->p_singlethread = td;
while ((p->p_numthreads - p->p_suspcount) != 1) {
+ mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td2) {
if (td2 == td)
continue;
- switch(td2->td_state) {
- case TDS_SUSPENDED:
- if (force_exit == SINGLE_EXIT) {
- mtx_lock_spin(&sched_lock);
- TAILQ_REMOVE(&p->p_suspended,
- td, td_runq);
- p->p_suspcount--;
- setrunqueue(td); /* Should suicide. */
- mtx_unlock_spin(&sched_lock);
+ if (TD_IS_INHIBITED(td2)) {
+ if (TD_IS_SUSPENDED(td2)) {
+ if (force_exit == SINGLE_EXIT) {
+ thread_unsuspend_one(td2);
+ }
}
- case TDS_SLP:
- if (td2->td_flags & TDF_CVWAITQ)
- cv_abort(td2);
- else
- abortsleep(td2);
- break;
- /* case TDS RUNNABLE: XXXKSE maybe raise priority? */
- default: /* needed to avoid an error */
- break;
+ if ( TD_IS_SLEEPING(td2)) {
+ if (td2->td_flags & TDF_CVWAITQ)
+ cv_waitq_remove(td2);
+ else
+ unsleep(td2);
+ break;
+ }
+ if (TD_CAN_RUN(td2))
+ setrunqueue(td2);
}
}
/*
* Wake us up when everyone else has suspended.
* In the mean time we suspend as well.
*/
- mtx_lock_spin(&sched_lock);
- TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
- td->td_state = TDS_SUSPENDED;
- p->p_suspcount++;
+ thread_suspend_one(td);
mtx_unlock(&Giant);
PROC_UNLOCK(p);
mi_switch();
@@ -745,16 +735,11 @@ thread_suspend_check(int return_instead)
mtx_lock_spin(&sched_lock);
}
mtx_assert(&Giant, MA_NOTOWNED);
- p->p_suspcount++;
- td->td_state = TDS_SUSPENDED;
- TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
+ thread_suspend_one(td);
PROC_UNLOCK(p);
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount) {
- TAILQ_REMOVE(&p->p_suspended,
- p->p_singlethread, td_runq);
- p->p_suspcount--;
- setrunqueue(p->p_singlethread);
+ thread_unsuspend_one(p->p_singlethread);
}
}
p->p_stats->p_ru.ru_nivcsw++;
@@ -772,8 +757,15 @@ thread_suspend_one(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
p->p_suspcount++;
- td->td_state = TDS_SUSPENDED;
+ TD_SET_SUSPENDED(td);
TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
+ /*
+ * Hack: If we are suspending but are on the sleep queue
+ * then we are in msleep or the cv equivalent. We
+ * want to look like we have two Inhibitors.
+ */
+ if (TD_ON_SLEEPQ(td))
+ TD_SET_SLEEPING(td);
}
void
@@ -783,16 +775,9 @@ thread_unsuspend_one(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
TAILQ_REMOVE(&p->p_suspended, td, td_runq);
+ TD_CLR_SUSPENDED(td);
p->p_suspcount--;
- if (td->td_wchan != NULL) {
- td->td_state = TDS_SLP;
- } else {
- if (td->td_ksegrp->kg_slptime > 1) {
- updatepri(td->td_ksegrp);
- td->td_ksegrp->kg_slptime = 0;
- }
- setrunqueue(td);
- }
+ setrunnable(td);
}
/*
@@ -840,9 +825,7 @@ thread_single_end(void)
if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
mtx_lock_spin(&sched_lock);
while (( td = TAILQ_FIRST(&p->p_suspended))) {
- TAILQ_REMOVE(&p->p_suspended, td, td_runq);
- p->p_suspcount--;
- setrunqueue(td);
+ thread_unsuspend_one(td);
}
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index e8e2fea..08ef71f 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -76,6 +76,7 @@ kthread_create(void (*func)(void *), void *arg,
{
int error;
va_list ap;
+ struct thread *td;
struct proc *p2;
if (!proc0.p_stats /* || proc0.p_stats->p_start.tv_sec == 0 */)
@@ -103,13 +104,15 @@ kthread_create(void (*func)(void *), void *arg,
va_end(ap);
/* call the processes' main()... */
- cpu_set_fork_handler(FIRST_THREAD_IN_PROC(p2), func, arg);
+ td = FIRST_THREAD_IN_PROC(p2);
+ cpu_set_fork_handler(td, func, arg);
+ TD_SET_CAN_RUN(td);
/* Delay putting it on the run queue until now. */
mtx_lock_spin(&sched_lock);
p2->p_sflag |= PS_INMEM;
if (!(flags & RFSTOPPED)) {
- setrunqueue(FIRST_THREAD_IN_PROC(p2));
+ setrunqueue(td);
}
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 00a789c..34cc6d5 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -119,11 +119,9 @@ propagate_priority(struct thread *td)
return;
}
- KASSERT(td->td_state != TDS_SURPLUS, ("Mutex owner SURPLUS"));
MPASS(td->td_proc != NULL);
MPASS(td->td_proc->p_magic == P_MAGIC);
- KASSERT(td->td_state != TDS_SLP,
- ("sleeping thread owns a mutex"));
+ KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
if (td->td_priority <= pri) /* lower is higher priority */
return;
@@ -131,7 +129,7 @@ propagate_priority(struct thread *td)
/*
* If lock holder is actually running, just bump priority.
*/
- if (td->td_state == TDS_RUNNING) {
+ if (TD_IS_RUNNING(td)) {
td->td_priority = pri;
return;
}
@@ -150,7 +148,7 @@ propagate_priority(struct thread *td)
* but try anyhow.
* We should have a special call to do this more efficiently.
*/
- if (td->td_state == TDS_RUNQ) {
+ if (TD_ON_RUNQ(td)) {
MPASS(td->td_blocked == NULL);
remrunqueue(td);
td->td_priority = pri;
@@ -165,7 +163,7 @@ propagate_priority(struct thread *td)
/*
* If we aren't blocked on a mutex, we should be.
*/
- KASSERT(td->td_state == TDS_MTX, (
+ KASSERT(TD_ON_MUTEX(td), (
"process %d(%s):%d holds %s but isn't blocked on a mutex\n",
td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
m->mtx_object.lo_name));
@@ -619,7 +617,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
*/
td->td_blocked = m;
td->td_mtxname = m->mtx_object.lo_name;
- td->td_state = TDS_MTX;
+ TD_SET_MUTEX(td);
propagate_priority(td);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -763,6 +761,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
m, td1);
td1->td_blocked = NULL;
+ TD_CLR_MUTEX(td1);
setrunqueue(td1);
if (td->td_critnest == 1 && td1->td_priority < pri) {
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 61be8de..2ee4f51 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -854,7 +854,7 @@ fill_kinfo_proc(p, kp)
strncpy(kp->ki_wmesg, td->td_wmesg,
sizeof(kp->ki_wmesg) - 1);
}
- if (td->td_state == TDS_MTX) {
+ if (TD_ON_MUTEX(td)) {
kp->ki_kiflag |= KI_MTXBLOCK;
strncpy(kp->ki_mtxname, td->td_mtxname,
sizeof(kp->ki_mtxname) - 1);
@@ -862,14 +862,14 @@ fill_kinfo_proc(p, kp)
}
if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
- if ((td->td_state == TDS_RUNQ) ||
- (td->td_state == TDS_RUNNING)) {
+ if ((TD_ON_RUNQ(td)) ||
+ (TD_IS_RUNNING(td))) {
kp->ki_stat = SRUN;
- } else if (td->td_state == TDS_SLP) {
+ } else if (TD_IS_SLEEPING(td)) {
kp->ki_stat = SSLEEP;
} else if (P_SHOULDSTOP(p)) {
kp->ki_stat = SSTOP;
- } else if (td->td_state == TDS_MTX) {
+ } else if (TD_ON_MUTEX(td)) {
kp->ki_stat = SMTX;
} else {
kp->ki_stat = SWAIT;
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 5dbb757..4f9f516 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1410,12 +1410,11 @@ psignal(p, sig)
*/
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td) {
- if (td->td_wchan && (td->td_flags & TDF_SINTR)) {
+ if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
if (td->td_flags & TDF_CVWAITQ)
- cv_waitq_remove(td);
+ cv_abort(td);
else
- unsleep(td);
- setrunnable(td);
+ abortsleep(td);
}
}
mtx_unlock_spin(&sched_lock);
@@ -1447,7 +1446,7 @@ psignal(p, sig)
goto out;
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td) {
- if (td->td_state == TDS_SLP &&
+ if (TD_IS_SLEEPING(td) &&
(td->td_flags & TDF_SINTR))
thread_suspend_one(td);
}
@@ -1522,7 +1521,7 @@ tdsignal(struct thread *td, int sig, sig_t action)
if (action == SIG_HOLD) {
return;
}
- if (td->td_state == TDS_SLP) {
+ if (TD_IS_SLEEPING(td)) {
/*
* If thread is sleeping uninterruptibly
* we can't interrupt the sleep... the signal will
@@ -1558,7 +1557,10 @@ tdsignal(struct thread *td, int sig, sig_t action)
td->td_priority = PUSER;
}
}
- setrunnable(td);
+ if (td->td_flags & TDF_CVWAITQ)
+ cv_abort(td);
+ else
+ abortsleep(td);
}
#ifdef SMP
else {
@@ -1567,7 +1569,7 @@ tdsignal(struct thread *td, int sig, sig_t action)
* other than kicking ourselves if we are running.
* It will either never be noticed, or noticed very soon.
*/
- if (td->td_state == TDS_RUNNING && td != curthread) {
+ if (TD_IS_RUNNING(td) && td != curthread) {
forward_signal(td);
}
}
@@ -1629,7 +1631,7 @@ issignal(td)
PROC_UNLOCK(p->p_pptr);
mtx_lock_spin(&sched_lock);
stop(p); /* uses schedlock too eventually */
- td->td_state = TDS_UNQUEUED;
+ thread_suspend_one(td);
PROC_UNLOCK(p);
DROP_GIANT();
p->p_stats->p_ru.ru_nivcsw++;
@@ -1713,9 +1715,7 @@ issignal(td)
mtx_lock_spin(&sched_lock);
}
stop(p);
- p->p_suspcount++;
- td->td_state = TDS_SUSPENDED;
- TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
+ thread_suspend_one(td);
PROC_UNLOCK(p);
DROP_GIANT();
p->p_stats->p_ru.ru_nivcsw++;
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index f704121..6ccc916 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -163,7 +163,7 @@ retry:
if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
(td->td_flags & TDF_INPANIC) == 0))
goto retry;
- td->td_state = TDS_RUNNING;
+ TD_SET_RUNNING(td);
return (td);
}
@@ -229,8 +229,7 @@ remrunqueue(struct thread *td)
struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT ((td->td_state == TDS_RUNQ),
- ("remrunqueue: Bad state on run queue"));
+ KASSERT ((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
kg = td->td_ksegrp;
ke = td->td_kse;
/*
@@ -238,8 +237,8 @@ remrunqueue(struct thread *td)
* threads are BOUND.
*/
CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
- td->td_state = TDS_UNQUEUED;
kg->kg_runnable--;
+ TD_SET_CAN_RUN(td);
if ((td->td_flags & TDF_UNBOUND) == 0) {
/* Bring its kse with it, leave the thread attached */
runq_remove(&runq, ke);
@@ -300,8 +299,9 @@ setrunqueue(struct thread *td)
CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT((td->td_state != TDS_RUNQ), ("setrunqueue: bad thread state"));
- td->td_state = TDS_RUNQ;
+ KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+ ("setrunqueue: bad thread state"));
+ TD_SET_RUNQ(td);
kg = td->td_ksegrp;
kg->kg_runnable++;
if ((td->td_flags & TDF_UNBOUND) == 0) {
@@ -715,7 +715,7 @@ thread_sanity_check(struct thread *td)
}
FOREACH_THREAD_IN_GROUP(kg, td2) {
if (((td2->td_flags & TDF_UNBOUND) == 0) &&
- (td2->td_state == TDS_RUNQ)) {
+ (TD_ON_RUNQ(td2))) {
assigned++;
if (td2->td_kse == NULL) {
panic ("BOUND thread with no KSE");
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index fb33e51..74ef9ed 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -285,7 +285,7 @@ schedcpu(arg)
awake = 1;
ke->ke_flags &= ~KEF_DIDRUN;
} else if ((ke->ke_state == KES_THREAD) &&
- (ke->ke_thread->td_state == TDS_RUNNING)) {
+ (TD_IS_RUNNING(ke->ke_thread))) {
awake = 1;
/* Do not clear KEF_DIDRUN */
} else if (ke->ke_flags & KEF_DIDRUN) {
@@ -361,8 +361,7 @@ schedcpu(arg)
(kg->kg_user_pri / RQ_PPQ));
td->td_priority = kg->kg_user_pri;
- if (changedqueue &&
- td->td_state == TDS_RUNQ) {
+ if (changedqueue && TD_ON_RUNQ(td)) {
/* this could be optimised */
remrunqueue(td);
td->td_priority =
@@ -515,15 +514,17 @@ msleep(ident, mtx, priority, wmesg, timo)
}
KASSERT(p != NULL, ("msleep1"));
- KASSERT(ident != NULL && td->td_state == TDS_RUNNING, ("msleep"));
+ KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
+
+ CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
+ td, p->p_pid, p->p_comm, wmesg, ident);
td->td_wchan = ident;
td->td_wmesg = wmesg;
td->td_ksegrp->kg_slptime = 0;
td->td_priority = priority & PRIMASK;
- CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
- td, p->p_pid, p->p_comm, wmesg, ident);
TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
+ TD_SET_ON_SLEEPQ(td);
if (timo)
callout_reset(&td->td_slpcallout, timo, endtsleep, td);
/*
@@ -546,20 +547,20 @@ msleep(ident, mtx, priority, wmesg, timo)
mtx_lock_spin(&sched_lock);
PROC_UNLOCK(p);
if (sig != 0) {
- if (td->td_wchan != NULL)
+ if (TD_ON_SLEEPQ(td))
unsleep(td);
- } else if (td->td_wchan == NULL)
+ } else if (!TD_ON_SLEEPQ(td))
catch = 0;
} else
sig = 0;
- if (td->td_wchan != NULL) {
+ if (TD_ON_SLEEPQ(td)) {
p->p_stats->p_ru.ru_nvcsw++;
- td->td_state = TDS_SLP;
+ TD_SET_SLEEPING(td);
mi_switch();
}
CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
- KASSERT(td->td_state == TDS_RUNNING, ("running but not TDS_RUNNING"));
+ KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
td->td_flags &= ~TDF_SINTR;
if (td->td_flags & TDF_TIMEOUT) {
td->td_flags &= ~TDF_TIMEOUT;
@@ -577,10 +578,10 @@ msleep(ident, mtx, priority, wmesg, timo)
* has a chance to run and the callout may end up waking up
* the wrong msleep(). Yuck.
*/
- td->td_flags |= TDF_TIMEOUT;
- td->td_state = TDS_SLP;
+ TD_SET_SLEEPING(td);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
+ td->td_flags &= ~TDF_TIMOFAIL;
}
mtx_unlock_spin(&sched_lock);
@@ -621,35 +622,23 @@ endtsleep(arg)
{
register struct thread *td = arg;
- CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
- td->td_proc->p_comm);
+ CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
+ td, td->td_proc->p_pid, td->td_proc->p_comm);
mtx_lock_spin(&sched_lock);
/*
* This is the other half of the synchronization with msleep()
* described above. If the TDF_TIMEOUT flag is set, we lost the
* race and just need to put the process back on the runqueue.
*/
- if ((td->td_flags & TDF_TIMEOUT) != 0) {
- td->td_flags &= ~TDF_TIMEOUT;
- if (td->td_proc->p_sflag & PS_INMEM) {
- setrunqueue(td);
- maybe_resched(td);
- } else {
- td->td_state = TDS_SWAPPED;
- if ((td->td_proc->p_sflag & PS_SWAPPINGIN) == 0) {
- td->td_proc->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
- }
- }
- } else if (td->td_wchan != NULL) {
- if (td->td_state == TDS_SLP) /* XXXKSE */
- setrunnable(td);
- else
- unsleep(td);
+ if (TD_ON_SLEEPQ(td)) {
+ TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
+ TD_CLR_ON_SLEEPQ(td);
td->td_flags |= TDF_TIMEOUT;
} else {
td->td_flags |= TDF_TIMOFAIL;
}
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
mtx_unlock_spin(&sched_lock);
}
@@ -664,25 +653,18 @@ void
abortsleep(struct thread *td)
{
- mtx_lock_spin(&sched_lock);
+ mtx_assert(&sched_lock, MA_OWNED);
/*
* If the TDF_TIMEOUT flag is set, just leave. A
* timeout is scheduled anyhow.
*/
if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
- if (td->td_wchan != NULL) {
- if (td->td_state == TDS_SLP) { /* XXXKSE */
- setrunnable(td);
- } else {
- /*
- * Probably in a suspended state..
- * um.. dunno XXXKSE
- */
- unsleep(td);
- }
+ if (TD_ON_SLEEPQ(td)) {
+ unsleep(td);
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
}
}
- mtx_unlock_spin(&sched_lock);
}
/*
@@ -693,9 +675,9 @@ unsleep(struct thread *td)
{
mtx_lock_spin(&sched_lock);
- if (td->td_wchan != NULL) {
+ if (TD_ON_SLEEPQ(td)) {
TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
- td->td_wchan = NULL;
+ TD_CLR_ON_SLEEPQ(td);
}
mtx_unlock_spin(&sched_lock);
}
@@ -710,7 +692,6 @@ wakeup(ident)
register struct slpquehead *qp;
register struct thread *td;
struct thread *ntd;
- struct ksegrp *kg;
struct proc *p;
mtx_lock_spin(&sched_lock);
@@ -718,30 +699,13 @@ wakeup(ident)
restart:
for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
ntd = TAILQ_NEXT(td, td_slpq);
- p = td->td_proc;
if (td->td_wchan == ident) {
- TAILQ_REMOVE(qp, td, td_slpq);
- td->td_wchan = NULL;
- if (td->td_state == TDS_SLP) {
- /* OPTIMIZED EXPANSION OF setrunnable(p); */
- CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
- td, p->p_pid, p->p_comm);
- kg = td->td_ksegrp;
- if (kg->kg_slptime > 1)
- updatepri(kg);
- kg->kg_slptime = 0;
- if (p->p_sflag & PS_INMEM) {
- setrunqueue(td);
- maybe_resched(td);
- } else {
- td->td_state = TDS_SWAPPED;
- if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
- p->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
- }
- }
- /* END INLINE EXPANSION */
- }
+ unsleep(td);
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
+ p = td->td_proc;
+ CTR3(KTR_PROC,"wakeup: thread %p (pid %d, %s)",
+ td, p->p_pid, p->p_comm);
goto restart;
}
}
@@ -761,39 +725,19 @@ wakeup_one(ident)
register struct thread *td;
register struct proc *p;
struct thread *ntd;
- struct ksegrp *kg;
mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
-restart:
for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
ntd = TAILQ_NEXT(td, td_slpq);
- p = td->td_proc;
if (td->td_wchan == ident) {
- TAILQ_REMOVE(qp, td, td_slpq);
- td->td_wchan = NULL;
- if (td->td_state == TDS_SLP) {
- /* OPTIMIZED EXPANSION OF setrunnable(p); */
- CTR3(KTR_PROC,"wakeup1: thread %p (pid %d, %s)",
- td, p->p_pid, p->p_comm);
- kg = td->td_ksegrp;
- if (kg->kg_slptime > 1)
- updatepri(kg);
- kg->kg_slptime = 0;
- if (p->p_sflag & PS_INMEM) {
- setrunqueue(td);
- maybe_resched(td);
- break;
- } else {
- td->td_state = TDS_SWAPPED;
- if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
- p->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
- }
- }
- /* END INLINE EXPANSION */
- goto restart;
- }
+ unsleep(td);
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
+ p = td->td_proc;
+ CTR3(KTR_PROC,"wakeup1: thread %p (pid %d, %s)",
+ td, p->p_pid, p->p_comm);
+ break;
}
}
mtx_unlock_spin(&sched_lock);
@@ -816,11 +760,11 @@ mi_switch(void)
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
- KASSERT((td->td_state != TDS_RUNQ), ("mi_switch: called by old code"));
+ KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
- if (td->td_state != TDS_MTX &&
- td->td_state != TDS_RUNQ &&
- td->td_state != TDS_RUNNING)
+ if (!TD_ON_MUTEX(td) &&
+ !TD_ON_RUNQ(td) &&
+ !TD_IS_RUNNING(td))
mtx_assert(&Giant, MA_NOTOWNED);
#endif
KASSERT(td->td_critnest == 1,
@@ -891,7 +835,7 @@ mi_switch(void)
* then put it back on the run queue as it has not been suspended
* or stopped or any thing else similar.
*/
- if (td->td_state == TDS_RUNNING) {
+ if (TD_IS_RUNNING(td)) {
KASSERT(((ke->ke_flags & KEF_IDLEKSE) == 0),
("Idle thread in mi_switch with wrong state"));
/* Put us back on the run queue (kse and all). */
@@ -950,39 +894,33 @@ setrunnable(struct thread *td)
break;
}
switch (td->td_state) {
- case 0:
case TDS_RUNNING:
- case TDS_IWAIT:
- case TDS_SWAPPED:
+ case TDS_RUNQ:
+ return;
+ case TDS_INHIBITED:
+ /*
+ * If we are only inhibited because we are swapped out
+ * then arrange to swap in this process. Otherwise just return.
+ */
+ if (td->td_inhibitors != TDI_SWAPPED)
+ return;
+ case TDS_CAN_RUN:
+ break;
default:
- printf("state is %d", td->td_state);
+ printf("state is 0x%x", td->td_state);
panic("setrunnable(2)");
- case TDS_SUSPENDED:
- thread_unsuspend(p);
- break;
- case TDS_SLP: /* e.g. when sending signals */
- if (td->td_flags & TDF_CVWAITQ)
- cv_waitq_remove(td);
- else
- unsleep(td);
- case TDS_UNQUEUED: /* being put back onto the queue */
- case TDS_NEW: /* not yet had time to suspend */
- case TDS_RUNQ: /* not yet had time to suspend */
- break;
}
- kg = td->td_ksegrp;
- if (kg->kg_slptime > 1)
- updatepri(kg);
- kg->kg_slptime = 0;
if ((p->p_sflag & PS_INMEM) == 0) {
- td->td_state = TDS_SWAPPED;
if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
p->p_sflag |= PS_SWAPINREQ;
wakeup(&proc0);
}
} else {
- if (td->td_state != TDS_RUNQ)
- setrunqueue(td); /* XXXKSE */
+ kg = td->td_ksegrp;
+ if (kg->kg_slptime > 1)
+ updatepri(kg);
+ kg->kg_slptime = 0;
+ setrunqueue(td);
maybe_resched(td);
}
}
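[Editorial note] The msleep()/endtsleep() timeout race is also simpler under the new model: endtsleep() no longer reimplements setrunnable(), it just records which side won and runs the standard wakeup sequence. A reconstructed (and slightly simplified) timeline of the handshake, pieced together from the hunks above:

/*
 * 1.  msleep() puts td on the sleep queue, arms td_slpcallout, and
 *     calls mi_switch() with TDI_SLEEPING set.
 * 2a. If the callout fires first, endtsleep() sees TD_ON_SLEEPQ(td),
 *     removes td from the queue, sets TDF_TIMEOUT, clears TDI_SLEEPING
 *     and calls setrunnable(td): a genuine timeout.
 * 2b. If a normal wakeup() wins and the callout fires afterwards,
 *     endtsleep() finds td off the queue and sets TDF_TIMOFAIL instead.
 * 3.  On the TDF_TIMOFAIL path, msleep() goes back to sleep once
 *     (TD_SET_SLEEPING + mi_switch) so that the in-flight callout's
 *     setrunnable() wakes the right sleeper, then clears the flag.
 */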
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 0f9c7dd..0716d9b 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -95,7 +95,7 @@ thread_ctor(void *mem, int size, void *arg)
("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
td = (struct thread *)mem;
- td->td_state = TDS_NEW;
+ td->td_state = TDS_INACTIVE;
td->td_flags |= TDF_UNBOUND;
cached_threads--; /* XXXSMP */
active_threads++; /* XXXSMP */
@@ -117,8 +117,9 @@ thread_dtor(void *mem, int size, void *arg)
#ifdef INVARIANTS
/* Verify that this thread is in a safe state to free. */
switch (td->td_state) {
- case TDS_SLP:
- case TDS_MTX:
+ case TDS_INHIBITED:
+ case TDS_RUNNING:
+ case TDS_CAN_RUN:
case TDS_RUNQ:
/*
* We must never unlink a thread that is in one of
@@ -126,10 +127,7 @@ thread_dtor(void *mem, int size, void *arg)
*/
panic("bad state for thread unlinking");
/* NOTREACHED */
- case TDS_UNQUEUED:
- case TDS_NEW:
- case TDS_RUNNING:
- case TDS_SURPLUS:
+ case TDS_INACTIVE:
break;
default:
panic("bad thread state");
@@ -316,7 +314,7 @@ thread_exit(void)
KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
if (ke->ke_tdspare != NULL) {
- thread_free(ke->ke_tdspare);
+ thread_stash(ke->ke_tdspare);
ke->ke_tdspare = NULL;
}
cpu_thread_exit(td); /* XXXSMP */
@@ -345,14 +343,11 @@ thread_exit(void)
*/
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount) {
- TAILQ_REMOVE(&p->p_suspended,
- p->p_singlethread, td_runq);
- setrunqueue(p->p_singlethread);
- p->p_suspcount--;
+ thread_unsuspend_one(p->p_singlethread);
}
}
PROC_UNLOCK(p);
- td->td_state = TDS_SURPLUS;
+ td->td_state = TDS_INACTIVE;
td->td_proc = NULL;
td->td_ksegrp = NULL;
td->td_last_kse = NULL;
@@ -379,7 +374,7 @@ thread_link(struct thread *td, struct ksegrp *kg)
struct proc *p;
p = kg->kg_proc;
- td->td_state = TDS_NEW;
+ td->td_state = TDS_INACTIVE;
td->td_proc = p;
td->td_ksegrp = kg;
td->td_last_kse = NULL;
@@ -427,6 +422,7 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
cpu_set_upcall(td2, ke->ke_pcb);
td2->td_ucred = crhold(td->td_ucred);
td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
+ TD_SET_CAN_RUN(td2);
setrunqueue(td2);
return (td2);
}
@@ -607,38 +603,32 @@ thread_single(int force_exit)
p->p_flag |= P_STOPPED_SINGLE;
p->p_singlethread = td;
while ((p->p_numthreads - p->p_suspcount) != 1) {
+ mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td2) {
if (td2 == td)
continue;
- switch(td2->td_state) {
- case TDS_SUSPENDED:
- if (force_exit == SINGLE_EXIT) {
- mtx_lock_spin(&sched_lock);
- TAILQ_REMOVE(&p->p_suspended,
- td, td_runq);
- p->p_suspcount--;
- setrunqueue(td); /* Should suicide. */
- mtx_unlock_spin(&sched_lock);
+ if (TD_IS_INHIBITED(td2)) {
+ if (TD_IS_SUSPENDED(td2)) {
+ if (force_exit == SINGLE_EXIT) {
+ thread_unsuspend_one(td2);
+ }
}
- case TDS_SLP:
- if (td2->td_flags & TDF_CVWAITQ)
- cv_abort(td2);
- else
- abortsleep(td2);
- break;
- /* case TDS RUNNABLE: XXXKSE maybe raise priority? */
- default: /* needed to avoid an error */
- break;
+ if ( TD_IS_SLEEPING(td2)) {
+ if (td2->td_flags & TDF_CVWAITQ)
+ cv_waitq_remove(td2);
+ else
+ unsleep(td2);
+ break;
+ }
+ if (TD_CAN_RUN(td2))
+ setrunqueue(td2);
}
}
/*
* Wake us up when everyone else has suspended.
* In the mean time we suspend as well.
*/
- mtx_lock_spin(&sched_lock);
- TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
- td->td_state = TDS_SUSPENDED;
- p->p_suspcount++;
+ thread_suspend_one(td);
mtx_unlock(&Giant);
PROC_UNLOCK(p);
mi_switch();
@@ -745,16 +735,11 @@ thread_suspend_check(int return_instead)
mtx_lock_spin(&sched_lock);
}
mtx_assert(&Giant, MA_NOTOWNED);
- p->p_suspcount++;
- td->td_state = TDS_SUSPENDED;
- TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
+ thread_suspend_one(td);
PROC_UNLOCK(p);
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount) {
- TAILQ_REMOVE(&p->p_suspended,
- p->p_singlethread, td_runq);
- p->p_suspcount--;
- setrunqueue(p->p_singlethread);
+ thread_unsuspend_one(p->p_singlethread);
}
}
p->p_stats->p_ru.ru_nivcsw++;
@@ -772,8 +757,15 @@ thread_suspend_one(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
p->p_suspcount++;
- td->td_state = TDS_SUSPENDED;
+ TD_SET_SUSPENDED(td);
TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
+ /*
+ * Hack: If we are suspending but are on the sleep queue
+ * then we are in msleep or the cv equivalent. We
+ * want to look like we have two Inhibitors.
+ */
+ if (TD_ON_SLEEPQ(td))
+ TD_SET_SLEEPING(td);
}
void
@@ -783,16 +775,9 @@ thread_unsuspend_one(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
TAILQ_REMOVE(&p->p_suspended, td, td_runq);
+ TD_CLR_SUSPENDED(td);
p->p_suspcount--;
- if (td->td_wchan != NULL) {
- td->td_state = TDS_SLP;
- } else {
- if (td->td_ksegrp->kg_slptime > 1) {
- updatepri(td->td_ksegrp);
- td->td_ksegrp->kg_slptime = 0;
- }
- setrunqueue(td);
- }
+ setrunnable(td);
}
/*
@@ -840,9 +825,7 @@ thread_single_end(void)
if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
mtx_lock_spin(&sched_lock);
while (( td = TAILQ_FIRST(&p->p_suspended))) {
- TAILQ_REMOVE(&p->p_suspended, td, td_runq);
- p->p_suspcount--;
- setrunqueue(td);
+ thread_unsuspend_one(td);
}
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index bbe36be..881d4a3 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -182,9 +182,9 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
mtx_lock_spin(&sched_lock);
rtp_to_pri(&rtp, kg);
FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
- if (td->td_state == TDS_RUNNING) {
+ if (TD_IS_RUNNING(td)) {
td->td_kse->ke_flags |= KEF_NEEDRESCHED;
- } else if (td->td_state == TDS_RUNQ) {
+ } else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
remrunqueue(td);
td->td_priority =
@@ -215,9 +215,9 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
* scheduling info alone.
*/
FOREACH_THREAD_IN_GROUP(kg, td) {
- if (td->td_state == TDS_RUNNING) {
+ if (TD_IS_RUNNING(td)) {
td->td_kse->ke_flags |= KEF_NEEDRESCHED;
- } else if (td->td_state == TDS_RUNQ) {
+ } else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
remrunqueue(td);
td->td_priority =
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index afd4c5d..cfdd94c 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -124,7 +124,7 @@ forward_signal(struct thread *td)
* executing so that it executes ast().
*/
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT(td->td_state == TDS_RUNNING,
+ KASSERT(TD_IS_RUNNING(td),
("forward_signal: thread is not TDS_RUNNING"));
CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 00a789c..34cc6d5 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -119,11 +119,9 @@ propagate_priority(struct thread *td)
return;
}
- KASSERT(td->td_state != TDS_SURPLUS, ("Mutex owner SURPLUS"));
MPASS(td->td_proc != NULL);
MPASS(td->td_proc->p_magic == P_MAGIC);
- KASSERT(td->td_state != TDS_SLP,
- ("sleeping thread owns a mutex"));
+ KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
if (td->td_priority <= pri) /* lower is higher priority */
return;
@@ -131,7 +129,7 @@ propagate_priority(struct thread *td)
/*
* If lock holder is actually running, just bump priority.
*/
- if (td->td_state == TDS_RUNNING) {
+ if (TD_IS_RUNNING(td)) {
td->td_priority = pri;
return;
}
@@ -150,7 +148,7 @@ propagate_priority(struct thread *td)
* but try anyhow.
* We should have a special call to do this more efficiently.
*/
- if (td->td_state == TDS_RUNQ) {
+ if (TD_ON_RUNQ(td)) {
MPASS(td->td_blocked == NULL);
remrunqueue(td);
td->td_priority = pri;
@@ -165,7 +163,7 @@ propagate_priority(struct thread *td)
/*
* If we aren't blocked on a mutex, we should be.
*/
- KASSERT(td->td_state == TDS_MTX, (
+ KASSERT(TD_ON_MUTEX(td), (
"process %d(%s):%d holds %s but isn't blocked on a mutex\n",
td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
m->mtx_object.lo_name));
@@ -619,7 +617,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
*/
td->td_blocked = m;
td->td_mtxname = m->mtx_object.lo_name;
- td->td_state = TDS_MTX;
+ TD_SET_MUTEX(td);
propagate_priority(td);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -763,6 +761,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
m, td1);
td1->td_blocked = NULL;
+ TD_CLR_MUTEX(td1);
setrunqueue(td1);
if (td->td_critnest == 1 && td1->td_priority < pri) {
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index ec9958d..4c45ef7 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -1206,10 +1206,9 @@ selwakeup(sip)
sip->si_thread = NULL;
mtx_lock_spin(&sched_lock);
if (td->td_wchan == &selwait) {
- if (td->td_state == TDS_SLP)
- setrunnable(td);
- else
- cv_waitq_remove(td);
+ cv_waitq_remove(td);
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
} else
td->td_flags &= ~TDF_SELECT;
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index b604c41..521ba6a 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -600,6 +600,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
p->p_xstat = data;
mtx_lock_spin(&sched_lock);
p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
+ thread_unsuspend(p);
setrunnable(td2); /* XXXKSE */
/* Need foreach kse in proc, ... make_kse_queued(). */
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index efac248..c075a47 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -2396,9 +2396,10 @@ ttyinfo(struct tty *tp)
stmp = "KSE" ; /* XXXKSE */
} else {
if (td) {
- if (td->td_state == TDS_RUNQ) {
+ if (TD_ON_RUNQ(td) ||
+ (TD_IS_RUNNING(td))) {
stmp = "running";
- } else if (td->td_state == TDS_MTX) {
+ } else if (TD_ON_MUTEX(td)) {
stmp = td->td_mtxname;
} else if (td->td_wmesg) {
stmp = td->td_wmesg;
@@ -2413,7 +2414,7 @@ ttyinfo(struct tty *tp)
calcru(pick, &utime, &stime, NULL);
/* XXXKSE The TDS_IWAIT line is Dubious */
if (pick->p_state == PRS_NEW ||
- (td && (td->td_state == TDS_IWAIT)) ||
+ (td && (TD_AWAITING_INTR(td))) ||
pick->p_state == PRS_ZOMBIE) {
ltmp = 0;
} else {
@@ -2424,7 +2425,7 @@ ttyinfo(struct tty *tp)
ttyprintf(tp, " cmd: %s %d [%s%s] ", pick->p_comm,
pick->p_pid,
- td->td_state == TDS_MTX ? "*" : "",
+ TD_ON_MUTEX(td) ? "*" : "",
stmp);
/* Print user time. */
@@ -2461,8 +2462,8 @@ do { \
struct thread *td; \
val = 0; \
FOREACH_THREAD_IN_PROC(p, td) { \
- if (td->td_state == TDS_RUNQ || \
- td->td_state == TDS_RUNNING) { \
+ if (TD_ON_RUNQ(td) || \
+ TD_IS_RUNNING(td)) { \
val = 1; \
break; \
} \
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 631fa72..2bf0721 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1654,10 +1654,14 @@ sched_sync(void)
int
speedup_syncer()
{
+ struct thread *td;
+ td = FIRST_THREAD_IN_PROC(updateproc);
mtx_lock_spin(&sched_lock);
- if (FIRST_THREAD_IN_PROC(updateproc)->td_wchan == &lbolt) /* XXXKSE */
- setrunnable(FIRST_THREAD_IN_PROC(updateproc));
+ if (td->td_wchan == &lbolt) /* XXXKSE */
+ unsleep(td);
+ TD_CLR_SLEEPING(td);
+ setrunnable(td);
mtx_unlock_spin(&sched_lock);
if (rushjob < syncdelay / 2) {
rushjob += 1;
diff --git a/sys/posix4/ksched.c b/sys/posix4/ksched.c
index bbe36be..881d4a3 100644
--- a/sys/posix4/ksched.c
+++ b/sys/posix4/ksched.c
@@ -182,9 +182,9 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
mtx_lock_spin(&sched_lock);
rtp_to_pri(&rtp, kg);
FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
- if (td->td_state == TDS_RUNNING) {
+ if (TD_IS_RUNNING(td)) {
td->td_kse->ke_flags |= KEF_NEEDRESCHED;
- } else if (td->td_state == TDS_RUNQ) {
+ } else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
remrunqueue(td);
td->td_priority =
@@ -215,9 +215,9 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
* scheduling info alone.
*/
FOREACH_THREAD_IN_GROUP(kg, td) {
- if (td->td_state == TDS_RUNNING) {
+ if (TD_IS_RUNNING(td)) {
td->td_kse->ke_flags |= KEF_NEEDRESCHED;
- } else if (td->td_state == TDS_RUNQ) {
+ } else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
remrunqueue(td);
td->td_priority =
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index d5741ec..ece52d1 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -266,6 +266,7 @@ struct thread {
/* Cleared during fork1() or thread_sched_upcall() */
#define td_startzero td_flags
int td_flags; /* (j) TDF_* flags. */
+ int td_inhibitors; /* (j) Why can not run */
struct kse *td_last_kse; /* Where it wants to be if possible. */
struct kse *td_kse; /* Current KSE if running. */
int td_dupfd; /* (k) Ret value from fdopen. XXX */
@@ -301,17 +302,11 @@ struct thread {
*/
struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */
enum {
- TDS_NEW = 0x20,
- TDS_UNQUEUED,
- TDS_SLP,
- TDS_MTX,
+ TDS_INACTIVE = 0x20,
+ TDS_INHIBITED,
+ TDS_CAN_RUN,
TDS_RUNQ,
TDS_RUNNING,
- TDS_SUSPENDED, /* would have liked to have run */
- TDS_IWAIT,
- TDS_SURPLUS,
- TDS_SWAPPED,
- TDS_SUSP_SLP /* on sleep queue AND suspend queue */
} td_state;
register_t td_retval[2]; /* (k) Syscall aux returns. */
struct callout td_slpcallout; /* (h) Callout for sleep. */
@@ -319,17 +314,68 @@ struct thread {
struct vm_object *td_kstack_obj;/* (a) Kstack object. */
vm_offset_t td_kstack; /* Kernel VA of kstack. */
};
-/* flags kept in td_flags */
-#define TDF_UNBOUND 0x000001 /* may give away the kse, uses the kg runq */
-#define TDF_INPANIC 0x000002 /* Caused a panic, let it drive crashdump */
-#define TDF_SINTR 0x000008 /* Sleep is interruptible. */
-#define TDF_TIMEOUT 0x000010 /* Timing out during sleep. */
-#define TDF_SELECT 0x000040 /* Selecting; wakeup/waiting danger. */
-#define TDF_CVWAITQ 0x000080 /* Thread is on a cv_waitq (not slpq). */
-#define TDF_UPCALLING 0x000100 /* This thread is doing an upcall. */
+/* flags kept in td_flags */
+#define TDF_UNBOUND 0x000001 /* may give away the kse, uses the kg runq */
+#define TDF_INPANIC 0x000002 /* Caused a panic, let it drive crashdump */
+#define TDF_SINTR 0x000008 /* Sleep is interruptible. */
+#define TDF_TIMEOUT 0x000010 /* Timing out during sleep. */
+#define TDF_SELECT 0x000040 /* Selecting; wakeup/waiting danger. */
+#define TDF_CVWAITQ 0x000080 /* Thread is on a cv_waitq (not slpq). */
+#define TDF_UPCALLING 0x000100 /* This thread is doing an upcall. */
+#define TDF_ONSLEEPQ 0x000200 /* On the sleep queue */
#define TDF_INMSLEEP 0x000400 /* Don't recurse in msleep() */
-#define TDF_TIMOFAIL 0x001000 /* Timeout from sleep after we were awake. */
-#define TDF_DEADLKTREAT 0x800000 /* Lock aquisition - deadlock treatment. */
+#define TDF_TIMOFAIL 0x001000 /* Timeout from sleep after we were awake. */
+#define TDF_DEADLKTREAT 0x800000 /* Lock acquisition - deadlock treatment. */
+
+#define TDI_SUSPENDED 0x01 /* on suspension queue */
+#define TDI_SLEEPING 0x02 /* Actually asleep! */ /* actually tricky */
+#define TDI_SWAPPED 0x04 /* stack not in mem.. bad juju if run */
+#define TDI_MUTEX 0x08 /* Stopped on a mutex */
+#define TDI_IWAIT 0x10 /* Awaiting interrupt */
+
+#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
+#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
+#define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED)
+#define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED)
+#define TD_ON_MUTEX(td) ((td)->td_inhibitors & TDI_MUTEX)
+#define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT)
+#define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING)
+#define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ)
+#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
+#define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED)
+
+#define TD_SET_INHIB(td, inhib) do { \
+ (td)->td_state = TDS_INHIBITED; \
+ (td)->td_inhibitors |= inhib; \
+} while (0)
+
+#define TD_CLR_INHIB(td, inhib) do { \
+ if (((td)->td_inhibitors & inhib) && \
+ (((td)->td_inhibitors &= ~inhib) == 0)) \
+ (td)->td_state = TDS_CAN_RUN; \
+} while (0)
+
+#define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING)
+#define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED)
+#define TD_SET_MUTEX(td) TD_SET_INHIB((td), TDI_MUTEX)
+#define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED)
+#define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT)
+
+#define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING)
+#define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED)
+#define TD_CLR_MUTEX(td) TD_CLR_INHIB((td), TDI_MUTEX)
+#define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED)
+#define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT)
+
+#define TD_SET_RUNNING(td) do {(td)->td_state = TDS_RUNNING; } while (0)
+#define TD_SET_RUNQ(td) do {(td)->td_state = TDS_RUNQ; } while (0)
+#define TD_SET_CAN_RUN(td) do {(td)->td_state = TDS_CAN_RUN; } while (0)
+#define TD_SET_ON_SLEEPQ(td) do {(td)->td_flags |= TDF_ONSLEEPQ; } while (0)
+#define TD_CLR_ON_SLEEPQ(td) do { \
+ (td)->td_flags &= ~TDF_ONSLEEPQ; \
+ (td)->td_wchan = NULL; \
+} while (0)
+
/*
* Traps for young players:
@@ -372,15 +418,15 @@ struct kse {
enum {
KES_IDLE = 0x10,
KES_ONRUNQ,
- KES_UNQUEUED, /* in transit */
- KES_THREAD /* slaved to thread state */
- } ke_state; /* (j) S* process status. */
+ KES_UNQUEUED, /* in transit */
+ KES_THREAD /* slaved to thread state */
+ } ke_state; /* (j) S* process status. */
void *ke_mailbox; /* the userland mailbox address */
struct thread *ke_tdspare; /* spare thread for upcalls */
#define ke_endzero ke_dummy
#define ke_startcopy ke_endzero
- u_char ke_dummy; /* */
+ u_char ke_dummy;
#define ke_endcopy ke_mdstorage
void *ke_upcall;
@@ -460,7 +506,7 @@ struct proc {
struct plimit *p_limit; /* (m) Process limits. */
struct vm_object *p_upages_obj; /* (a) Upages object. */
struct procsig *p_procsig; /* (c) Signal actions, state (CPU). */
-
+
struct ksegrp p_ksegrp;
struct kse p_kse;
@@ -471,11 +517,11 @@ struct proc {
int p_flag; /* (c) P_* flags. */
int p_sflag; /* (j) PS_* flags. */
enum {
- PRS_NEW = 0, /* In creation */
- PRS_NORMAL, /* KSEs can be run */
- PRS_WAIT, /* Waiting on interrupt ? */
+ PRS_NEW = 0, /* In creation */
+ PRS_NORMAL, /* KSEs can be run */
+ PRS_WAIT, /* Waiting on interrupt ? */
PRS_ZOMBIE
- } p_state; /* (j) S* process status. */
+ } p_state; /* (j) S* process status. */
pid_t p_pid; /* (b) Process identifier. */
LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */
LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */
@@ -735,9 +781,7 @@ sigonstack(size_t sp)
} while (0)
/* Check whether a thread is safe to be swapped out. */
-#define thread_safetoswapout(td) \
- ((td)->td_state == TDS_RUNQ || \
- (td)->td_state == TDS_SLP)
+#define thread_safetoswapout(td) (TD_IS_SLEEPING(td) || TD_IS_SUSPENDED(td))
/* Lock and unlock process arguments. */
#define PARGS_LOCK(p) mtx_lock(&pargs_ref_lock)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 41e1719..fd18159 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -499,16 +499,18 @@ faultin(p)
PROC_UNLOCK(p);
vm_proc_swapin(p);
- FOREACH_THREAD_IN_PROC (p, td)
+ FOREACH_THREAD_IN_PROC (p, td) {
pmap_swapin_thread(td);
+ TD_CLR_SWAPPED(td);
+ }
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_SWAPPINGIN;
p->p_sflag |= PS_INMEM;
FOREACH_THREAD_IN_PROC (p, td)
- if (td->td_state == TDS_SWAPPED)
- setrunqueue(td);
+ if (TD_CAN_RUN(td))
+ setrunnable(td);
wakeup(&p->p_sflag);
@@ -558,10 +560,11 @@ loop:
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td) {
/*
- * A runnable thread of a process swapped out is in
- * TDS_SWAPPED.
+ * An otherwise runnable thread of a process
+ * swapped out has only the TDI_SWAPPED bit set.
+ *
*/
- if (td->td_state == TDS_SWAPPED) {
+ if (td->td_inhibitors == TDI_SWAPPED) {
kg = td->td_ksegrp;
pri = p->p_swtime + kg->kg_slptime;
if ((p->p_sflag & PS_SWAPINREQ) == 0) {
@@ -849,18 +852,20 @@ swapout(p)
p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
PROC_UNLOCK(p);
- FOREACH_THREAD_IN_PROC (p, td)
- if (td->td_state == TDS_RUNQ) { /* XXXKSE */
+ FOREACH_THREAD_IN_PROC (p, td) /* shouldn't be possible, but..... */
+ if (TD_ON_RUNQ(td)) { /* XXXKSE */
+ panic("swapping out runnable process");
remrunqueue(td); /* XXXKSE */
- td->td_state = TDS_SWAPPED;
}
p->p_sflag &= ~PS_INMEM;
p->p_sflag |= PS_SWAPPING;
mtx_unlock_spin(&sched_lock);
vm_proc_swapout(p);
- FOREACH_THREAD_IN_PROC(p, td)
+ FOREACH_THREAD_IN_PROC(p, td) {
pmap_swapout_thread(td);
+ TD_SET_SWAPPED(td);
+ }
mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_SWAPPING;
p->p_swtime = 0;
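[Editorial note] In the VM glue, "swapped out" stops being a distinct thread state: swapout() marks each thread TDI_SWAPPED after its stack is paged out, and faultin() clears the bit again, so a swapped-out sleeper simply carries two inhibitors. A hedged sketch of the pairing, with the shape taken from the hunks above (kernel context assumed, not standalone):

	/* swapout() side: kstack gone, so the thread must not run. */
	FOREACH_THREAD_IN_PROC(p, td) {
		pmap_swapout_thread(td);
		TD_SET_SWAPPED(td);	/* adds TDI_SWAPPED to td_inhibitors */
	}

	/* faultin() side: kstack back in core. */
	FOREACH_THREAD_IN_PROC(p, td) {
		pmap_swapin_thread(td);
		TD_CLR_SWAPPED(td);	/* may leave TDI_SLEEPING etc. in place */
	}
	FOREACH_THREAD_IN_PROC(p, td)
		if (TD_CAN_RUN(td))	/* no inhibitors left at all */
			setrunnable(td);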
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index a1b8adb..78a5348 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -88,7 +88,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
vm_object_t object;
vm_map_t map;
int paging;
- struct ksegrp *kg;
struct thread *td;
totalp = &total;
@@ -111,42 +110,34 @@ vmtotal(SYSCTL_HANDLER_ARGS)
mtx_lock_spin(&sched_lock);
switch (p->p_state) {
case PRS_NEW:
- if (p->p_sflag & PS_INMEM)
- totalp->t_rq++;
- else
- totalp->t_sw++;
mtx_unlock_spin(&sched_lock);
continue;
break;
default:
FOREACH_THREAD_IN_PROC(p, td) {
+ /* Need new statistics XXX */
switch (td->td_state) {
- case TDS_MTX:
- case TDS_SLP:
- kg = td->td_ksegrp; /* XXXKSE */
- if (p->p_sflag & PS_INMEM) {
+ case TDS_INHIBITED:
+ if (TD_ON_MUTEX(td) ||
+ (td->td_inhibitors ==
+ TDI_SWAPPED)) {
+ totalp->t_sw++;
+ } else if (TD_IS_SLEEPING(td) ||
+ TD_AWAITING_INTR(td) ||
+ TD_IS_SUSPENDED(td)) {
if (td->td_priority <= PZERO)
totalp->t_dw++;
- else if (kg->kg_slptime
- < maxslp)
+ else
totalp->t_sl++;
- } else if (kg->kg_slptime < maxslp)
- totalp->t_sw++;
- if (kg->kg_slptime >= maxslp) {
- continue;
}
break;
+ case TDS_CAN_RUN:
+ totalp->t_sw++;
+ break;
case TDS_RUNQ:
case TDS_RUNNING:
- if (p->p_sflag & PS_INMEM)
- totalp->t_rq++;
- else
- totalp->t_sw++;
- continue;
-
- case TDS_IWAIT:
- totalp->t_sl++;
+ totalp->t_rq++;
continue;
default:
break;
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index e38b86d..e20ddfe 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1155,9 +1155,9 @@ rescan0:
mtx_lock_spin(&sched_lock);
breakout = 0;
FOREACH_THREAD_IN_PROC(p, td) {
- if (td->td_state != TDS_RUNQ &&
- td->td_state != TDS_RUNNING &&
- td->td_state != TDS_SLP) {
+ if (!TD_ON_RUNQ(td) &&
+ !TD_IS_RUNNING(td) &&
+ !TD_IS_SLEEPING(td)) {
breakout = 1;
break;
}
@@ -1498,9 +1498,9 @@ vm_daemon()
mtx_lock_spin(&sched_lock);
breakout = 0;
FOREACH_THREAD_IN_PROC(p, td) {
- if (td->td_state != TDS_RUNQ &&
- td->td_state != TDS_RUNNING &&
- td->td_state != TDS_SLP) {
+ if (!TD_ON_RUNQ(td) &&
+ !TD_IS_RUNNING(td) &&
+ !TD_IS_SLEEPING(td)) {
breakout = 1;
break;
}