author     julian <julian@FreeBSD.org>  2002-08-01 18:45:10 +0000
committer  julian <julian@FreeBSD.org>  2002-08-01 18:45:10 +0000
commit     b3aca85def667b6b08a1073688d0b21bcc075ffb (patch)
tree       8a197b12a5345ddab4947db75c41005118cfc2c2
parent     c9a153d8bb4613add10b9e75b74196428e7aedd6 (diff)
Slight cleanup of some comments/whitespace.
Make idle process state more consistent.
Add an assert on thread state.
Clean up idleproc/mi_switch() interaction.
Use a local instead of referencing curthread 7 times in a row
(I've been told curthread can be expensive on some architectures).
Remove some commented out code.
Add a little commented out code (completion coming soon).

Reviewed by:	jhb@freebsd.org
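[Editor's note] The curthread point is worth a concrete illustration. Below is a minimal user-space sketch, not FreeBSD source; get_curthread() and the struct layout are stand-ins invented for the example. It models the pattern the kern_intr.c hunk adopts: pay for the curthread lookup once, cache it in a local, and reference the local thereafter, since on some architectures curthread expands to a per-CPU read or function call rather than a plain variable.

    #include <stdio.h>

    struct thread {
            int     td_critnest;
    };

    static struct thread thread0;

    /*
     * Stand-in for the kernel's curthread macro, which on some
     * architectures is a per-CPU read or function call, not a
     * simple global.  (Hypothetical accessor, for illustration.)
     */
    static struct thread *
    get_curthread(void)
    {
            return (&thread0);
    }
    #define curthread       get_curthread()

    int
    main(void)
    {
            struct thread *ctd;

            ctd = curthread;                /* pay for the lookup once */
            ctd->td_critnest = 1;           /* all later uses go through */
            printf("%d\n", ctd->td_critnest);       /* the cached local */
            return (0);
    }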
-rw-r--r--  sys/kern/kern_idle.c    3
-rw-r--r--  sys/kern/kern_intr.c   17
-rw-r--r--  sys/kern/kern_sig.c    29
-rw-r--r--  sys/kern/kern_synch.c  31
4 files changed, 55 insertions, 25 deletions
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index a7e8998..49aacf8 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -63,7 +63,7 @@ idle_setup(void *dummy)
p->p_flag |= P_NOLOAD;
p->p_state = PRS_NORMAL;
td = FIRST_THREAD_IN_PROC(p);
- td->td_state = TDS_UNQUEUED;
+ td->td_state = TDS_UNQUEUED;
td->td_kse->ke_flags |= KEF_IDLEKSE;
#ifdef SMP
}
@@ -112,6 +112,7 @@ idle_proc(void *dummy)
mtx_lock_spin(&sched_lock);
p->p_stats->p_ru.ru_nvcsw++;
+ td->td_state = TDS_UNQUEUED;
mi_switch();
mtx_unlock_spin(&sched_lock);
}
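[Editor's note] To see why idle_proc() now sets td_state to TDS_UNQUEUED just before calling mi_switch(), compare with the kern_synch.c hunk below: mi_switch() puts a thread back on the run queue only while it is still marked TDS_RUNNING. A tiny compilable model of that handshake; the names mirror the kernel's, but the code is illustrative only and assumes just these two states are in play.

    #include <stdio.h>

    enum tdstate { TDS_RUNNING, TDS_UNQUEUED };

    struct thread {
            enum tdstate    td_state;
    };

    /* Models the requeue decision in this commit's mi_switch(). */
    static int
    mi_switch_requeues(const struct thread *td)
    {
            return (td->td_state == TDS_RUNNING);
    }

    int
    main(void)
    {
            struct thread idle = { TDS_RUNNING };

            idle.td_state = TDS_UNQUEUED;   /* as idle_proc() now does */
            printf("requeued: %d\n", mi_switch_requeues(&idle));
            return (0);
    }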
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index fb9c092..db0a3f5 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -351,6 +351,7 @@ ithread_schedule(struct ithd *ithread, int do_switch)
{
struct int_entropy entropy;
struct thread *td;
+ struct thread *ctd;
struct proc *p;
/*
@@ -359,13 +360,14 @@ ithread_schedule(struct ithd *ithread, int do_switch)
if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers))
return (EINVAL);
+ ctd = curthread;
/*
* If any of the handlers for this ithread claim to be good
* sources of entropy, then gather some.
*/
if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) {
entropy.vector = ithread->it_vector;
- entropy.proc = curthread->td_proc;;
+ entropy.proc = ctd->td_proc;
random_harvest(&entropy, sizeof(entropy), 2, 0,
RANDOM_INTERRUPT);
}
@@ -390,13 +392,12 @@ ithread_schedule(struct ithd *ithread, int do_switch)
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
setrunqueue(td);
if (do_switch &&
- (curthread->td_critnest == 1)/* &&
- (curthread->td_state == TDS_RUNNING) XXXKSE*/) {
-#if 0 /* not needed in KSE */
- if (curthread != PCPU_GET(idlethread))
- setrunqueue(curthread);
-#endif
- curthread->td_proc->p_stats->p_ru.ru_nivcsw++;
+ (ctd->td_critnest == 1)) {
+ KASSERT((ctd->td_state == TDS_RUNNING),
+ ("ithread_schedule: Bad state for curthread."));
+ ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
+ if (ctd->td_kse->ke_flags & KEF_IDLEKSE)
+ ctd->td_state = TDS_UNQUEUED;
mi_switch();
} else {
curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
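[Editor's note] The deleted "#if 0" block defended against a state (curthread not TDS_RUNNING) that the KSE work makes impossible, so the commit documents the invariant with a KASSERT instead. A user-space model of the same idea, with assert(3) standing in for KASSERT; all types and names here are illustrative stand-ins, not kernel code.

    #include <assert.h>

    enum tdstate { TDS_RUNNING, TDS_UNQUEUED };

    struct thread {
            enum tdstate    td_state;
            int             td_critnest;
    };

    static void
    schedule_switch(struct thread *ctd)
    {
            if (ctd->td_critnest == 1) {
                    /* Assert the invariant rather than handle it. */
                    assert(ctd->td_state == TDS_RUNNING);
                    /* ... charge ru_nivcsw and call mi_switch() here ... */
            }
    }

    int
    main(void)
    {
            struct thread td = { TDS_RUNNING, 1 };

            schedule_switch(&td);
            return (0);
    }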
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 2c39e13..87711ab 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1364,7 +1364,7 @@ psignal(p, sig)
* The signal is not ignored or caught.
*/
mtx_lock_spin(&sched_lock);
- thread_unsuspend(p); /* Checks if should do it. */
+ thread_unsuspend(p);
mtx_unlock_spin(&sched_lock);
goto out;
}
@@ -1373,7 +1373,9 @@ psignal(p, sig)
/*
* Already stopped, don't need to stop again
* (If we did the shell could get confused).
+ * Just make sure the signal STOP bit is set.
*/
+ p->p_flag |= P_STOPPED_SGNL;
SIGDELSET(p->p_siglist, sig);
goto out;
}
@@ -1383,10 +1385,8 @@ psignal(p, sig)
* If a thread is sleeping interruptibly, simulate a
* wakeup so that when it is continued it will be made
* runnable and can look at the signal. However, don't make
- * the process runnable, leave it stopped.
+ * the PROCESS runnable, leave it stopped.
* It may run a bit until it hits a thread_suspend_check().
- *
- * XXXKSE I don't understand this at all.
*/
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td) {
@@ -1403,6 +1403,8 @@ psignal(p, sig)
/*
* XXXKSE What about threads that are waiting on mutexes?
* Shouldn't they abort too?
+ * No, hopefully mutexes are short-lived. They'll
+ * eventually hit thread_suspend_check().
*/
} else if (p->p_state == PRS_NORMAL) {
if (prop & SA_CONT) {
@@ -1419,6 +1421,7 @@ psignal(p, sig)
* cause the process to run.
*/
if (prop & SA_STOP) {
+ int should_signal = 1;
if (action != SIG_DFL)
goto runfast;
@@ -1430,8 +1433,22 @@ psignal(p, sig)
goto out;
SIGDELSET(p->p_siglist, sig);
p->p_xstat = sig;
- PROC_LOCK(p->p_pptr);
- if (!(p->p_pptr->p_procsig->ps_flag & PS_NOCLDSTOP))
+ PROC_LOCK(p->p_pptr); /* XXX unneeded? */
+#if 0
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if (td->td_state == TDS_RUNNING) {
+ /*
+ * all other states must be in
+ * the kernel
+ */
+ should_signal = 0;
+ break;
+ }
+ }
+/* don't enable until the equivalent code is in thread_suspend_check() */
+#endif
+ if (!(p->p_pptr->p_procsig->ps_flag & PS_NOCLDSTOP) &&
+ should_signal)
psignal(p->p_pptr, SIGCHLD);
PROC_UNLOCK(p->p_pptr);
stop(p);
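[Editor's note] The "#if 0" block added above stages a future policy: defer the SIGCHLD to the parent until no thread in the stopping process is still TDS_RUNNING (the comment notes it must wait for matching thread_suspend_check() support). A stand-alone model of the disabled scan, with local stand-ins for the kernel's thread list and state names.

    #include <stdio.h>

    enum tdstate { TDS_RUNNING, TDS_UNQUEUED };

    /*
     * Models the disabled FOREACH_THREAD_IN_PROC() scan: signal the
     * parent only once no thread is still running in user mode.
     */
    static int
    should_signal_parent(const enum tdstate *td_states, int nthreads)
    {
            int i;

            for (i = 0; i < nthreads; i++)
                    if (td_states[i] == TDS_RUNNING)
                            return (0);
            return (1);
    }

    int
    main(void)
    {
            enum tdstate states[] = { TDS_UNQUEUED, TDS_RUNNING };

            printf("signal parent: %d\n", should_signal_parent(states, 2));
            return (0);
    }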
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index e32a681..1b03692 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -807,6 +807,7 @@ mi_switch()
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
+ KASSERT((td->td_state != TDS_RUNQ), ("mi_switch: called by old code"));
#ifdef INVARIANTS
if (td->td_state != TDS_MTX &&
td->td_state != TDS_RUNQ &&
@@ -866,7 +867,7 @@ mi_switch()
#endif
/*
- * Pick a new current process and record its start time.
+ * Finish up stats for outgoing thread.
*/
cnt.v_swtch++;
PCPU_SET(switchtime, new_switchtime);
@@ -877,23 +878,33 @@ mi_switch()
ke->ke_oncpu = NOCPU;
ke->ke_flags &= ~KEF_NEEDRESCHED;
/*
- * At the last moment: if this KSE is not on the run queue,
- * it needs to be freed correctly and the thread treated accordingly.
+ * At the last moment, if this thread is still marked RUNNING,
+ * then put it back on the run queue as it has not been suspended
+ * or stopped or anything else similar.
*/
- if ((td->td_state == TDS_RUNNING) &&
- ((ke->ke_flags & KEF_IDLEKSE) == 0)) {
+ if (td->td_state == TDS_RUNNING) {
+ KASSERT(((ke->ke_flags & KEF_IDLEKSE) == 0),
+ ("Idle thread in mi_switch with wrong state"));
/* Put us back on the run queue (kse and all). */
setrunqueue(td);
- } else if ((td->td_flags & TDF_UNBOUND) &&
- (td->td_state != TDS_RUNQ)) { /* in case of old code */
+ } else if (td->td_flags & TDF_UNBOUND) {
/*
- * We will not be on the run queue.
- * Someone else can use the KSE if they need it.
+ * We will not be on the run queue. So we must be
+ * sleeping or similar. If it's available,
+ * someone else can use the KSE if they need it.
+ * XXXKSE KSE loaning will change this.
*/
td->td_kse = NULL;
kse_reassign(ke);
}
- cpu_switch();
+
+ cpu_switch(); /* SHAZAM!! */
+
+ /*
+ * Start setting up stats etc. for the incoming thread.
+ * Similar code in fork_exit() is returned to by cpu_switch()
+ * in the case of a new thread/process.
+ */
td->td_kse->ke_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_recurse = sched_nest;
sched_lock.mtx_lock = (uintptr_t)td;
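[Editor's note] Putting the kern_synch.c changes together, the tail of mi_switch() now makes a three-way choice: requeue a thread still marked TDS_RUNNING, hand an unbound sleeping thread's KSE back for reassignment, or (for bound and idle threads) do neither. A compilable sketch of that decision; the types and the kse_reassign() stand-in are illustrative only, not the kernel's.

    #include <stddef.h>
    #include <stdio.h>

    enum tdstate { TDS_RUNNING, TDS_SLP, TDS_UNQUEUED };
    #define TDF_UNBOUND     0x01

    struct thread;
    struct kse {
            struct thread   *ke_thread;
    };
    struct thread {
            enum tdstate     td_state;
            int              td_flags;
            struct kse      *td_kse;
    };

    /* Stand-in for kse_reassign(): free the KSE for another thread. */
    static void
    kse_reassign(struct kse *ke)
    {
            ke->ke_thread = NULL;
    }

    static void
    mi_switch_tail(struct thread *td)
    {
            struct kse *ke = td->td_kse;

            if (td->td_state == TDS_RUNNING) {
                    /* setrunqueue(td): still runnable, requeue kse and all. */
            } else if (td->td_flags & TDF_UNBOUND) {
                    /* Sleeping or similar: let another thread use the KSE. */
                    td->td_kse = NULL;
                    kse_reassign(ke);
            }
    }

    int
    main(void)
    {
            struct kse k = { NULL };
            struct thread td = { TDS_SLP, TDF_UNBOUND, &k };

            mi_switch_tail(&td);
            printf("kse freed: %d\n", td.td_kse == NULL);
            return (0);
    }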