author		jhb <jhb@FreeBSD.org>	2004-07-02 19:09:50 +0000
committer	jhb <jhb@FreeBSD.org>	2004-07-02 19:09:50 +0000
commit		1b16b181d12075ffc084f2a593e16fe3dd8f6f6d (patch)
tree		e89f55b0c72410220a98331e61987c812e0316dd /sys
parent		4b39413aeb9e3e3ff1341b98477edddee8807db5 (diff)
- Change mi_switch() and sched_switch() to accept an optional thread to
  switch to.  If a non-NULL thread pointer is passed in, the CPU switches
  to that thread directly rather than calling choosethread() to pick a
  thread to switch to.  (See the sketch after this list.)
- Make sched_switch() aware of idle threads so that it does
  TD_SET_CAN_RUN() instead of putting them on the run queue, rather than
  requiring every caller of mi_switch() that can be invoked from an idle
  thread to handle this itself.
- Move the constants for the arguments to mi_switch() and thread_single()
  out of the middle of the function prototypes and up above into their own
  section.
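The reworked interface can be exercised as in the following minimal sketch.
The helper example_yield() and its argument are invented here purely for
illustration; every call site touched by this commit simply passes NULL,
which keeps the old behavior of letting choosethread() select the next
thread.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>

/*
 * Hypothetical voluntary-yield helper (not part of this commit) showing
 * the new calling convention.  A NULL second argument preserves the old
 * behavior: sched_switch() falls back to choosethread().  A non-NULL
 * thread pointer makes the CPU switch to that thread directly.
 */
static void
example_yield(struct thread *preferred)
{

	mtx_lock_spin(&sched_lock);
	mi_switch(SW_VOL, preferred);	/* preferred may be NULL */
	mtx_unlock_spin(&sched_lock);
}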
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_idle.c	5
-rw-r--r--	sys/kern/kern_intr.c	4
-rw-r--r--	sys/kern/kern_mutex.c	2
-rw-r--r--	sys/kern/kern_shutdown.c	2
-rw-r--r--	sys/kern/kern_sig.c	4
-rw-r--r--	sys/kern/kern_subr.c	2
-rw-r--r--	sys/kern/kern_synch.c	6
-rw-r--r--	sys/kern/kern_thread.c	4
-rw-r--r--	sys/kern/sched_4bsd.c	17
-rw-r--r--	sys/kern/sched_ule.c	14
-rw-r--r--	sys/kern/subr_sleepqueue.c	4
-rw-r--r--	sys/kern/subr_trap.c	2
-rw-r--r--	sys/kern/subr_turnstile.c	2
-rw-r--r--	sys/sys/proc.h	17
-rw-r--r--	sys/sys/sched.h	2
-rw-r--r--	sys/vm/vm_zeroidle.c	2
16 files changed, 51 insertions, 38 deletions
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index 62d9704..5644d8f 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -58,7 +58,7 @@ idle_setup(void *dummy)
p->p_flag |= P_NOLOAD;
mtx_lock_spin(&sched_lock);
td = FIRST_THREAD_IN_PROC(p);
- td->td_state = TDS_CAN_RUN;
+ TD_SET_CAN_RUN(td);
td->td_flags |= TDF_IDLETD;
td->td_priority = PRI_MAX_IDLE;
mtx_unlock_spin(&sched_lock);
@@ -86,8 +86,7 @@ idle_proc(void *dummy)
cpu_idle();
mtx_lock_spin(&sched_lock);
- td->td_state = TDS_CAN_RUN;
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 920b9da..d11e9d2 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -416,7 +416,7 @@ ithread_schedule(struct ithd *ithread, int do_switch)
("ithread_schedule: Bad state for curthread."));
if (ctd->td_flags & TDF_IDLETD)
ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
- mi_switch(SW_INVOL);
+ mi_switch(SW_INVOL, NULL);
} else {
curthread->td_flags |= TDF_NEEDRESCHED;
}
@@ -618,7 +618,7 @@ restart:
if (!ithd->it_need) {
TD_SET_IWAIT(td);
CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
}
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index a4f0446..2388983 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -695,7 +695,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
"_mtx_unlock_sleep: %p switching out lock=%p", m,
(void *)m->mtx_lock);
- mi_switch(SW_INVOL);
+ mi_switch(SW_INVOL, NULL);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
m, (void *)m->mtx_lock);
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 491bc9e..b9bfd39 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -307,7 +307,7 @@ boot(int howto)
/*
* Allow interrupt threads to run
*/
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
DELAY(1000);
}
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index e3466a9..d17cbe2 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -2019,7 +2019,7 @@ ptracestop(struct thread *td, int sig)
thread_suspend_one(td);
PROC_UNLOCK(p);
DROP_GIANT();
- mi_switch(SW_INVOL);
+ mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
}
@@ -2168,7 +2168,7 @@ issignal(td)
thread_suspend_one(td);
PROC_UNLOCK(p);
DROP_GIANT();
- mi_switch(SW_INVOL);
+ mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
PROC_LOCK(p);
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index d662f5e..7ccccdd 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -430,7 +430,7 @@ uio_yield(void)
mtx_lock_spin(&sched_lock);
DROP_GIANT();
sched_prio(td, td->td_ksegrp->kg_user_pri); /* XXXKSE */
- mi_switch(SW_INVOL);
+ mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 54f7cdf..5c211d1 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -285,7 +285,7 @@ wakeup_one(ident)
* The machine independent parts of context switching.
*/
void
-mi_switch(int flags)
+mi_switch(int flags, struct thread *newtd)
{
struct bintime new_switchtime;
struct thread *td;
@@ -349,7 +349,7 @@ mi_switch(int flags)
(void *)td, (long)p->p_pid, p->p_comm);
if (td->td_proc->p_flag & P_SA)
thread_switchout(td);
- sched_switch(td);
+ sched_switch(td, newtd);
CTR3(KTR_PROC, "mi_switch: new thread %p (pid %ld, %s)",
(void *)td, (long)p->p_pid, p->p_comm);
@@ -468,7 +468,7 @@ yield(struct thread *td, struct yield_args *uap)
mtx_assert(&Giant, MA_NOTOWNED);
mtx_lock_spin(&sched_lock);
sched_prio(td, PRI_MAX_TIMESHARE);
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = 0;
return (0);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 8554b48..4b7bd47 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -908,7 +908,7 @@ thread_single(int force_exit)
*/
thread_suspend_one(td);
PROC_UNLOCK(p);
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -1011,7 +1011,7 @@ thread_suspend_check(int return_instead)
}
}
PROC_UNLOCK(p);
- mi_switch(SW_INVOL);
+ mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
}
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index ae8492f..5d8961e 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -637,9 +637,8 @@ sched_sleep(struct thread *td)
}
void
-sched_switch(struct thread *td)
+sched_switch(struct thread *td, struct thread *newtd)
{
- struct thread *newtd;
struct kse *ke;
struct proc *p;
@@ -651,6 +650,8 @@ sched_switch(struct thread *td)
if ((p->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
+ if (newtd != NULL && (newtd->td_proc->p_flag & P_NOLOAD) == 0)
+ sched_tdcnt++;
td->td_lastcpu = td->td_oncpu;
td->td_last_kse = ke;
td->td_flags &= ~TDF_NEEDRESCHED;
@@ -658,9 +659,12 @@ sched_switch(struct thread *td)
/*
* At the last moment, if this thread is still marked RUNNING,
* then put it back on the run queue as it has not been suspended
- * or stopped or any thing else similar.
+ * or stopped or any thing else similar. We never put the idle
+ * threads on the run queue, however.
*/
- if (TD_IS_RUNNING(td)) {
+ if (td == PCPU_GET(idlethread))
+ TD_SET_CAN_RUN(td);
+ else if (TD_IS_RUNNING(td)) {
/* Put us back on the run queue (kse and all). */
setrunqueue(td);
} else if (p->p_flag & P_SA) {
@@ -671,7 +675,8 @@ sched_switch(struct thread *td)
*/
kse_reassign(ke);
}
- newtd = choosethread();
+ if (newtd == NULL)
+ newtd = choosethread();
if (td != newtd)
cpu_switch(td, newtd);
sched_lock.mtx_lock = (uintptr_t)td;
@@ -830,7 +835,7 @@ sched_bind(struct thread *td, int cpu)
ke->ke_state = KES_THREAD;
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
#endif
}
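For readers following the 4BSD hunk above, the decision now made at the
tail of sched_switch() can be summarized in isolation.  This is a
simplified sketch, not the committed code: the helper below is invented
for illustration and omits the sched_tdcnt load accounting shown in the
diff.

/*
 * Simplified sketch of the tail of the new 4BSD sched_switch().
 * Idle threads are only marked able to run and are never enqueued;
 * a caller-supplied newtd short-circuits choosethread().
 */
static void
sched_switch_tail_sketch(struct thread *td, struct thread *newtd,
    struct kse *ke, struct proc *p)
{

	if (td == PCPU_GET(idlethread))
		TD_SET_CAN_RUN(td);	/* idle thread: never enqueue */
	else if (TD_IS_RUNNING(td))
		setrunqueue(td);	/* still runnable: back on the run queue */
	else if (p->p_flag & P_SA)
		kse_reassign(ke);	/* hand the kse back for reuse */

	if (newtd == NULL)
		newtd = choosethread();	/* no directed switch requested */
	if (td != newtd)
		cpu_switch(td, newtd);
}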
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 874dbd1..6340b71 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -788,7 +788,7 @@ kseq_choose(struct kseq *kseq)
ke = runq_choose(kseq->ksq_curr);
if (ke == NULL) {
/*
- * We already swaped once and didn't get anywhere.
+ * We already swapped once and didn't get anywhere.
*/
if (swap)
break;
@@ -1128,9 +1128,8 @@ sched_prio(struct thread *td, u_char prio)
}
void
-sched_switch(struct thread *td)
+sched_switch(struct thread *td, struct thread *newtd)
{
- struct thread *newtd;
struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
@@ -1147,7 +1146,9 @@ sched_switch(struct thread *td)
* to the new cpu. This is the case in sched_bind().
*/
if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
- if (TD_IS_RUNNING(td)) {
+ if (td == PCPU_GET(idlethread))
+ TD_SET_CAN_RUN(td);
+ else if (TD_IS_RUNNING(td)) {
kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
setrunqueue(td);
} else {
@@ -1163,7 +1164,10 @@ sched_switch(struct thread *td)
kse_reassign(ke);
}
}
- newtd = choosethread();
+ if (newtd == NULL)
+ newtd = choosethread();
+ else
+ kseq_load_add(KSEQ_SELF(), newtd->td_kse);
if (td != newtd)
cpu_switch(td, newtd);
sched_lock.mtx_lock = (uintptr_t)td;
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index 79bbf2c..a7a07d5 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -409,7 +409,7 @@ sleepq_switch(void *wchan)
sched_sleep(td);
TD_SET_SLEEPING(td);
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
(void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
@@ -449,7 +449,7 @@ sleepq_check_timeout(void)
else if (callout_stop(&td->td_slpcallout) == 0) {
td->td_flags |= TDF_TIMEOUT;
TD_SET_SLEEPING(td);
- mi_switch(SW_INVOL);
+ mi_switch(SW_INVOL, NULL);
}
return (0);
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 11550c0..b7b461e 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -242,7 +242,7 @@ ast(struct trapframe *framep)
#endif
mtx_lock_spin(&sched_lock);
sched_prio(td, kg->kg_user_pri);
- mi_switch(SW_INVOL);
+ mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index cae7e9a..c983379 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -559,7 +559,7 @@ turnstile_wait(struct turnstile *ts, struct lock_object *lock,
CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
lock, lock->lo_name);
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
if (LOCK_LOG_TEST(lock, 0))
CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 46b5f5d..6e268b2 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -675,6 +675,16 @@ struct proc {
#ifdef _KERNEL
+/* Flags for mi_switch(). */
+#define SW_VOL 0x0001 /* Voluntary switch. */
+#define SW_INVOL 0x0002 /* Involuntary switch. */
+
+/* How values for thread_single(). */
+#define SINGLE_NO_EXIT 0
+#define SINGLE_EXIT 1
+
+/* XXXKSE: Missing values for thread_signal_check(). */
+
#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
MALLOC_DECLARE(M_PGRP);
@@ -840,10 +850,7 @@ void fork_exit(void (*)(void *, struct trapframe *), void *,
void fork_return(struct thread *, struct trapframe *);
int inferior(struct proc *p);
int leavepgrp(struct proc *p);
-void mi_switch(int flags);
-/* Flags for mi_switch(). */
-#define SW_VOL 0x0001 /* Voluntary switch. */
-#define SW_INVOL 0x0002 /* Involuntary switch. */
+void mi_switch(int flags, struct thread *newtd);
int p_candebug(struct thread *td, struct proc *p);
int p_cansee(struct thread *td, struct proc *p);
int p_cansched(struct thread *td, struct proc *p);
@@ -906,8 +913,6 @@ void thread_link(struct thread *td, struct ksegrp *kg);
void thread_reap(void);
struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
int thread_single(int how);
-#define SINGLE_NO_EXIT 0 /* values for 'how' */
-#define SINGLE_EXIT 1
void thread_single_end(void);
void thread_stash(struct thread *td);
int thread_suspend_check(int how);
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 8a7e84e..461842a 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -66,7 +66,7 @@ void sched_fork_thread(struct thread *td, struct thread *child);
fixpt_t sched_pctcpu(struct thread *td);
void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td);
-void sched_switch(struct thread *td);
+void sched_switch(struct thread *td, struct thread *newtd);
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 06338ff..71b7a71 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -153,7 +153,7 @@ vm_pagezero(void __unused *arg)
pages += vm_page_zero_idle();
if (pages > idlezero_maxrun || sched_runnable()) {
mtx_lock_spin(&sched_lock);
- mi_switch(SW_VOL);
+ mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
pages = 0;
}