summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2003-10-16 08:53:46 +0000
committerjeff <jeff@FreeBSD.org>2003-10-16 08:53:46 +0000
commit4aea3a9433e80a27ebfdc96bbf34edc628c9749b (patch)
tree58676b73754d9322936b32b75441e5fb2dec634e
parent991febf6dd83fc12812470ce7a43503ff2b86f2c (diff)
downloadFreeBSD-src-4aea3a9433e80a27ebfdc96bbf34edc628c9749b.zip
FreeBSD-src-4aea3a9433e80a27ebfdc96bbf34edc628c9749b.tar.gz
- Collapse sched_switchin() and sched_switchout() into sched_switch(). Now
mi_switch() calls sched_switch(), which calls cpu_switch(). This is actually one fewer function call than before.
-rw-r--r--sys/kern/kern_synch.c13
-rw-r--r--sys/kern/sched_4bsd.c19
-rw-r--r--sys/kern/sched_ule.c17
-rw-r--r--sys/sys/sched.h3
4 files changed, 21 insertions(+), 31 deletions(-)
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 22dd49b..1b05640 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -457,9 +457,7 @@ mi_switch(void)
{
struct bintime new_switchtime;
struct thread *td;
- struct thread *newtd;
struct proc *p;
- u_int sched_nest;
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
td = curthread; /* XXX */
@@ -510,18 +508,9 @@ mi_switch(void)
PCPU_SET(switchtime, new_switchtime);
CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
- sched_nest = sched_lock.mtx_recurse;
if (td->td_proc->p_flag & P_SA)
thread_switchout(td);
- sched_switchout(td);
-
- newtd = choosethread();
- if (td != newtd)
- cpu_switch(td, newtd); /* SHAZAM!! */
-
- sched_lock.mtx_recurse = sched_nest;
- sched_lock.mtx_lock = (uintptr_t)td;
- sched_switchin(td);
+ sched_switch(td);
/*
* Start setting up stats etc. for the incoming thread.
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 7f11c4b..542dce4 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -564,16 +564,10 @@ sched_sleep(struct thread *td, u_char prio)
}
void
-sched_switchin(struct thread *td)
-{
-
- mtx_assert(&sched_lock, MA_OWNED);
- td->td_oncpu = PCPU_GET(cpuid);
-}
-
-void
-sched_switchout(struct thread *td)
+sched_switch(struct thread *td)
{
+ struct thread *newtd;
+ u_long sched_nest;
struct kse *ke;
struct proc *p;
@@ -603,6 +597,13 @@ sched_switchout(struct thread *td)
*/
kse_reassign(ke);
}
+ sched_nest = sched_lock.mtx_recurse;
+ newtd = choosethread();
+ if (td != newtd)
+ cpu_switch(td, newtd);
+ sched_lock.mtx_recurse = sched_nest;
+ sched_lock.mtx_lock = (uintptr_t)td;
+ td->td_oncpu = PCPU_GET(cpuid);
}
void
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 71646b6..1045122 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -785,8 +785,10 @@ sched_prio(struct thread *td, u_char prio)
}
void
-sched_switchout(struct thread *td)
+sched_switch(struct thread *td)
{
+ struct thread *newtd;
+ u_int sched_nest;
struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
@@ -823,13 +825,12 @@ sched_switchout(struct thread *td)
*/
if (td->td_proc->p_flag & P_SA)
kse_reassign(ke);
-}
-
-void
-sched_switchin(struct thread *td)
-{
- /* struct kse *ke = td->td_kse; */
- mtx_assert(&sched_lock, MA_OWNED);
+ sched_nest = sched_lock.mtx_recurse;
+ newtd = choosethread();
+ if (td != newtd)
+ cpu_switch(td, newtd);
+ sched_lock.mtx_recurse = sched_nest;
+ sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
}
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 7fc9df2..cd804cc 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -58,8 +58,7 @@ void sched_exit_thread(struct thread *td, struct thread *child);
void sched_fork_thread(struct thread *td, struct thread *child);
void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td, u_char prio);
-void sched_switchin(struct thread *td);
-void sched_switchout(struct thread *td);
+void sched_switch(struct thread *td);
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);
OpenPOWER on IntegriCloud