author    jeff <jeff@FreeBSD.org>  2007-06-12 07:47:09 +0000
committer jeff <jeff@FreeBSD.org>  2007-06-12 07:47:09 +0000
commit    bc31b141bba6473797641dcd39c7109a6c262fe4 (patch)
tree      036f608c444de2ce81d35f5edb89bcfce3d51790
parent    60f4b707fe5fac27f8dae57235d92f743aada286 (diff)
- Move some common code out of sched_fork_exit() and back into fork_exit().
-rw-r--r--  sys/kern/kern_fork.c   | 12 ++++++++++++
-rw-r--r--  sys/kern/sched_4bsd.c  | 19 ++++---------------
-rw-r--r--  sys/kern/sched_ule.c   | 19 ++++---------------
3 files changed, 20 insertions(+), 30 deletions(-)
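In short, the deadthread reaping and the final thread_unlock(), previously duplicated in both schedulers' sched_fork_exit(), now live once in the machine-independent fork_exit() trampoline. Below is a minimal userspace sketch of the resulting shape; the stub types and the deadthread_slot variable are hypothetical stand-ins for the kernel's struct thread, PCPU accessors, and locking, not the real definitions.

/* Sketch only: stubs replace the kernel's real types and primitives. */
#include <stdio.h>

struct thread { int id; };

static struct thread *deadthread_slot;  /* stands in for the per-CPU deadthread */

static void thread_stash(struct thread *td) { printf("stashed %d\n", td->id); }
static void thread_unlock(struct thread *td) { (void)td; }

/* After the change, the scheduler hook does MD lock/cpu glue only. */
static void sched_fork_exit(struct thread *td) { (void)td; }

/* The common code now appears once, in the MI trampoline. */
static void fork_exit(struct thread *td)
{
        struct thread *dtd;

        sched_fork_exit(td);
        if ((dtd = deadthread_slot) != NULL) {
                deadthread_slot = NULL;
                thread_stash(dtd);
        }
        thread_unlock(td);
}

int main(void)
{
        struct thread parent = { 1 }, child = { 2 };

        deadthread_slot = &parent;  /* pretend a thread died on this CPU */
        fork_exit(&child);          /* the new child reaps it exactly once */
        return 0;
}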
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index f20cefe..c0e3204 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -768,6 +768,7 @@ fork_exit(callout, arg, frame)
 {
 	struct proc *p;
 	struct thread *td;
+	struct thread *dtd;
 
 	td = curthread;
 	p = td->td_proc;
@@ -778,6 +779,17 @@ fork_exit(callout, arg, frame)
 	sched_fork_exit(td);
 
 	/*
+	 * Processes normally resume in mi_switch() after being
+	 * cpu_switch()'ed to, but when children start up they arrive here
+	 * instead, so we must do much the same things as mi_switch() would.
+	 */
+	if ((dtd = PCPU_GET(deadthread))) {
+		PCPU_SET(deadthread, NULL);
+		thread_stash(dtd);
+	}
+	thread_unlock(td);
+
+	/*
 	 * cpu_set_fork_handler intercepts this function call to
 	 * have this call a non-return function to stay in kernel mode.
 	 * initproc has its own fork handler, but it does return.
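The comment added above captures the key invariant: an exiting thread cannot free its own stack while it is still running on it, so the switch path parks it in a per-CPU deadthread slot and whichever thread runs next reclaims it. A freshly forked child resumes in fork_exit() rather than returning through mi_switch(), so fork_exit() must now perform that reclamation itself. A hypothetical sketch of the handoff follows; cpu_switch_sim() and reap_deadthread() are invented names for illustration, not kernel functions.

#include <stdio.h>

struct thread { int id; int exiting; };

static struct thread *deadthread;  /* one slot per CPU in the real kernel */

/*
 * The switch path parks an exiting thread here; it cannot free its own
 * resources while still executing on its own stack.
 */
static void cpu_switch_sim(struct thread *old, struct thread *new)
{
        (void)new;
        if (old->exiting)
                deadthread = old;
}

/* Every resume path (a mi_switch() return or fork_exit()) must do this. */
static void reap_deadthread(void)
{
        if (deadthread != NULL) {
                printf("reclaiming thread %d\n", deadthread->id);
                deadthread = NULL;
        }
}

int main(void)
{
        struct thread a = { 1, 1 }, b = { 2, 0 };

        cpu_switch_sim(&a, &b);  /* a exits; b is switched in */
        reap_deadthread();       /* b reclaims a's resources  */
        return 0;
}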
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index a4b1e08..7e368bb 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1410,27 +1410,16 @@ sched_throw(struct thread *td)
 }
 
 void
-sched_fork_exit(struct thread *ctd)
+sched_fork_exit(struct thread *td)
 {
-	struct thread *td;
 
 	/*
 	 * Finish setting up thread glue so that it begins execution in a
 	 * non-nested critical section with sched_lock held but not recursed.
 	 */
-	ctd->td_oncpu = PCPU_GET(cpuid);
-	sched_lock.mtx_lock = (uintptr_t)ctd;
-	THREAD_LOCK_ASSERT(ctd, MA_OWNED | MA_NOTRECURSED);
-	/*
-	 * Processes normally resume in mi_switch() after being
-	 * cpu_switch()'ed to, but when children start up they arrive here
-	 * instead, so we must do much the same things as mi_switch() would.
-	 */
-	if ((td = PCPU_GET(deadthread))) {
-		PCPU_SET(deadthread, NULL);
-		thread_stash(td);
-	}
-	thread_unlock(ctd);
+	td->td_oncpu = PCPU_GET(cpuid);
+	sched_lock.mtx_lock = (uintptr_t)td;
+	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
 }
 
 #define KERN_SWITCH_INCLUDE 1
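What remains in each scheduler's sched_fork_exit() is pure lock glue: record which CPU the child runs on and make the child the recorded owner of sched_lock, so the thread_unlock() now done in fork_exit() releases a lock held exactly once. A sketch of that owner handoff, assuming a simplified thin lock that stores the owning thread pointer in mtx_lock; sched_fork_exit_sim() and pcpu_cpuid() are invented stand-ins.

#include <assert.h>
#include <stdint.h>

struct thread { int td_oncpu; };
struct mtx { uintptr_t mtx_lock; };  /* simplified: owner pointer only */

static struct mtx sched_lock;

static int pcpu_cpuid(void) { return 0; }  /* stand-in for PCPU_GET(cpuid) */

/*
 * All that is left for either scheduler: note the CPU and claim the
 * scheduler lock on behalf of the new thread, unrecursed.
 */
static void sched_fork_exit_sim(struct thread *td)
{
        td->td_oncpu = pcpu_cpuid();
        sched_lock.mtx_lock = (uintptr_t)td;
        assert(sched_lock.mtx_lock == (uintptr_t)td);  /* owned, not recursed */
}

int main(void)
{
        struct thread child = { -1 };

        sched_fork_exit_sim(&child);
        return 0;
}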
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 83d6833..904b155 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -2145,27 +2145,16 @@ sched_throw(struct thread *td)
 }
 
 void
-sched_fork_exit(struct thread *ctd)
+sched_fork_exit(struct thread *td)
 {
-	struct thread *td;
 
 	/*
 	 * Finish setting up thread glue so that it begins execution in a
 	 * non-nested critical section with sched_lock held but not recursed.
 	 */
-	ctd->td_oncpu = PCPU_GET(cpuid);
-	sched_lock.mtx_lock = (uintptr_t)ctd;
-	THREAD_LOCK_ASSERT(ctd, MA_OWNED | MA_NOTRECURSED);
-	/*
-	 * Processes normally resume in mi_switch() after being
-	 * cpu_switch()'ed to, but when children start up they arrive here
-	 * instead, so we must do much the same things as mi_switch() would.
-	 */
-	if ((td = PCPU_GET(deadthread))) {
-		PCPU_SET(deadthread, NULL);
-		thread_stash(td);
-	}
-	thread_unlock(ctd);
+	td->td_oncpu = PCPU_GET(cpuid);
+	sched_lock.mtx_lock = (uintptr_t)td;
+	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
 }
 
 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");