From bc31b141bba6473797641dcd39c7109a6c262fe4 Mon Sep 17 00:00:00 2001
From: jeff
Date: Tue, 12 Jun 2007 07:47:09 +0000
Subject: - Move some common code out of sched_fork_exit() and back into
 fork_exit().

---
 sys/kern/kern_fork.c  | 12 ++++++++++++
 sys/kern/sched_4bsd.c | 19 ++++---------------
 sys/kern/sched_ule.c  | 19 ++++---------------
 3 files changed, 20 insertions(+), 30 deletions(-)

diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index f20cefe..c0e3204 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -768,6 +768,7 @@ fork_exit(callout, arg, frame)
 {
 	struct proc *p;
 	struct thread *td;
+	struct thread *dtd;
 
 	td = curthread;
 	p = td->td_proc;
@@ -778,6 +779,17 @@ fork_exit(callout, arg, frame)
 	sched_fork_exit(td);
 
 	/*
+	 * Processes normally resume in mi_switch() after being
+	 * cpu_switch()'ed to, but when children start up they arrive here
+	 * instead, so we must do much the same things as mi_switch() would.
+	 */
+	if ((dtd = PCPU_GET(deadthread))) {
+		PCPU_SET(deadthread, NULL);
+		thread_stash(dtd);
+	}
+	thread_unlock(td);
+
+	/*
 	 * cpu_set_fork_handler intercepts this function call to
 	 * have this call a non-return function to stay in kernel mode.
 	 * initproc has its own fork handler, but it does return.
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index a4b1e08..7e368bb 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1410,27 +1410,16 @@ sched_throw(struct thread *td)
 }
 
 void
-sched_fork_exit(struct thread *ctd)
+sched_fork_exit(struct thread *td)
 {
-	struct thread *td;
 
 	/*
 	 * Finish setting up thread glue so that it begins execution in a
 	 * non-nested critical section with sched_lock held but not recursed.
 	 */
-	ctd->td_oncpu = PCPU_GET(cpuid);
-	sched_lock.mtx_lock = (uintptr_t)ctd;
-	THREAD_LOCK_ASSERT(ctd, MA_OWNED | MA_NOTRECURSED);
-	/*
-	 * Processes normally resume in mi_switch() after being
-	 * cpu_switch()'ed to, but when children start up they arrive here
-	 * instead, so we must do much the same things as mi_switch() would.
-	 */
-	if ((td = PCPU_GET(deadthread))) {
-		PCPU_SET(deadthread, NULL);
-		thread_stash(td);
-	}
-	thread_unlock(ctd);
+	td->td_oncpu = PCPU_GET(cpuid);
+	sched_lock.mtx_lock = (uintptr_t)td;
+	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
 }
 
 #define KERN_SWITCH_INCLUDE 1
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 83d6833..904b155 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -2145,27 +2145,16 @@ sched_throw(struct thread *td)
 }
 
 void
-sched_fork_exit(struct thread *ctd)
+sched_fork_exit(struct thread *td)
 {
-	struct thread *td;
 
 	/*
 	 * Finish setting up thread glue so that it begins execution in a
 	 * non-nested critical section with sched_lock held but not recursed.
 	 */
-	ctd->td_oncpu = PCPU_GET(cpuid);
-	sched_lock.mtx_lock = (uintptr_t)ctd;
-	THREAD_LOCK_ASSERT(ctd, MA_OWNED | MA_NOTRECURSED);
-	/*
-	 * Processes normally resume in mi_switch() after being
-	 * cpu_switch()'ed to, but when children start up they arrive here
-	 * instead, so we must do much the same things as mi_switch() would.
-	 */
-	if ((td = PCPU_GET(deadthread))) {
-		PCPU_SET(deadthread, NULL);
-		thread_stash(td);
-	}
-	thread_unlock(ctd);
+	td->td_oncpu = PCPU_GET(cpuid);
+	sched_lock.mtx_lock = (uintptr_t)td;
+	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}
 
 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
-- 
cgit v1.1
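
For reference, here is what the two functions look like with the patch applied. This is a sketch assembled only from the hunks above, not the full files: the "..." elisions stand for surrounding code the patch does not touch, and sched_fork_exit() is now identical in sched_4bsd.c and sched_ule.c.

/*
 * fork_exit() in sys/kern/kern_fork.c after the patch (abridged).
 * A new child thread lands here from cpu_switch() instead of
 * returning through mi_switch(), so the machine-independent
 * cleanup mi_switch() would normally do happens here once,
 * regardless of which scheduler is compiled in.
 */
void
fork_exit(callout, arg, frame)
	...
{
	struct proc *p;
	struct thread *td;
	struct thread *dtd;

	td = curthread;
	p = td->td_proc;
	...
	/* Scheduler-specific glue: set td_oncpu, fix up sched_lock. */
	sched_fork_exit(td);

	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((dtd = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);
	}
	thread_unlock(td);
	...
}

/* sched_fork_exit(), now the same in both schedulers. */
void
sched_fork_exit(struct thread *td)
{

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td->td_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_lock = (uintptr_t)td;
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}

The net effect matches the subject line: reaping the previous dead thread and dropping the thread lock are machine-independent, so they move out of the two per-scheduler sched_fork_exit() copies and back into fork_exit(), leaving each scheduler responsible only for establishing the lock and CPU state.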