summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_fork.c
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2007-06-04 23:53:34 +0000
committerjeff <jeff@FreeBSD.org>2007-06-04 23:53:34 +0000
commit2fc40334862dcaac9472a7183f8c89b9346b2f68 (patch)
treec7f2d3522844cbb359db81a0857083b714c9cea3 /sys/kern/kern_fork.c
parentd72d9125823ece5b24b1632ae0a8f55cd40acacb (diff)
downloadFreeBSD-src-2fc40334862dcaac9472a7183f8c89b9346b2f68.zip
FreeBSD-src-2fc40334862dcaac9472a7183f8c89b9346b2f68.tar.gz
Commit 6/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling synchronization. - Use the per-process spinlock rather than the sched_lock for per-process scheduling synchronization. - Replace the tail-end of fork_exit() with a scheduler specific routine which can do the appropriate lock manipulations. Tested by: kris, current@ Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc. Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
Diffstat (limited to 'sys/kern/kern_fork.c')
-rw-r--r--sys/kern/kern_fork.c44
1 files changed, 14 insertions, 30 deletions
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 8fa8ce2..ae3531c 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -407,8 +407,15 @@ again:
lastpid = trypid;
p2 = newproc;
+ td2 = FIRST_THREAD_IN_PROC(newproc);
p2->p_state = PRS_NEW; /* protect against others */
p2->p_pid = trypid;
+ /*
+ * Allow the scheduler to initialize the child.
+ */
+ thread_lock(td);
+ sched_fork(td, td2);
+ thread_unlock(td);
AUDIT_ARG(pid, p2->p_pid);
LIST_INSERT_HEAD(&allproc, p2, p_list);
LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
@@ -476,8 +483,6 @@ again:
* Start by zeroing the section of proc that is zero-initialized,
* then copy the section that is copied directly from the parent.
*/
- td2 = FIRST_THREAD_IN_PROC(p2);
-
/* Allocate and switch to an alternate kstack if specified. */
if (pages != 0)
vm_thread_new_altkstack(td2, pages);
@@ -501,15 +506,9 @@ again:
p2->p_flag = 0;
if (p1->p_flag & P_PROFIL)
startprofclock(p2);
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p2);
p2->p_sflag = PS_INMEM;
- /*
- * Allow the scheduler to adjust the priority of the child and
- * parent while we hold the sched_lock.
- */
- sched_fork(td, td2);
-
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p2);
td2->td_ucred = crhold(p2->p_ucred);
#ifdef AUDIT
audit_proc_fork(p1, p2);
@@ -693,18 +692,20 @@ again:
* Set the child start time and mark the process as being complete.
*/
microuptime(&p2->p_stats->p_start);
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p2);
p2->p_state = PRS_NORMAL;
+ PROC_SUNLOCK(p2);
/*
* If RFSTOPPED not requested, make child runnable and add to
* run queue.
*/
if ((flags & RFSTOPPED) == 0) {
+ thread_lock(td2);
TD_SET_CAN_RUN(td2);
sched_add(td2, SRQ_BORING);
+ thread_unlock(td2);
}
- mtx_unlock_spin(&sched_lock);
/*
* Now can be swapped.
@@ -778,31 +779,14 @@ fork_exit(callout, arg, frame)
struct proc *p;
struct thread *td;
- /*
- * Finish setting up thread glue so that it begins execution in a
- * non-nested critical section with sched_lock held but not recursed.
- */
td = curthread;
p = td->td_proc;
- td->td_oncpu = PCPU_GET(cpuid);
KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
- sched_lock.mtx_lock = (uintptr_t)td;
- mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
td, td->td_sched, p->p_pid, p->p_comm);
- /*
- * Processes normally resume in mi_switch() after being
- * cpu_switch()'ed to, but when children start up they arrive here
- * instead, so we must do much the same things as mi_switch() would.
- */
- if ((td = PCPU_GET(deadthread))) {
- PCPU_SET(deadthread, NULL);
- thread_stash(td);
- }
- mtx_unlock_spin(&sched_lock);
-
+ sched_fork_exit(td);
/*
* cpu_set_fork_handler intercepts this function call to
* have this call a non-return function to stay in kernel mode.
OpenPOWER on IntegriCloud