author     bde <bde@FreeBSD.org>   2003-10-29 14:40:41 +0000
committer  bde <bde@FreeBSD.org>   2003-10-29 14:40:41 +0000
commit     6bce6afbe7fdd1d2d6f8d6d50002cc47fdfa3b96 (patch)
tree       ecbcad096e75390f5b59a0145e94d46e33b45610 /sys/kern
parent     86956669e331575dc91197028b2d219b1919b54f (diff)
Removed sched_nest variable in sched_switch().  Context switches always
begin with sched_lock held but not recursed, so this variable was always 0.

Removed fixup of sched_lock.mtx_recurse after context switches in
sched_switch().  Context switches always end with this variable in the
same state that it began in, so there is no need to fix it up.  Only
sched_lock.mtx_lock really needs a fixup.

Replaced fixup of sched_lock.mtx_recurse in fork_exit() by an assertion
that sched_lock is owned and not recursed after it is fixed up.  This
assertion must match the one in mi_switch(), and if sched_lock were
recursed then a non-null fixup of sched_lock.mtx_recurse would probably
be needed again, unlike in sched_switch(), since fork_exit() doesn't
return to its caller in the normal way.
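The change rests on a single invariant: sched_switch() is entered and exited
with sched_lock held exactly once, so a saved copy of mtx_recurse could only
ever be 0.  Below is a minimal userland sketch of that bookkeeping; the
toy_mtx struct and toy_* functions are hypothetical stand-ins for
illustration, not kernel code.

/*
 * Toy model of the owner/recursion bookkeeping the patch relies on.
 * Because the lock is held exactly once across a context switch,
 * saving and restoring the recursion count is a no-op; only the
 * owner field needs a fixup.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_mtx {
        uintptr_t    mtx_lock;          /* owning thread id, as in struct mtx */
        unsigned int mtx_recurse;       /* recursion count */
};

static struct toy_mtx toy_sched_lock;

static void
toy_sched_switch(uintptr_t td, uintptr_t newtd)
{
        /* Entry invariant: held by td, not recursed. */
        assert(toy_sched_lock.mtx_lock == td);
        assert(toy_sched_lock.mtx_recurse == 0);

        /*
         * cpu_switch() would run here.  Whichever thread resumes
         * inherits the lock from the thread that switched to it, so
         * its only fixup is to mark itself as owner; mtx_recurse was
         * 0 before the switch and is still 0 afterwards.
         */
        toy_sched_lock.mtx_lock = newtd;

        /* Exit invariant: still not recursed, nothing to restore. */
        assert(toy_sched_lock.mtx_recurse == 0);
}

int
main(void)
{
        toy_sched_lock.mtx_lock = 1;    /* pretend thread 1 owns the lock */
        toy_sched_switch(1, 2);
        printf("mtx_recurse after switch: %u\n", toy_sched_lock.mtx_recurse);
        return (0);
}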
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_fork.c   | 2
-rw-r--r--  sys/kern/sched_4bsd.c  | 3
-rw-r--r--  sys/kern/sched_ule.c   | 3
3 files changed, 1 insertion(+), 7 deletions(-)
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 5330dcd..94b2553 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -778,7 +778,7 @@ fork_exit(callout, arg, frame)
* non-nested critical section with sched_lock held but not recursed.
*/
sched_lock.mtx_lock = (uintptr_t)td;
- sched_lock.mtx_recurse = 0;
+ mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
cpu_critical_fork_exit();
CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
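For reference, with the deleted line dropped and the added line in place, the
sequence in fork_exit() reads (reconstructed from the hunk above):

        sched_lock.mtx_lock = (uintptr_t)td;
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
        cpu_critical_fork_exit();

mtx_assert() only expands to a real check in kernels built with the
INVARIANTS option, so where the old code always performed a store, the new
code performs either a sanity check or nothing at all.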
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 6480b49..1a23157 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -567,7 +567,6 @@ void
sched_switch(struct thread *td)
{
struct thread *newtd;
- u_long sched_nest;
struct kse *ke;
struct proc *p;

@@ -597,11 +596,9 @@ sched_switch(struct thread *td)
*/
kse_reassign(ke);
}
- sched_nest = sched_lock.mtx_recurse;
newtd = choosethread();
if (td != newtd)
cpu_switch(td, newtd);
- sched_lock.mtx_recurse = sched_nest;
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
}
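Similarly, the tail of sched_switch() in sched_4bsd.c after the removals is
left as (reconstructed from the hunk above, remaining lines only):

        newtd = choosethread();
        if (td != newtd)
                cpu_switch(td, newtd);
        sched_lock.mtx_lock = (uintptr_t)td;
        td->td_oncpu = PCPU_GET(cpuid);
}

The sched_ule.c hunk below leaves its sched_switch() with the same tail.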
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a41d712..c8c4618 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -806,7 +806,6 @@ void
sched_switch(struct thread *td)
{
struct thread *newtd;
- u_int sched_nest;
struct kse *ke;

mtx_assert(&sched_lock, MA_OWNED);
@@ -847,11 +846,9 @@ sched_switch(struct thread *td)
if (td->td_proc->p_flag & P_SA)
kse_reassign(ke);
}
- sched_nest = sched_lock.mtx_recurse;
newtd = choosethread();
if (td != newtd)
cpu_switch(td, newtd);
- sched_lock.mtx_recurse = sched_nest;
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
}