-rw-r--r--  sys/kern/kern_fork.c  | 2 +-
-rw-r--r--  sys/kern/sched_4bsd.c | 3 ---
-rw-r--r--  sys/kern/sched_ule.c  | 3 ---
3 files changed, 1 insertion(+), 7 deletions(-)
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 5330dcd..94b2553 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -778,7 +778,7 @@ fork_exit(callout, arg, frame)
 	 * non-nested critical section with sched_lock held but not recursed.
 	 */
 	sched_lock.mtx_lock = (uintptr_t)td;
-	sched_lock.mtx_recurse = 0;
+	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
 	cpu_critical_fork_exit();
 	CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
 	    p->p_comm);
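
The kern_fork.c hunk replaces a blind reset of sched_lock's recursion count with an assertion of the invariant the surrounding comment already states: the new thread finishes fork_exit()'s setup holding sched_lock exactly once, not recursed. Below is a minimal userland sketch of that "assert the invariant instead of forcing it" pattern; fake_lock, claim_forced() and claim_checked() are invented for the sketch and only stand in for the kernel's struct mtx and mtx_assert(), which this code does not use.

/*
 * Illustrative only: fake_lock and the claim_*() helpers are made up for
 * this sketch; they are not the kernel's struct mtx or mtx_assert() API.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_lock {
	uintptr_t owner;	/* identity of the holding thread */
	unsigned recurse;	/* recursion depth; must be 0 here */
};

/* Old shape: overwrite the count, masking any path that left the lock recursed. */
static void
claim_forced(struct fake_lock *lk, uintptr_t self)
{
	lk->owner = self;
	lk->recurse = 0;
}

/* New shape: take ownership, then assert "held but not recursed". */
static void
claim_checked(struct fake_lock *lk, uintptr_t self)
{
	lk->owner = self;
	assert(lk->owner == self && lk->recurse == 0);
}

int
main(void)
{
	struct fake_lock lk = { 0, 0 };

	claim_forced(&lk, 1);
	claim_checked(&lk, 1);
	printf("owner=%ju recurse=%u\n", (uintmax_t)lk.owner, lk.recurse);
	return (0);
}

The trade-off mirrors the kernel change: mtx_assert() is only compiled in under INVARIANTS, so a violated assumption now trips a debug kernel instead of being silently papered over.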
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 6480b49..1a23157 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -567,7 +567,6 @@ void
 sched_switch(struct thread *td)
 {
 	struct thread *newtd;
-	u_long sched_nest;
 	struct kse *ke;
 	struct proc *p;
 
@@ -597,11 +596,9 @@ sched_switch(struct thread *td)
 		 */
 		kse_reassign(ke);
 	}
-	sched_nest = sched_lock.mtx_recurse;
 	newtd = choosethread();
 	if (td != newtd)
 		cpu_switch(td, newtd);
-	sched_lock.mtx_recurse = sched_nest;
 	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);
 }
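
The sched_4bsd.c change drops the save of sched_lock.mtx_recurse into a local before choosethread()/cpu_switch() and the write-back afterwards, along with the now-unused sched_nest variable; with fork_exit() no longer zeroing mtx_recurse (first hunk above), nothing is expected to clobber the count while the thread is switched out, which is presumably what the save/restore guarded against. The sched_ule.c hunk that follows is the identical simplification. Here is a hedged userland sketch of the before/after shape; fake_lock, fake_thread, run_lock, choose_next() and do_switch() are all invented stand-ins, not scheduler or mutex APIs.

/*
 * Illustrative only: every name below is made up for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_lock { uintptr_t owner; unsigned recurse; };
struct fake_thread { int id; };

static struct fake_lock run_lock;
static struct fake_thread threads[2] = { { 0 }, { 1 } };

/* Toy "scheduler": always hands back the second thread. */
static struct fake_thread *
choose_next(void)
{
	return (&threads[1]);
}

/* Toy context switch: nothing to do in userland. */
static void
do_switch(struct fake_thread *from, struct fake_thread *to)
{
	(void)from;
	(void)to;
}

/* Before: stash the recursion count in case something clobbers it mid-switch. */
static void
switch_old(struct fake_thread *td)
{
	struct fake_thread *newtd;
	unsigned saved;

	saved = run_lock.recurse;
	newtd = choose_next();
	if (td != newtd)
		do_switch(td, newtd);
	run_lock.recurse = saved;	/* undo any clobbering */
	run_lock.owner = (uintptr_t)td;
}

/* After: the switch path preserves the count, so the scratch copy is dead code. */
static void
switch_new(struct fake_thread *td)
{
	struct fake_thread *newtd;

	newtd = choose_next();
	if (td != newtd)
		do_switch(td, newtd);
	run_lock.owner = (uintptr_t)td;
}

int
main(void)
{
	switch_old(&threads[0]);
	switch_new(&threads[0]);
	printf("owner=%ju recurse=%u\n", (uintmax_t)run_lock.owner,
	    run_lock.recurse);
	return (0);
}

The sketch only shows the shape of the change: switch_new() trusts the switch path with the lock state, just as sched_switch() now trusts cpu_switch() and fork_exit() with mtx_recurse.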
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a41d712..c8c4618 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -806,7 +806,6 @@ void
 sched_switch(struct thread *td)
 {
 	struct thread *newtd;
-	u_int sched_nest;
 	struct kse *ke;
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -847,11 +846,9 @@ sched_switch(struct thread *td)
 		if (td->td_proc->p_flag & P_SA)
 			kse_reassign(ke);
 	}
-	sched_nest = sched_lock.mtx_recurse;
 	newtd = choosethread();
 	if (td != newtd)
 		cpu_switch(td, newtd);
-	sched_lock.mtx_recurse = sched_nest;
 	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);