Diffstat (limited to 'sys/kern')
 -rw-r--r--   sys/kern/kern_switch.c   16
 -rw-r--r--   sys/kern/kern_synch.c     2
 -rw-r--r--   sys/kern/sched_4bsd.c     2
 -rw-r--r--   sys/kern/sched_ule.c      2
 4 files changed, 14 insertions, 8 deletions
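
Every hunk below makes the same substitution: the pending-preemption marker moves out of the thread's private flag word (td_pflags) and into a field of its own (td_owepreempt). The thread structure is declared in a header outside this diffstat, so the following is only a sketch of the shape of the change; the type and placement are assumptions.

struct thread_sketch {
	int	td_pflags;	/* before: TDP_OWEPREEMPT was a bit in this private flag word */
	int	td_owepreempt;	/* after: nonzero when an involuntary switch is owed */
};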
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 707a7f9..421cc31 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -357,7 +357,7 @@ maybe_preempt_in_ksegrp(struct thread *td)
return;
#ifdef PREEMPTION
if (running_thread->td_critnest > 1)
- running_thread->td_pflags |= TDP_OWEPREEMPT;
+ running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL, NULL);
@@ -446,7 +446,7 @@ maybe_preempt_in_ksegrp(struct thread *td)
return;
#ifdef PREEMPTION
if (running_thread->td_critnest > 1)
- running_thread->td_pflags |= TDP_OWEPREEMPT;
+ running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL, NULL);
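
Both hunks above follow the same defer-or-switch pattern: if the running thread is nested inside a critical section, only record that a preemption is owed; otherwise call mi_switch() right away. The marker is then consumed when the thread leaves its critical section. A minimal usage sketch, assuming the usual critical_enter()/critical_exit() pairing (the function name is hypothetical):

static void
example_critical_region(void)
{
	critical_enter();	/* raise td_critnest; involuntary switches are now deferred */
	/*
	 * If a higher-priority thread becomes runnable here, the code above
	 * sets td_owepreempt = 1 instead of switching immediately.
	 */
	critical_exit();	/* any owed preemption is paid off on the way out */
}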
@@ -598,15 +598,21 @@ critical_exit(void)
td->td_pflags &= ~TDP_WAKEPROC0;
wakeup(&proc0);
}
+
+ td->td_critnest = 0;
+
#ifdef PREEMPTION
mtx_assert(&sched_lock, MA_NOTOWNED);
- if (td->td_pflags & TDP_OWEPREEMPT) {
+ if (td->td_owepreempt) {
+ td->td_critnest = 1;
mtx_lock_spin(&sched_lock);
+ td->td_critnest--;
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
}
+
#endif
- td->td_critnest = 0;
+
} else {
td->td_critnest--;
}
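
This critical_exit() reordering is the consumer side of that marker: td_critnest is dropped first, then briefly raised again around the acquisition of sched_lock so the deferred switch still runs with a nesting count mi_switch() accepts. An annotated restatement of the new flow (the comments are an interpretation, not part of the commit):

	td->td_critnest = 0;			/* formally leave the critical section */
#ifdef PREEMPTION
	if (td->td_owepreempt) {
		td->td_critnest = 1;		/* temporarily restore nesting around sched_lock */
		mtx_lock_spin(&sched_lock);
		td->td_critnest--;		/* drop back so mi_switch() sees a legal nesting count */
		mi_switch(SW_INVOL, NULL);	/* pay off the deferred involuntary switch */
		mtx_unlock_spin(&sched_lock);	/* sched_switch() has already cleared td_owepreempt */
	}
#endif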
@@ -672,7 +678,7 @@ maybe_preempt(struct thread *td)
if (ctd->td_critnest > 1) {
CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
ctd->td_critnest);
- ctd->td_pflags |= TDP_OWEPREEMPT;
+ ctd->td_owepreempt = 1;
return (0);
}
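
In maybe_preempt() the same marker is set, but the function also returns 0 so its caller knows no switch took place and the newly runnable thread still has to be queued. A hypothetical caller illustrating that convention:

	if (!maybe_preempt(td)) {
		/*
		 * The preemption was deferred (td_owepreempt set) or refused;
		 * the newly runnable thread is left on (or added to) its run
		 * queue and the current thread keeps the CPU for now.
		 */
	}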
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index d86f570..001cf2a 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -286,7 +286,7 @@ mi_switch(int flags, struct thread *newtd)
mtx_assert(&Giant, MA_NOTOWNED);
#endif
KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
- (td->td_pflags & TDP_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
+ (td->td_owepreempt) && (flags & SW_INVOL) != 0 &&
newtd == NULL) || panicstr,
("mi_switch: switch in a critical section"));
KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
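
The relaxed assertion now admits exactly two legal states for a context switch: the outermost critical nesting level, or one level deeper when the switch is the deferred involuntary preemption itself. A hypothetical helper restating the predicate (the panicstr escape hatch is omitted):

static int
switch_in_critical_ok(struct thread *td, int flags, struct thread *newtd)
{
	/* Normal case: switching from the outermost critical nesting level. */
	if (td->td_critnest == 1)
		return (1);
	/* Deferred case: one level deeper, involuntary, and no explicit target thread. */
	return (td->td_critnest == 2 && td->td_owepreempt &&
	    (flags & SW_INVOL) != 0 && newtd == NULL);
}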
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index fe90fee..f8a5c4f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -911,7 +911,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
td->td_lastcpu = td->td_oncpu;
td->td_flags &= ~TDF_NEEDRESCHED;
- td->td_pflags &= ~TDP_OWEPREEMPT;
+ td->td_owepreempt = 0;
td->td_oncpu = NOCPU;
/*
* At the last moment, if this thread is still marked RUNNING,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 44c7682..850f07e 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1346,7 +1346,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
td->td_lastcpu = td->td_oncpu;
td->td_oncpu = NOCPU;
td->td_flags &= ~TDF_NEEDRESCHED;
- td->td_pflags &= ~TDP_OWEPREEMPT;
+ td->td_owepreempt = 0;
/*
* If the KSE has been assigned it may be in the process of switching
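
Both scheduler hunks (sched_4bsd.c above and sched_ule.c here) clear the marker inside sched_switch(): the switch that is happening satisfies any owed preemption, so a stale td_owepreempt must not force a second involuntary switch the next time this thread leaves a critical section. A hypothetical helper capturing the shared lines:

static void
sched_switch_clear_preempt_state(struct thread *td)
{
	/*
	 * Both the reschedule request and the owed preemption are satisfied
	 * by the context switch in progress.
	 */
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
}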