summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author    ups <ups@FreeBSD.org>  2005-04-08 03:37:53 +0000
committer ups <ups@FreeBSD.org>  2005-04-08 03:37:53 +0000
commit 7bac02c1465f16f80ca4f8673c20be951b6922a0 (patch)
tree1dd51e535e839033d7b1c0ba6104fb1725aa3705
parent75973f60359658f053d14347abf7b60eaf3e0122 (diff)
download FreeBSD-src-7bac02c1465f16f80ca4f8673c20be951b6922a0.zip
         FreeBSD-src-7bac02c1465f16f80ca4f8673c20be951b6922a0.tar.gz
Sprinkle some volatile magic and rearrange things a bit to avoid race
conditions in critical_exit now that it no longer blocks interrupts.

Reviewed by: jhb
-rw-r--r--sys/kern/kern_switch.c16
-rw-r--r--sys/kern/kern_synch.c2
-rw-r--r--sys/kern/sched_4bsd.c2
-rw-r--r--sys/kern/sched_ule.c2
-rw-r--r--sys/sys/proc.h6
5 files changed, 18 insertions, 10 deletions
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 707a7f9..421cc31 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -357,7 +357,7 @@ maybe_preempt_in_ksegrp(struct thread *td)
return;
#ifdef PREEMPTION
if (running_thread->td_critnest > 1)
- running_thread->td_pflags |= TDP_OWEPREEMPT;
+ running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL, NULL);
@@ -446,7 +446,7 @@ maybe_preempt_in_ksegrp(struct thread *td)
return;
#ifdef PREEMPTION
if (running_thread->td_critnest > 1)
- running_thread->td_pflags |= TDP_OWEPREEMPT;
+ running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL, NULL);
@@ -598,15 +598,21 @@ critical_exit(void)
td->td_pflags &= ~TDP_WAKEPROC0;
wakeup(&proc0);
}
+
+ td->td_critnest = 0;
+
#ifdef PREEMPTION
mtx_assert(&sched_lock, MA_NOTOWNED);
- if (td->td_pflags & TDP_OWEPREEMPT) {
+ if (td->td_owepreempt) {
+ td->td_critnest = 1;
mtx_lock_spin(&sched_lock);
+ td->td_critnest--;
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
}
+
#endif
- td->td_critnest = 0;
+
} else {
td->td_critnest--;
}
@@ -672,7 +678,7 @@ maybe_preempt(struct thread *td)
if (ctd->td_critnest > 1) {
CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
ctd->td_critnest);
- ctd->td_pflags |= TDP_OWEPREEMPT;
+ ctd->td_owepreempt = 1;
return (0);
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index d86f570..001cf2a 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -286,7 +286,7 @@ mi_switch(int flags, struct thread *newtd)
mtx_assert(&Giant, MA_NOTOWNED);
#endif
KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
- (td->td_pflags & TDP_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
+ (td->td_owepreempt) && (flags & SW_INVOL) != 0 &&
newtd == NULL) || panicstr,
("mi_switch: switch in a critical section"));
KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index fe90fee..f8a5c4f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -911,7 +911,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
td->td_lastcpu = td->td_oncpu;
td->td_flags &= ~TDF_NEEDRESCHED;
- td->td_pflags &= ~TDP_OWEPREEMPT;
+ td->td_owepreempt = 0;
td->td_oncpu = NOCPU;
/*
* At the last moment, if this thread is still marked RUNNING,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 44c7682..850f07e 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1346,7 +1346,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
td->td_lastcpu = td->td_oncpu;
td->td_oncpu = NOCPU;
td->td_flags &= ~TDF_NEEDRESCHED;
- td->td_pflags &= ~TDP_OWEPREEMPT;
+ td->td_owepreempt = 0;
/*
* If the KSE has been assigned it may be in the process of switching
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 5396f54..1af6b71 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -134,6 +134,7 @@ struct pargs {
* i - by curproc or the master session mtx
* j - locked by sched_lock mtx
* k - only accessed by curthread
+ * k*- only accessed by curthread and from an interrupt
* l - the attaching proc or attaching proc parent
* m - Giant
* n - not locked, lazy
@@ -263,6 +264,7 @@ struct thread {
const char *td_wmesg; /* (j) Reason for sleep. */
u_char td_lastcpu; /* (j) Last cpu we were on. */
u_char td_oncpu; /* (j) Which cpu we are on. */
+ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */
short td_locks; /* (k) DEBUG: lockmgr count of locks. */
struct turnstile *td_blocked; /* (j) Lock process is blocked on. */
struct ithd *td_ithd; /* (b) For interrupt threads only. */
@@ -317,7 +319,7 @@ struct thread {
struct vm_object *td_altkstack_obj;/* (a) Alternate kstack object. */
vm_offset_t td_altkstack; /* (a) Kernel VA of alternate kstack. */
int td_altkstack_pages; /* (a) Size of alternate kstack. */
- u_int td_critnest; /* (k) Critical section nest level. */
+ volatile u_int td_critnest; /* (k*) Critical section nest level. */
struct mdthread td_md; /* (k) Any machine-dependent fields. */
struct td_sched *td_sched; /* (*) Scheduler-specific data. */
};
@@ -366,7 +368,7 @@ struct thread {
#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
#define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */
#define TDP_SA 0x00000080 /* A scheduler activation based thread. */
-#define TDP_OWEPREEMPT 0x00000100 /* Thread has a pending preemption. */
+#define TDP_UNUSED8 0x00000100 /* --available -- */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_WAKEPROC0 0x00000400 /* Wants caller to wakeup(&proc0) */
#define TDP_CAN_UNBIND 0x00000800 /* Only temporarily bound. */
OpenPOWER on IntegriCloud