author    jhb <jhb@FreeBSD.org>    2001-12-18 00:27:18 +0000
committer jhb <jhb@FreeBSD.org>    2001-12-18 00:27:18 +0000
commit    a3b98398cbfb4b809f8577b6a95aabb2c30a1aeb (patch)
tree      bd1f842c61588e8478e798dece6dff8b2be41310 /sys/kern
parent    090c933e94e7345e9c9e9a9fbe29ea6c8397a662 (diff)
Modify the critical section API as follows:
- The MD functions critical_enter/exit are renamed to start with a cpu_
  prefix.
- MI wrapper functions critical_enter/exit maintain a per-thread nesting
  count and a per-thread critical section saved state set when entering
  a critical section while at nesting level 0 and restored when exiting
  to nesting level 0.  This moves the saved state out of spin mutexes so
  that interlocking spin mutexes works properly.
- Most low-level MD code that used critical_enter/exit now use
  cpu_critical_enter/exit.  MI code such as device drivers and spin
  mutexes use the MI wrappers.  Note that since the MI wrappers store
  the state in the current thread, they do not have any return values or
  arguments.
- mtx_intr_enable() is replaced with a constant CRITICAL_FORK which is
  assigned to curthread->td_savecrit during fork_exit().

Tested on:	i386, alpha
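
As a hedged illustration of the new MI calling convention described above:
the sketch below shows how a driver-style routine would use the wrappers
after this change.  Only the wrapper names and the fact that they now take
no argument and return nothing come from the commit; the routine, the
counter, and the header choice are invented for the example.

	/*
	 * Illustrative sketch only.  What comes from the commit is that MI
	 * code (device drivers, spin mutexes) calls the wrappers with no
	 * argument and no return value; the nesting count and saved state
	 * live in curthread instead of being carried by the caller.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>		/* assumed: critical_*() prototypes */

	static int mydev_pending;	/* hypothetical state shared with interrupt code */

	static void
	mydev_kick(void)
	{

		critical_enter();	/* level 0 -> 1: td_savecrit = cpu_critical_enter() */
		mydev_pending++;	/* work that must not be preempted */
		critical_exit();	/* level 1 -> 0: cpu_critical_exit(td_savecrit) */
	}

Before this change the same MI code had to carry the saved state itself
(critical_t s = critical_enter(); ... critical_exit(s);), a pattern the
ktr and witness hunks below retain only for the MD cpu_critical_* variants.
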
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_fork.c       |  9
-rw-r--r--  sys/kern/kern_idle.c       |  4
-rw-r--r--  sys/kern/kern_ktr.c        |  6
-rw-r--r--  sys/kern/kern_mutex.c      |  8
-rw-r--r--  sys/kern/kern_switch.c     | 25
-rw-r--r--  sys/kern/kern_synch.c      |  3
-rw-r--r--  sys/kern/subr_prof.c       | 10
-rw-r--r--  sys/kern/subr_trap.c       |  8
-rw-r--r--  sys/kern/subr_turnstile.c  |  8
-rw-r--r--  sys/kern/subr_witness.c    | 13
10 files changed, 53 insertions, 41 deletions
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index bb52a34..bc03078 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -467,9 +467,6 @@ again:
if (p1->p_sflag & PS_PROFIL)
startprofclock(p2);
mtx_unlock_spin(&sched_lock);
- /*
- * We start off holding one spinlock after fork: sched_lock.
- */
PROC_LOCK(p1);
p2->p_ucred = crhold(p1->p_ucred);
p2->p_thread.td_ucred = crhold(p2->p_ucred); /* XXXKSE */
@@ -766,10 +763,8 @@ fork_exit(callout, arg, frame)
*/
sched_lock.mtx_lock = (uintptr_t)td;
sched_lock.mtx_recurse = 0;
- /*
- * XXX: We really shouldn't have to do this.
- */
- mtx_intr_enable(&sched_lock);
+ td->td_critnest = 1;
+ td->td_savecrit = CRITICAL_FORK;
CTR3(KTR_PROC, "fork_exit: new proc %p (pid %d, %s)", p, p->p_pid,
p->p_comm);
if (PCPU_GET(switchtime.tv_sec) == 0)
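
The fork_exit() hunk above replaces mtx_intr_enable() by handing the child
thread a nesting level of one and the MD constant CRITICAL_FORK as its saved
state, so the first critical_exit() on the child's code path is what ends up
re-enabling interrupts.  CRITICAL_FORK itself is machine-dependent and not
part of this diff; purely as a hedged sketch, an i386-flavoured definition
could look like the following (read_eflags() and PSL_I are the usual i386
kernel names; the actual definition in the tree may differ):

	/*
	 * Hedged sketch, not from this diff: on i386 the saved critical
	 * state is an eflags image, so a "freshly forked" state can simply
	 * be the current eflags with the interrupt-enable flag forced on.
	 */
	#include <machine/cpufunc.h>	/* read_eflags() */
	#include <machine/psl.h>	/* PSL_I */

	#define	CRITICAL_FORK	(read_eflags() | PSL_I)
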
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index 92e5cb3..4dee96d 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -47,8 +47,10 @@ idle_setup(void *dummy)
error = kthread_create(idle_proc, NULL, &p,
RFSTOPPED | RFHIGHPID, "idle: cpu%d", pc->pc_cpuid);
pc->pc_idlethread = &p->p_thread;
- if (pc->pc_curthread == NULL)
+ if (pc->pc_curthread == NULL) {
pc->pc_curthread = pc->pc_idlethread;
+ pc->pc_idlethread->td_critnest = 0;
+ }
#else
error = kthread_create(idle_proc, NULL, &p,
RFSTOPPED | RFHIGHPID, "idle");
diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c
index 309d6e3..596a724 100644
--- a/sys/kern/kern_ktr.c
+++ b/sys/kern/kern_ktr.c
@@ -133,9 +133,9 @@ ktr_tracepoint(u_int mask, const char *format, u_long arg1, u_long arg2,
td = curthread;
if (td->td_inktr)
return;
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
if (((1 << KTR_CPU) & ktr_cpumask) == 0) {
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
return;
}
td->td_inktr++;
@@ -145,7 +145,7 @@ ktr_tracepoint(u_int mask, const char *format, u_long arg1, u_long arg2,
} while (atomic_cmpset_rel_int(&ktr_idx, saveindex, newindex) == 0);
entry = &ktr_buf[saveindex];
entry->ktr_cpu = KTR_CPU;
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
nanotime(&entry->ktr_tv);
#ifdef KTR_EXTEND
entry->ktr_filename = filename;
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index d4809af..852b570 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -427,8 +427,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* is handled inline.
*/
void
-_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
- int line)
+_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
int i = 0;
@@ -440,7 +439,7 @@ _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
break;
/* Give interrupts a chance while we spin. */
- critical_exit(mtx_crit);
+ critical_exit();
while (m->mtx_lock != MTX_UNOWNED) {
if (i++ < 1000000)
continue;
@@ -454,10 +453,9 @@ _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
panic("spin lock %s held by %p for > 5 seconds",
m->mtx_object.lo_name, (void *)m->mtx_lock);
}
- mtx_crit = critical_enter();
+ critical_enter();
}
- m->mtx_savecrit = mtx_crit;
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 65054df..1a2afa4 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -69,6 +69,31 @@ setrunqueue(struct thread *td)
runq_add(&runq, td->td_kse);
}
+/* Critical sections that prevent preemption. */
+void
+critical_enter(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ if (td->td_critnest == 0)
+ td->td_savecrit = cpu_critical_enter();
+ td->td_critnest++;
+}
+
+void
+critical_exit(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ if (td->td_critnest == 1) {
+ td->td_critnest = 0;
+ cpu_critical_exit(td->td_savecrit);
+ } else
+ td->td_critnest--;
+}
+
/*
* Clear the status bit of the queue corresponding to priority level pri,
* indicating that it is empty.
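
The two wrappers just added to kern_switch.c defer the actual interrupt
masking to the renamed MD primitives, which are not part of this diff.  As a
hedged illustration of the contract they have to satisfy (hand back an opaque
saved state on enter, restore it on exit), an i386-flavoured sketch might
look like the following; the real definitions live in the machine-dependent
headers and may differ in detail.

	/*
	 * Hedged sketch, not part of this diff: a plausible i386-style MD
	 * side.  cpu_critical_enter() disables interrupts and returns what
	 * is needed to undo that (an eflags image here), and
	 * cpu_critical_exit() restores it.  The MI wrappers stash that value
	 * in curthread->td_savecrit when the nesting count goes 0 -> 1 and
	 * hand it back when it drops to 0.  critical_t is assumed to be the
	 * MD integer type holding that state.
	 */
	#include <machine/cpufunc.h>	/* read_eflags(), write_eflags(), disable_intr() */

	static __inline critical_t
	cpu_critical_enter(void)
	{
		critical_t eflags;

		eflags = read_eflags();
		disable_intr();
		return (eflags);
	}

	static __inline void
	cpu_critical_exit(critical_t eflags)
	{

		write_eflags(eflags);
	}
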
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 7034c0a..fce470f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -698,7 +698,6 @@ mi_switch()
#if 0
register struct rlimit *rlim;
#endif
- critical_t sched_crit;
u_int sched_nest;
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
@@ -773,14 +772,12 @@ mi_switch()
PCPU_SET(switchtime, new_switchtime);
CTR3(KTR_PROC, "mi_switch: old proc %p (pid %d, %s)", p, p->p_pid,
p->p_comm);
- sched_crit = sched_lock.mtx_savecrit;
sched_nest = sched_lock.mtx_recurse;
td->td_lastcpu = td->td_kse->ke_oncpu;
td->td_kse->ke_oncpu = NOCPU;
td->td_kse->ke_flags &= ~KEF_NEEDRESCHED;
cpu_switch();
td->td_kse->ke_oncpu = PCPU_GET(cpuid);
- sched_lock.mtx_savecrit = sched_crit;
sched_lock.mtx_recurse = sched_nest;
sched_lock.mtx_lock = (uintptr_t)td;
CTR3(KTR_PROC, "mi_switch: new proc %p (pid %d, %s)", p, p->p_pid,
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 67629d4..8808651 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -90,7 +90,6 @@ kmupetext(uintfptr_t nhighpc)
struct gmonparam np; /* slightly large */
struct gmonparam *p = &_gmonparam;
char *cp;
- critical_t savecrit;
GIANT_REQUIRED;
bcopy(p, &np, sizeof(*p));
@@ -127,7 +126,7 @@ kmupetext(uintfptr_t nhighpc)
np.mcount_count = &KCOUNT(&np, PC_TO_I(&np, mcount));
np.mexitcount_count = &KCOUNT(&np, PC_TO_I(&np, mexitcount));
#endif
- savecrit = critical_enter();
+ critical_enter();
bcopy(p->tos, np.tos, p->tossize);
bzero((char *)np.tos + p->tossize, np.tossize - p->tossize);
bcopy(p->kcount, np.kcount, p->kcountsize);
@@ -137,7 +136,7 @@ kmupetext(uintfptr_t nhighpc)
bzero((char *)np.froms + p->fromssize, np.fromssize - p->fromssize);
cp = (char *)p->tos;
bcopy(&np, p, sizeof(*p));
- critical_exit(savecrit);
+ critical_exit();
free(cp, M_GPROF);
}
@@ -156,7 +155,6 @@ kmstartup(dummy)
int nullfunc_loop_overhead;
int nullfunc_loop_profiled_time;
uintfptr_t tmp_addr;
- critical_t savecrit;
#endif
/*
@@ -195,7 +193,7 @@ kmstartup(dummy)
* Disable interrupts to avoid interference while we calibrate
* things.
*/
- savecrit = critical_enter();
+ critical_enter();
/*
* Determine overheads.
@@ -249,7 +247,7 @@ kmstartup(dummy)
p->state = GMON_PROF_OFF;
stopguprof(p);
- critical_exit(savecrit);
+ critical_exit();
nullfunc_loop_profiled_time = 0;
for (tmp_addr = (uintfptr_t)nullfunc_loop_profiled;
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index a899576..6f17f8f 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -141,9 +141,9 @@ ast(framep)
panic("Returning to user mode with mutex(s) held");
#endif
mtx_assert(&Giant, MA_NOTOWNED);
- s = critical_enter();
+ s = cpu_critical_enter();
while ((ke->ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) != 0) {
- critical_exit(s);
+ cpu_critical_exit(s);
td->td_frame = framep;
/*
* This updates the p_sflag's for the checks below in one
@@ -195,13 +195,13 @@ ast(framep)
crfree(td->td_ucred);
mtx_unlock(&Giant);
td->td_ucred = NULL;
- s = critical_enter();
+ s = cpu_critical_enter();
}
mtx_assert(&Giant, MA_NOTOWNED);
/*
* We need to keep interrupts disabled so that if any further AST's
* come in, the interrupt they come in on will be delayed until we
* finish returning to userland. We assume that the return to userland
- * will perform the equivalent of critical_exit().
+ * will perform the equivalent of cpu_critical_exit().
*/
}
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index d4809af..852b570 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -427,8 +427,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* is handled inline.
*/
void
-_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
- int line)
+_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
int i = 0;
@@ -440,7 +439,7 @@ _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
break;
/* Give interrupts a chance while we spin. */
- critical_exit(mtx_crit);
+ critical_exit();
while (m->mtx_lock != MTX_UNOWNED) {
if (i++ < 1000000)
continue;
@@ -454,10 +453,9 @@ _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
panic("spin lock %s held by %p for > 5 seconds",
m->mtx_object.lo_name, (void *)m->mtx_lock);
}
- mtx_crit = critical_enter();
+ critical_enter();
}
- m->mtx_savecrit = mtx_crit;
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 454fd55..5e4dc61 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -252,7 +252,6 @@ static struct mtx all_mtx = {
{ NULL }, /* mtx_object.lo_list */
NULL }, /* mtx_object.lo_witness */
MTX_UNOWNED, 0, /* mtx_lock, mtx_recurse */
- 0, /* mtx_savecrit */
TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
{ NULL, NULL } /* mtx_contested */
};
@@ -836,7 +835,7 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
instance->li_flags--;
goto out;
}
- s = critical_enter();
+ s = cpu_critical_enter();
CTR4(KTR_WITNESS,
"%s: pid %d removed %s from lle[%d]", __func__,
td->td_proc->p_pid,
@@ -846,7 +845,7 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
for (j = i; j < (*lock_list)->ll_count; j++)
(*lock_list)->ll_children[j] =
(*lock_list)->ll_children[j + 1];
- critical_exit(s);
+ cpu_critical_exit(s);
if ((*lock_list)->ll_count == 0) {
lle = *lock_list;
*lock_list = lle->ll_next;
@@ -896,7 +895,7 @@ witness_sleep(int check_only, struct lock_object *lock, const char *file,
/*
* Preemption bad because we need PCPU_PTR(spinlocks) to not change.
*/
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
td = curthread;
lock_list = &td->td_sleeplocks;
again:
@@ -931,7 +930,7 @@ again:
if (witness_ddb && n)
Debugger(__func__);
#endif /* DDB */
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
return (n);
}
@@ -1360,9 +1359,9 @@ witness_list(struct thread *td)
* Preemption bad because we need PCPU_PTR(spinlocks) to not
* change.
*/
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
nheld += witness_list_locks(PCPU_PTR(spinlocks));
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
}
return (nheld);
}