author     jhb <jhb@FreeBSD.org>  2001-12-18 00:27:18 +0000
committer  jhb <jhb@FreeBSD.org>  2001-12-18 00:27:18 +0000
commit     a3b98398cbfb4b809f8577b6a95aabb2c30a1aeb (patch)
tree       bd1f842c61588e8478e798dece6dff8b2be41310 /sys/amd64/amd64
parent     090c933e94e7345e9c9e9a9fbe29ea6c8397a662 (diff)
Modify the critical section API as follows:
- The MD functions critical_enter/exit are renamed to start with a cpu_ prefix.
- MI wrapper functions critical_enter/exit maintain a per-thread nesting count and a per-thread critical section saved state, set when entering a critical section at nesting level 0 and restored when exiting to nesting level 0. This moves the saved state out of spin mutexes so that interlocking spin mutexes works properly.
- Most low-level MD code that used critical_enter/exit now uses cpu_critical_enter/exit. MI code such as device drivers and spin mutexes uses the MI wrappers. Note that since the MI wrappers store the state in the current thread, they do not have any return values or arguments.
- mtx_intr_enable() is replaced with a constant CRITICAL_FORK, which is assigned to curthread->td_savecrit during fork_exit().

Tested on: i386, alpha
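
A minimal sketch of what the MI wrappers described above could look like, based only on this message (not the committed sys/kern code); the td_critnest field name and the listed headers are assumptions:

/*
 * Illustrative only: assumes <sys/param.h>, <sys/systm.h> and
 * <sys/proc.h> for curthread, struct thread and critical_t, and
 * assumes the nesting count lives in a hypothetical td_critnest field.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		/* Entering the outermost section: save the MD state. */
		td->td_savecrit = cpu_critical_enter();
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		/* Leaving the outermost section: restore the saved state. */
		cpu_critical_exit(td->td_savecrit);
	} else
		td->td_critnest--;
}

Per the message, fork_exit() then assigns the constant CRITICAL_FORK to curthread->td_savecrit, presumably so that the new thread's first critical_exit() has a valid state to restore.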
Diffstat (limited to 'sys/amd64/amd64')
-rw-r--r--  sys/amd64/amd64/db_interface.c    4
-rw-r--r--  sys/amd64/amd64/fpu.c            16
-rw-r--r--  sys/amd64/amd64/genassym.c        1
-rw-r--r--  sys/amd64/amd64/initcpu.c         4
-rw-r--r--  sys/amd64/amd64/sys_machdep.c     4
-rw-r--r--  sys/amd64/amd64/vm_machdep.c      4
6 files changed, 16 insertions, 17 deletions
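
For context, a hedged illustration of how MI code such as a spin lock would use the argumentless wrappers after this change; this toy lock is hypothetical and is not the actual mtx(9) implementation:

/* Toy spin lock, for illustration; not FreeBSD's struct mtx. */
struct toy_spinlock {
	volatile u_int	tsl_lock;	/* 0 = free, 1 = held */
};

static void
toy_spin_lock(struct toy_spinlock *tsl)
{
	critical_enter();		/* no saved-state return value to keep */
	while (!atomic_cmpset_acq_int(&tsl->tsl_lock, 0, 1))
		;			/* spin until the lock is acquired */
}

static void
toy_spin_unlock(struct toy_spinlock *tsl)
{
	atomic_store_rel_int(&tsl->tsl_lock, 0);
	critical_exit();		/* MD state restored only at nesting level 0 */
}

Because the saved state now lives in the thread rather than in each mutex, nesting two such locks (interlocking) just bumps the per-thread count instead of clobbering a per-mutex saved state.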
diff --git a/sys/amd64/amd64/db_interface.c b/sys/amd64/amd64/db_interface.c
index 9708cad..000085b 100644
--- a/sys/amd64/amd64/db_interface.c
+++ b/sys/amd64/amd64/db_interface.c
@@ -324,10 +324,10 @@ Debugger(msg)
return;
if (atomic_cmpset_acq_int(&in_Debugger, 0, 1)) {
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
db_printf("Debugger(\"%s\")\n", msg);
breakpoint();
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
atomic_store_rel_int(&in_Debugger, 0);
}
}
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index ff8c4cf..34a822a 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -515,7 +515,7 @@ npxinit(control)
* fnsave to throw away any junk in the fpu. npxsave() initializes
* the fpu and sets fpcurthread = NULL as important side effects.
*/
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
npxsave(&dummy);
stop_emulating();
#ifdef CPU_ENABLE_SSE
@@ -527,7 +527,7 @@ npxinit(control)
if (PCPU_GET(curpcb) != NULL)
fpusave(&PCPU_GET(curpcb)->pcb_save);
start_emulating();
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
}
/*
@@ -539,10 +539,10 @@ npxexit(td)
{
critical_t savecrit;
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
if (td == PCPU_GET(fpcurthread))
npxsave(&PCPU_GET(curpcb)->pcb_save);
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
#ifdef NPX_DEBUG
if (npx_exists) {
u_int masked_exceptions;
@@ -762,7 +762,7 @@ npxtrap()
PCPU_GET(fpcurthread), curthread, npx_exists);
panic("npxtrap from nowhere");
}
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
/*
* Interrupt handling (for another interrupt) may have pushed the
@@ -783,7 +783,7 @@ npxtrap()
GET_FPU_SW(curthread) &= ~0x80bf;
else
fnclex();
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}
@@ -807,7 +807,7 @@ npxdna()
PCPU_GET(fpcurthread), curthread);
panic("npxdna");
}
- s = critical_enter();
+ s = cpu_critical_enter();
stop_emulating();
/*
* Record new context early in case frstor causes an IRQ13.
@@ -829,7 +829,7 @@ npxdna()
* first FPU instruction after a context switch.
*/
fpurstor(&PCPU_GET(curpcb)->pcb_save);
- critical_exit(s);
+ cpu_critical_exit(s);
return (1);
}
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index 59f3147..99fa502 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -205,7 +205,6 @@ ASSYM(VM86_FRAMESIZE, sizeof(struct vm86frame));
ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
ASSYM(MTX_RECURSECNT, offsetof(struct mtx, mtx_recurse));
-ASSYM(MTX_SAVECRIT, offsetof(struct mtx, mtx_savecrit));
#ifdef PC98
#include <machine/bus.h>
diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c
index b32c786..8a9a2a5 100644
--- a/sys/amd64/amd64/initcpu.c
+++ b/sys/amd64/amd64/initcpu.c
@@ -646,7 +646,7 @@ enable_K5_wt_alloc(void)
* a stepping of 4 or greater.
*/
if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
msr = rdmsr(0x83); /* HWCR */
wrmsr(0x83, msr & !(0x10));
@@ -678,7 +678,7 @@ enable_K5_wt_alloc(void)
msr=rdmsr(0x83);
wrmsr(0x83, msr|0x10); /* enable write allocate */
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
}
}
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index a88acc9..b7beea9 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -532,13 +532,13 @@ i386_set_ldt(td, args)
}
/* Fill in range */
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
error = copyin(uap->descs,
&((union descriptor *)(pldt->ldt_base))[uap->start],
uap->num * sizeof(union descriptor));
if (!error)
td->td_retval[0] = uap->start;
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
return(error);
}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 322b5e7..4f1dab2 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -153,10 +153,10 @@ cpu_fork(td1, p2, flags)
#ifdef DEV_NPX
if (td1 == curthread)
td1->td_pcb->pcb_gs = rgs();
- savecrit = critical_enter();
+ savecrit = cpu_critical_enter();
if (PCPU_GET(fpcurthread) == td1)
npxsave(&td1->td_pcb->pcb_save);
- critical_exit(savecrit);
+ cpu_critical_exit(savecrit);
#endif
/* Point the pcb to the top of the stack */