summary refs log tree commit diff stats
path: root/sys/powerpc
diff options
context:
space:
mode:
author jhb <jhb@FreeBSD.org> 2001-12-18 00:27:18 +0000
committer jhb <jhb@FreeBSD.org> 2001-12-18 00:27:18 +0000
commit a3b98398cbfb4b809f8577b6a95aabb2c30a1aeb (patch)
tree bd1f842c61588e8478e798dece6dff8b2be41310 /sys/powerpc
parent 090c933e94e7345e9c9e9a9fbe29ea6c8397a662 (diff)
download FreeBSD-src-a3b98398cbfb4b809f8577b6a95aabb2c30a1aeb.zip
download FreeBSD-src-a3b98398cbfb4b809f8577b6a95aabb2c30a1aeb.tar.gz
Modify the critical section API as follows:
- The MD functions critical_enter/exit are renamed to start with a cpu_ prefix.
- MI wrapper functions critical_enter/exit maintain a per-thread nesting count and a per-thread critical section saved state set when entering a critical section while at nesting level 0 and restored when exiting to nesting level 0. This moves the saved state out of spin mutexes so that interlocking spin mutexes works properly.
- Most low-level MD code that used critical_enter/exit now use cpu_critical_enter/exit. MI code such as device drivers and spin mutexes use the MI wrappers. Note that since the MI wrappers store the state in the current thread, they do not have any return values or arguments.
- mtx_intr_enable() is replaced with a constant CRITICAL_FORK which is assigned to curthread->td_savecrit during fork_exit().

Tested on: i386, alpha
Diffstat (limited to 'sys/powerpc')
-rw-r--r--sys/powerpc/include/cpufunc.h6
-rw-r--r--sys/powerpc/include/mutex.h17
-rw-r--r--sys/powerpc/powerpc/genassym.c1
3 files changed, 5 insertions, 19 deletions
diff --git a/sys/powerpc/include/cpufunc.h b/sys/powerpc/include/cpufunc.h
index 0fd0dd0..25f37d3 100644
--- a/sys/powerpc/include/cpufunc.h
+++ b/sys/powerpc/include/cpufunc.h
@@ -35,6 +35,8 @@
#include <machine/psl.h>
+#define CRITICAL_FORK (mfmsr() | PSL_EE)
+
#ifdef __GNUC__
static __inline void
@@ -114,7 +116,7 @@ save_intr(void)
}
static __inline critical_t
-critical_enter(void)
+cpu_critical_enter(void)
{
return ((critical_t)save_intr());
@@ -128,7 +130,7 @@ restore_intr(unsigned int msr)
}
static __inline void
-critical_exit(critical_t msr)
+cpu_critical_exit(critical_t msr)
{
return (restore_intr((unsigned int)msr));
diff --git a/sys/powerpc/include/mutex.h b/sys/powerpc/include/mutex.h
index 0aeaa74..0b7fe26 100644
--- a/sys/powerpc/include/mutex.h
+++ b/sys/powerpc/include/mutex.h
@@ -32,22 +32,7 @@
#ifndef _MACHINE_MUTEX_H_
#define _MACHINE_MUTEX_H_
-#ifndef LOCORE
-
-#ifdef _KERNEL
-
-#define mtx_intr_enable(mutex) do (mutex)->mtx_savecrit |= PSL_EE; while (0)
-
-/*
- * Assembly macros (for internal use only)
- *--------------------------------------------------------------------------
- */
-
-#define _V(x) __STRING(x)
-
-#endif /* _KERNEL */
-
-#else /* !LOCORE */
+#ifdef LOCORE
/*
* Simple assembly macros to get and release non-recursive spin locks
diff --git a/sys/powerpc/powerpc/genassym.c b/sys/powerpc/powerpc/genassym.c
index 7ab6e77..c212c6b 100644
--- a/sys/powerpc/powerpc/genassym.c
+++ b/sys/powerpc/powerpc/genassym.c
@@ -66,7 +66,6 @@ ASSYM(PC_SWITCHTIME, offsetof(struct pcpu, pc_switchtime));
ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
ASSYM(MTX_RECURSECNT, offsetof(struct mtx, mtx_recurse));
-ASSYM(MTX_SAVECRIT, offsetof(struct mtx, mtx_savecrit));
ASSYM(PM_KERNELSR, offsetof(struct pmap, pm_sr[KERNEL_SR]));
ASSYM(PM_USERSR, offsetof(struct pmap, pm_sr[USER_SR]));
OpenPOWER on IntegriCloud