path: root/sys
author    jake <jake@FreeBSD.org>  2001-01-20 04:14:25 +0000
committer jake <jake@FreeBSD.org>  2001-01-20 04:14:25 +0000
commit    cf5b2e3c3be9fc98dc2fe0b284bf95f26709dff4 (patch)
tree      aa7d6b8edc7163761764c2dabb633a5b6915b12d /sys
parent    18831b15201a666065836859262791a86f2e8166 (diff)
Simplify the i386 asm MTX_{ENTER,EXIT} macros to just call the
appropriate function, rather than doing a horse-and-buggy acquire.
They now take the mutex type as an arg and can be used with sleep
as well as spin mutexes.
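The core of the change is in the mutex.h hunks below: instead of open-coding
a cmpxchg spin loop, MTX_ENTER/MTX_EXIT now just push their arguments and
call into the C routines _mtx_enter/_mtx_exit. As a rough reading aid, the
following C sketch shows the call those macros set up; the opaque struct mtx,
the MTX_SPIN value and the exact prototypes are stand-ins for illustration,
not copied from this commit.

    /*
     * Sketch only: stand-in declarations, not the kernel headers.
     */
    #include <stddef.h>

    struct mtx;                     /* opaque for this sketch */
    #define MTX_SPIN        0x1     /* placeholder flag value */

    /* Assumed prototypes: lock, type flag, then file/line debug args. */
    void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
    void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);

    /*
     * MTX_EXIT(_sched_lock, MTX_SPIN) in exception.s now builds a plain
     * cdecl frame equivalent to this call, with 0/NULL standing in for
     * the __FILE__/__LINE__ arguments that C callers would pass
     * (the asm pushes them as the "dummy" zeroes seen in the macro).
     */
    static void
    fork_trampoline_unlock(struct mtx *sched_lock)
    {
            _mtx_exit(sched_lock, MTX_SPIN, NULL, 0);
    }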
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/exception.S  |   4
-rw-r--r--  sys/amd64/amd64/exception.s  |   4
-rw-r--r--  sys/amd64/amd64/genassym.c   |   7
-rw-r--r--  sys/amd64/include/mutex.h    | 118
-rw-r--r--  sys/i386/i386/exception.s    |   4
-rw-r--r--  sys/i386/i386/genassym.c     |   7
-rw-r--r--  sys/i386/include/mutex.h     | 118
-rw-r--r--  sys/sys/mutex.h              |   3
8 files changed, 40 insertions, 225 deletions
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 99b91d0..603b4be 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -37,8 +37,8 @@
#include <machine/asmacros.h>
#include <sys/ipl.h>
+#include <sys/mutex.h>
#include <machine/lock.h>
-#include <machine/mutex.h>
#include <machine/psl.h>
#include <machine/trap.h>
#ifdef SMP
@@ -293,7 +293,7 @@ IDTVEC(int0x80_syscall)
jmp _doreti
ENTRY(fork_trampoline)
- MTX_EXIT(_sched_lock, %ecx)
+ MTX_EXIT(_sched_lock, MTX_SPIN)
sti /* XXX: we need this for kernel threads
created very early before interrupts
are enabled */
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index 99b91d0..603b4be 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -37,8 +37,8 @@
#include <machine/asmacros.h>
#include <sys/ipl.h>
+#include <sys/mutex.h>
#include <machine/lock.h>
-#include <machine/mutex.h>
#include <machine/psl.h>
#include <machine/trap.h>
#ifdef SMP
@@ -293,7 +293,7 @@ IDTVEC(int0x80_syscall)
jmp _doreti
ENTRY(fork_trampoline)
- MTX_EXIT(_sched_lock, %ecx)
+ MTX_EXIT(_sched_lock, MTX_SPIN)
sti /* XXX: we need this for kernel threads
created very early before interrupts
are enabled */
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index 82e5a54..eca44e0 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -231,10 +231,3 @@ ASSYM(VM86_FRAMESIZE, sizeof(struct vm86frame));
ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse));
ASSYM(MTX_SAVEINTR, offsetof(struct mtx, mtx_saveintr));
-#ifdef WITNESS
-ASSYM(MTX_DEBUG, offsetof(struct mtx, mtx_debug));
-ASSYM(MTXD_WITNESS, offsetof(struct mtx_debug, mtxd_witness));
-#endif
-
-ASSYM(MTX_UNOWNED, MTX_UNOWNED);
-ASSYM(MTX_SPIN, MTX_SPIN);
diff --git a/sys/amd64/include/mutex.h b/sys/amd64/include/mutex.h
index 61951fd..0e58108 100644
--- a/sys/amd64/include/mutex.h
+++ b/sys/amd64/include/mutex.h
@@ -253,110 +253,24 @@ extern char STR_SIEN[];
#else /* !LOCORE */
/*
- * Simple assembly macros to get and release spin locks.
+ * Simple assembly macros to get and release mutexes.
*/
-#ifdef WITNESS
-#define WITNESS_ENTER(lck, reg) \
- movl lck+MTX_DEBUG,reg; \
- cmpl $0,MTXD_WITNESS(reg); \
- jz 1f; \
- pushl $0; \
- pushl $0; \
- pushl $MTX_SPIN; \
- pushl $lck; \
- call witness_enter; \
- addl $0x10,%esp; \
-1:
+#define MTX_ENTER(lck, type) \
+ pushl $0 ; /* dummy __LINE__ */ \
+ pushl $0 ; /* dummy __FILE__ */ \
+ pushl $type ; \
+ pushl $lck ; \
+ call _mtx_enter ; \
+ addl $16,%esp
+
+#define MTX_EXIT(lck, type) \
+ pushl $0 ; /* dummy __LINE__ */ \
+ pushl $0 ; /* dummy __FILE__ */ \
+ pushl $type ; \
+ pushl $lck ; \
+ call _mtx_exit ; \
+ addl $16,%esp
-#define WITNESS_EXIT(lck, reg) \
- movl lck+MTX_DEBUG,reg; \
- cmpl $0,MTXD_WITNESS(reg); \
- jz 1f; \
- pushl $0; \
- pushl $0; \
- pushl $MTX_SPIN; \
- pushl $lck; \
- call witness_exit; \
- addl $0x10,%esp; \
-1:
-
-#else
-#define WITNESS_ENTER(lck, reg)
-#define WITNESS_EXIT(lck, reg)
-#endif
-
-#if defined(I386_CPU)
-
-#define MTX_ENTER(lck, reg) \
- movl _curproc,reg; \
- pushfl; \
- cli; \
- movl reg,lck+MTX_LOCK; \
- popl lck+MTX_SAVEINTR; \
- WITNESS_ENTER(lck, reg)
-
-#define MTX_EXIT(lck, reg) \
- WITNESS_EXIT(lck, reg) \
- pushl lck+MTX_SAVEINTR; \
- movl $ MTX_UNOWNED,lck+MTX_LOCK; \
- popfl;
-
-#else /* I386_CPU */
-
-#define MTX_ENTER(lck, reg) \
- movl _curproc,reg; \
- pushfl; \
- cli; \
-9: movl $ MTX_UNOWNED,%eax; \
- MPLOCKED \
- cmpxchgl reg,lck+MTX_LOCK; \
- jnz 9b; \
- popl lck+MTX_SAVEINTR; \
- WITNESS_ENTER(lck, reg)
-
-/* Must use locked bus op (cmpxchg) when setting to unowned (barrier) */
-#define MTX_EXIT(lck, reg) \
- WITNESS_EXIT(lck, reg) \
- pushl lck+MTX_SAVEINTR; \
- movl lck+MTX_LOCK,%eax; \
- movl $ MTX_UNOWNED,reg; \
- MPLOCKED \
- cmpxchgl reg,lck+MTX_LOCK; \
- popfl;
-
-#define MTX_ENTER_WITH_RECURSION(lck, reg) \
- pushf; \
- cli; \
- movl lck+MTX_LOCK,%eax; \
- cmpl _curproc,%eax; \
- jne 7f; \
- incl lck+MTX_RECURSE; \
- jmp 8f; \
-7: movl $ MTX_UNOWNED,%eax; \
- MPLOCKED \
- cmpxchgl reg,lck+MTX_LOCK; \
- jnz 7b; \
- popl lck+MTX_SAVEINTR; \
- jmp 9f; \
-8: add $4,%esp; \
-9: WITNESS_ENTER(lck, reg)
-
-#define MTX_EXIT_WITH_RECURSION(lck, reg) \
- WITNESS_EXIT(lck, reg) \
- movl lck+MTX_RECURSE,%eax; \
- decl %eax; \
- js 8f; \
- movl %eax,lck+MTX_RECURSE; \
- jmp 9f; \
-8: pushl lck+MTX_SAVEINTR; \
- movl lck+MTX_LOCK,%eax; \
- movl $ MTX_UNOWNED,reg; \
- MPLOCKED \
- cmpxchgl reg,lck+MTX_LOCK; \
- popf; \
-9:
-
-#endif /* I386_CPU */
#endif /* !LOCORE */
#endif /* __MACHINE_MUTEX_H */
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
index 99b91d0..603b4be 100644
--- a/sys/i386/i386/exception.s
+++ b/sys/i386/i386/exception.s
@@ -37,8 +37,8 @@
#include <machine/asmacros.h>
#include <sys/ipl.h>
+#include <sys/mutex.h>
#include <machine/lock.h>
-#include <machine/mutex.h>
#include <machine/psl.h>
#include <machine/trap.h>
#ifdef SMP
@@ -293,7 +293,7 @@ IDTVEC(int0x80_syscall)
jmp _doreti
ENTRY(fork_trampoline)
- MTX_EXIT(_sched_lock, %ecx)
+ MTX_EXIT(_sched_lock, MTX_SPIN)
sti /* XXX: we need this for kernel threads
created very early before interrupts
are enabled */
diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c
index 82e5a54..eca44e0 100644
--- a/sys/i386/i386/genassym.c
+++ b/sys/i386/i386/genassym.c
@@ -231,10 +231,3 @@ ASSYM(VM86_FRAMESIZE, sizeof(struct vm86frame));
ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse));
ASSYM(MTX_SAVEINTR, offsetof(struct mtx, mtx_saveintr));
-#ifdef WITNESS
-ASSYM(MTX_DEBUG, offsetof(struct mtx, mtx_debug));
-ASSYM(MTXD_WITNESS, offsetof(struct mtx_debug, mtxd_witness));
-#endif
-
-ASSYM(MTX_UNOWNED, MTX_UNOWNED);
-ASSYM(MTX_SPIN, MTX_SPIN);
diff --git a/sys/i386/include/mutex.h b/sys/i386/include/mutex.h
index 61951fd..0e58108 100644
--- a/sys/i386/include/mutex.h
+++ b/sys/i386/include/mutex.h
@@ -253,110 +253,24 @@ extern char STR_SIEN[];
#else /* !LOCORE */
/*
- * Simple assembly macros to get and release spin locks.
+ * Simple assembly macros to get and release mutexes.
*/
-#ifdef WITNESS
-#define WITNESS_ENTER(lck, reg) \
- movl lck+MTX_DEBUG,reg; \
- cmpl $0,MTXD_WITNESS(reg); \
- jz 1f; \
- pushl $0; \
- pushl $0; \
- pushl $MTX_SPIN; \
- pushl $lck; \
- call witness_enter; \
- addl $0x10,%esp; \
-1:
+#define MTX_ENTER(lck, type) \
+ pushl $0 ; /* dummy __LINE__ */ \
+ pushl $0 ; /* dummy __FILE__ */ \
+ pushl $type ; \
+ pushl $lck ; \
+ call _mtx_enter ; \
+ addl $16,%esp
+
+#define MTX_EXIT(lck, type) \
+ pushl $0 ; /* dummy __LINE__ */ \
+ pushl $0 ; /* dummy __FILE__ */ \
+ pushl $type ; \
+ pushl $lck ; \
+ call _mtx_exit ; \
+ addl $16,%esp
-#define WITNESS_EXIT(lck, reg) \
- movl lck+MTX_DEBUG,reg; \
- cmpl $0,MTXD_WITNESS(reg); \
- jz 1f; \
- pushl $0; \
- pushl $0; \
- pushl $MTX_SPIN; \
- pushl $lck; \
- call witness_exit; \
- addl $0x10,%esp; \
-1:
-
-#else
-#define WITNESS_ENTER(lck, reg)
-#define WITNESS_EXIT(lck, reg)
-#endif
-
-#if defined(I386_CPU)
-
-#define MTX_ENTER(lck, reg) \
- movl _curproc,reg; \
- pushfl; \
- cli; \
- movl reg,lck+MTX_LOCK; \
- popl lck+MTX_SAVEINTR; \
- WITNESS_ENTER(lck, reg)
-
-#define MTX_EXIT(lck, reg) \
- WITNESS_EXIT(lck, reg) \
- pushl lck+MTX_SAVEINTR; \
- movl $ MTX_UNOWNED,lck+MTX_LOCK; \
- popfl;
-
-#else /* I386_CPU */
-
-#define MTX_ENTER(lck, reg) \
- movl _curproc,reg; \
- pushfl; \
- cli; \
-9: movl $ MTX_UNOWNED,%eax; \
- MPLOCKED \
- cmpxchgl reg,lck+MTX_LOCK; \
- jnz 9b; \
- popl lck+MTX_SAVEINTR; \
- WITNESS_ENTER(lck, reg)
-
-/* Must use locked bus op (cmpxchg) when setting to unowned (barrier) */
-#define MTX_EXIT(lck, reg) \
- WITNESS_EXIT(lck, reg) \
- pushl lck+MTX_SAVEINTR; \
- movl lck+MTX_LOCK,%eax; \
- movl $ MTX_UNOWNED,reg; \
- MPLOCKED \
- cmpxchgl reg,lck+MTX_LOCK; \
- popfl;
-
-#define MTX_ENTER_WITH_RECURSION(lck, reg) \
- pushf; \
- cli; \
- movl lck+MTX_LOCK,%eax; \
- cmpl _curproc,%eax; \
- jne 7f; \
- incl lck+MTX_RECURSE; \
- jmp 8f; \
-7: movl $ MTX_UNOWNED,%eax; \
- MPLOCKED \
- cmpxchgl reg,lck+MTX_LOCK; \
- jnz 7b; \
- popl lck+MTX_SAVEINTR; \
- jmp 9f; \
-8: add $4,%esp; \
-9: WITNESS_ENTER(lck, reg)
-
-#define MTX_EXIT_WITH_RECURSION(lck, reg) \
- WITNESS_EXIT(lck, reg) \
- movl lck+MTX_RECURSE,%eax; \
- decl %eax; \
- js 8f; \
- movl %eax,lck+MTX_RECURSE; \
- jmp 9f; \
-8: pushl lck+MTX_SAVEINTR; \
- movl lck+MTX_LOCK,%eax; \
- movl $ MTX_UNOWNED,reg; \
- MPLOCKED \
- cmpxchgl reg,lck+MTX_LOCK; \
- popf; \
-9:
-
-#endif /* I386_CPU */
#endif /* !LOCORE */
#endif /* __MACHINE_MUTEX_H */
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index dff8098..e4660ec 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -45,7 +45,6 @@
#include <machine/mutex.h>
-#ifndef LOCORE
#ifdef _KERNEL
/*
@@ -88,6 +87,8 @@
#endif /* _KERNEL */
+#ifndef LOCORE
+
#ifdef WITNESS
struct mtx_debug {
struct witness *mtxd_witness;