author    fsmp <fsmp@FreeBSD.org>    1997-08-10 20:59:07 +0000
committer fsmp <fsmp@FreeBSD.org>    1997-08-10 20:59:07 +0000
commit    ce530fb8fa4f206001fcd8ea72ed1e688fb50ae6 (patch)
tree      ccb9245bba12bfb56b21461ccd55e2ba5dfdd235 /sys
parent    1dfa4285cfa0b9ccaf496d58a56e76482ffdaaaf (diff)
Added trap-specific lock calls: get_fpu_lock, etc.
All resolve to the GIANT_LOCK at this time; the split is purely a logical partitioning.
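
In effect, each new entry point is a thin wrapper that still takes the one
global lock: only the name differs, so call sites can later be retargeted at
finer-grained locks without touching the callers. A minimal C sketch of the
idea (not the kernel's code -- the wrapper names are taken from the diff
below, the spin in MPgetlock is illustrative):

typedef volatile int mplock_t;

static mplock_t mp_lock;		/* the GIANT_LOCK */

static void
MPgetlock(mplock_t *lk)
{
	while (__sync_lock_test_and_set(lk, 1))
		;			/* spin until the lock is free */
}

/* trap-specific names, one shared lock: a purely logical partition */
void get_fpu_lock(void)			{ MPgetlock(&mp_lock); }
void get_align_lock(void)		{ MPgetlock(&mp_lock); }
void get_syscall_lock(void)		{ MPgetlock(&mp_lock); }
void get_int0x80_syscall_lock(void)	{ MPgetlock(&mp_lock); }
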
Diffstat (limited to 'sys')
-rw-r--r--   sys/amd64/amd64/apic_vector.S   |  21
-rw-r--r--   sys/amd64/amd64/exception.S     |  79
-rw-r--r--   sys/amd64/amd64/exception.s     |  79
-rw-r--r--   sys/i386/i386/apic_vector.s     |  21
-rw-r--r--   sys/i386/i386/exception.s       |  79
-rw-r--r--   sys/i386/i386/mplock.s          | 257
-rw-r--r--   sys/i386/isa/apic_vector.s      |  21
-rw-r--r--   sys/i386/isa/ipl.s              |  15
8 files changed, 315 insertions, 257 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 2134552..f73ddc5 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -1,13 +1,16 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
+ * $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
*/
+#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** PEND_INTS, various counters */
+
#include "i386/isa/intr_machdep.h"
+
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
@@ -31,7 +34,9 @@
lock ; /* MP-safe */ \
btsl $(irq_num),iactive ; /* lazy masking */ \
jc 6f ; /* already active */ \
- TRY_ISRLOCK(irq_num) ; /* try to get lock */ \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPtrylock ; /* try to get lock */ \
+ add $4, %esp ; \
testl %eax, %eax ; /* did we get it? */ \
jnz 8f ; /* yes, enter kernel */ \
6: ; /* active or locked */ \
@@ -83,7 +88,7 @@
; \
ALIGN_TEXT ; \
1: ; \
- GET_MPLOCK /* SMP Spin lock */
+ call _get_mplock /* SMP Spin lock */
#endif /* PEND_INTS */
@@ -123,7 +128,7 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
- GET_ISRLOCK(irq_num) ; \
+ call _get_isrlock ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
movl $0, lapic_eoi ; \
@@ -137,7 +142,9 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
- REL_ISRLOCK(irq_num) ; \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@@ -210,7 +217,9 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
- REL_ISRLOCK(irq_num) ; \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp ; \
popl %es ; \
popl %ds ; \
popal ; \
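
Note: the sequences above open-code what the TRY_ISRLOCK()/REL_ISRLOCK()
macros used to hide. _MPtrylock and _MPrellock take the lock's address as a
stack argument, so each site pushes $_mp_lock, makes the call, and pops the
argument with add $4,%esp. In C terms (signatures inferred from the assembly;
the handler skeleton is only a sketch):

extern int mp_lock;			/* the giant lock word */
extern int MPtrylock(int *lock);	/* returns nonzero on success */
extern void MPrellock(int *lock);

static int
isr_try_enter(void)
{
	if (MPtrylock(&mp_lock)) {	/* pushl $_mp_lock; call; add $4,%esp */
		/* ... run the interrupt handler ... */
		MPrellock(&mp_lock);
		return (1);
	}
	return (0);			/* busy: leave the IRQ marked pending */
}
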
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 2d81b17..1bb667c 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.35 1997/08/09 00:02:31 dyson Exp $
+ * $Id: exception.s,v 1.8 1997/08/10 20:51:52 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@@ -42,27 +42,8 @@
#ifdef SMP
-#include <machine/apic.h> /* for apic_vector.s */
-#include <machine/smptests.h> /** PEND_INTS */
-
-#ifndef PEND_INTS
-/* generic giant-lock calls */
-#define GET_MPLOCK call _get_mplock
-#define REL_MPLOCK call _rel_mplock
-#endif /* PEND_INTS */
-
-/* ISR specific giant-lock calls */
-#define GET_ISRLOCK(N) call _get_isrlock
-#define TRY_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPtrylock ; \
- add $4, %esp
-#define REL_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPrellock ; \
- add $4, %esp
-
-#define MP_INSTR_LOCK lock
+#define MP_INSTR_LOCK \
+ lock /* MP-safe */
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@@ -77,13 +58,9 @@
#else
-#define GET_MPLOCK /* NOP get Kernel Mutex */
-#define REL_MPLOCK /* NOP release mutex */
-#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
-#define REL_ISRLOCK(N) /* NOP release mutex */
-#define MP_INSTR_LOCK /* NOP instruction lock */
-#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
-#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
+#define MP_INSTR_LOCK /* NOP */
+#define IMASK_LOCK /* NOP */
+#define IMASK_UNLOCK /* NOP */
#endif /* SMP */
@@ -171,19 +148,19 @@ IDTVEC(fpu)
* interrupts, but now it is fairly easy - mask nested ones the
* same as SWI_AST's.
*/
- pushl $0 /* dummy error code */
- pushl $0 /* dummy trap type */
+ pushl $0 /* dummy error code */
+ pushl $0 /* dummy trap type */
pushal
pushl %ds
- pushl %es /* now the stack frame is a trap frame */
+ pushl %es /* now stack frame is a trap frame */
movl $KDSEL,%eax
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
movl _cpl,%eax
pushl %eax
- pushl $0 /* dummy unit to finish building intr frame */
- GET_ISRLOCK(-1)
+ pushl $0 /* dummy unit to finish intr frame */
+ call _get_fpu_lock
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@@ -209,8 +186,8 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
- GET_ISRLOCK(-1)
- FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
+ call _get_align_lock
+ FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@@ -251,26 +228,26 @@ calltrap:
*/
SUPERALIGN_TEXT
IDTVEC(syscall)
- pushfl /* save eflags in tf_err for now */
- subl $4,%esp /* skip over tf_trapno */
+ pushfl /* save eflags in tf_err for now */
+ subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
+ movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
- movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
+ movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -281,23 +258,23 @@ IDTVEC(syscall)
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
- subl $8,%esp /* skip over tf_trapno and tf_err */
+ subl $8,%esp /* skip over tf_trapno and tf_err */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
+ movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_int0x80_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -314,14 +291,14 @@ ENTRY(fork_trampoline)
* have this call a non-return function to stay in kernel mode.
* initproc has it's own fork handler, but it does return.
*/
- pushl %ebx /* arg1 */
- call %esi /* function */
+ pushl %ebx /* arg1 */
+ call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index 2d81b17..1bb667c 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.35 1997/08/09 00:02:31 dyson Exp $
+ * $Id: exception.s,v 1.8 1997/08/10 20:51:52 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@@ -42,27 +42,8 @@
#ifdef SMP
-#include <machine/apic.h> /* for apic_vector.s */
-#include <machine/smptests.h> /** PEND_INTS */
-
-#ifndef PEND_INTS
-/* generic giant-lock calls */
-#define GET_MPLOCK call _get_mplock
-#define REL_MPLOCK call _rel_mplock
-#endif /* PEND_INTS */
-
-/* ISR specific giant-lock calls */
-#define GET_ISRLOCK(N) call _get_isrlock
-#define TRY_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPtrylock ; \
- add $4, %esp
-#define REL_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPrellock ; \
- add $4, %esp
-
-#define MP_INSTR_LOCK lock
+#define MP_INSTR_LOCK \
+ lock /* MP-safe */
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@@ -77,13 +58,9 @@
#else
-#define GET_MPLOCK /* NOP get Kernel Mutex */
-#define REL_MPLOCK /* NOP release mutex */
-#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
-#define REL_ISRLOCK(N) /* NOP release mutex */
-#define MP_INSTR_LOCK /* NOP instruction lock */
-#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
-#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
+#define MP_INSTR_LOCK /* NOP */
+#define IMASK_LOCK /* NOP */
+#define IMASK_UNLOCK /* NOP */
#endif /* SMP */
@@ -171,19 +148,19 @@ IDTVEC(fpu)
* interrupts, but now it is fairly easy - mask nested ones the
* same as SWI_AST's.
*/
- pushl $0 /* dummy error code */
- pushl $0 /* dummy trap type */
+ pushl $0 /* dummy error code */
+ pushl $0 /* dummy trap type */
pushal
pushl %ds
- pushl %es /* now the stack frame is a trap frame */
+ pushl %es /* now stack frame is a trap frame */
movl $KDSEL,%eax
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
movl _cpl,%eax
pushl %eax
- pushl $0 /* dummy unit to finish building intr frame */
- GET_ISRLOCK(-1)
+ pushl $0 /* dummy unit to finish intr frame */
+ call _get_fpu_lock
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@@ -209,8 +186,8 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
- GET_ISRLOCK(-1)
- FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
+ call _get_align_lock
+ FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@@ -251,26 +228,26 @@ calltrap:
*/
SUPERALIGN_TEXT
IDTVEC(syscall)
- pushfl /* save eflags in tf_err for now */
- subl $4,%esp /* skip over tf_trapno */
+ pushfl /* save eflags in tf_err for now */
+ subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
+ movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
- movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
+ movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -281,23 +258,23 @@ IDTVEC(syscall)
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
- subl $8,%esp /* skip over tf_trapno and tf_err */
+ subl $8,%esp /* skip over tf_trapno and tf_err */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
+ movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_int0x80_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -314,14 +291,14 @@ ENTRY(fork_trampoline)
* have this call a non-return function to stay in kernel mode.
* initproc has it's own fork handler, but it does return.
*/
- pushl %ebx /* arg1 */
- call %esi /* function */
+ pushl %ebx /* arg1 */
+ call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index 2134552..f73ddc5 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -1,13 +1,16 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
+ * $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
*/
+#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** PEND_INTS, various counters */
+
#include "i386/isa/intr_machdep.h"
+
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
@@ -31,7 +34,9 @@
lock ; /* MP-safe */ \
btsl $(irq_num),iactive ; /* lazy masking */ \
jc 6f ; /* already active */ \
- TRY_ISRLOCK(irq_num) ; /* try to get lock */ \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPtrylock ; /* try to get lock */ \
+ add $4, %esp ; \
testl %eax, %eax ; /* did we get it? */ \
jnz 8f ; /* yes, enter kernel */ \
6: ; /* active or locked */ \
@@ -83,7 +88,7 @@
; \
ALIGN_TEXT ; \
1: ; \
- GET_MPLOCK /* SMP Spin lock */
+ call _get_mplock /* SMP Spin lock */
#endif /* PEND_INTS */
@@ -123,7 +128,7 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
- GET_ISRLOCK(irq_num) ; \
+ call _get_isrlock ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
movl $0, lapic_eoi ; \
@@ -137,7 +142,9 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
- REL_ISRLOCK(irq_num) ; \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@@ -210,7 +217,9 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
- REL_ISRLOCK(irq_num) ; \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp ; \
popl %es ; \
popl %ds ; \
popal ; \
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
index 2d81b17..1bb667c 100644
--- a/sys/i386/i386/exception.s
+++ b/sys/i386/i386/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.35 1997/08/09 00:02:31 dyson Exp $
+ * $Id: exception.s,v 1.8 1997/08/10 20:51:52 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@@ -42,27 +42,8 @@
#ifdef SMP
-#include <machine/apic.h> /* for apic_vector.s */
-#include <machine/smptests.h> /** PEND_INTS */
-
-#ifndef PEND_INTS
-/* generic giant-lock calls */
-#define GET_MPLOCK call _get_mplock
-#define REL_MPLOCK call _rel_mplock
-#endif /* PEND_INTS */
-
-/* ISR specific giant-lock calls */
-#define GET_ISRLOCK(N) call _get_isrlock
-#define TRY_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPtrylock ; \
- add $4, %esp
-#define REL_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPrellock ; \
- add $4, %esp
-
-#define MP_INSTR_LOCK lock
+#define MP_INSTR_LOCK \
+ lock /* MP-safe */
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@@ -77,13 +58,9 @@
#else
-#define GET_MPLOCK /* NOP get Kernel Mutex */
-#define REL_MPLOCK /* NOP release mutex */
-#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
-#define REL_ISRLOCK(N) /* NOP release mutex */
-#define MP_INSTR_LOCK /* NOP instruction lock */
-#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
-#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
+#define MP_INSTR_LOCK /* NOP */
+#define IMASK_LOCK /* NOP */
+#define IMASK_UNLOCK /* NOP */
#endif /* SMP */
@@ -171,19 +148,19 @@ IDTVEC(fpu)
* interrupts, but now it is fairly easy - mask nested ones the
* same as SWI_AST's.
*/
- pushl $0 /* dummy error code */
- pushl $0 /* dummy trap type */
+ pushl $0 /* dummy error code */
+ pushl $0 /* dummy trap type */
pushal
pushl %ds
- pushl %es /* now the stack frame is a trap frame */
+ pushl %es /* now stack frame is a trap frame */
movl $KDSEL,%eax
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
movl _cpl,%eax
pushl %eax
- pushl $0 /* dummy unit to finish building intr frame */
- GET_ISRLOCK(-1)
+ pushl $0 /* dummy unit to finish intr frame */
+ call _get_fpu_lock
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@@ -209,8 +186,8 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
- GET_ISRLOCK(-1)
- FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
+ call _get_align_lock
+ FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@@ -251,26 +228,26 @@ calltrap:
*/
SUPERALIGN_TEXT
IDTVEC(syscall)
- pushfl /* save eflags in tf_err for now */
- subl $4,%esp /* skip over tf_trapno */
+ pushfl /* save eflags in tf_err for now */
+ subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
+ movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
- movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
+ movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -281,23 +258,23 @@ IDTVEC(syscall)
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
- subl $8,%esp /* skip over tf_trapno and tf_err */
+ subl $8,%esp /* skip over tf_trapno and tf_err */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
+ movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_int0x80_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -314,14 +291,14 @@ ENTRY(fork_trampoline)
* have this call a non-return function to stay in kernel mode.
* initproc has it's own fork handler, but it does return.
*/
- pushl %ebx /* arg1 */
- call %esi /* function */
+ pushl %ebx /* arg1 */
+ call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
diff --git a/sys/i386/i386/mplock.s b/sys/i386/i386/mplock.s
index 30f7c93..3a73ee5 100644
--- a/sys/i386/i386/mplock.s
+++ b/sys/i386/i386/mplock.s
@@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
- * $Id: mplock.s,v 1.14 1997/08/04 17:19:17 smp Exp smp $
+ * $Id: mplock.s,v 1.15 1997/08/10 20:51:52 smp Exp smp $
*
* Functions for locking between CPUs in a SMP system.
*
@@ -40,6 +40,25 @@ _tryhits: 9938 2196d 44cc
#define FREE_FIRST
#define GLPROFILE
+#ifdef CHEAP_TPR
+
+/* we assumme that the 'reserved bits' can be written with zeros */
+
+#else /* CHEAP_TPR */
+
+#error HEADS UP: this code needs work
+/*
+ * The APIC doc says that reserved bits must be written with whatever
+ * value they currently contain, ie you should: read, modify, write,
+ * instead of just writing new values to the TPR register. Current
+ * silicon seems happy with just writing. If the behaviour of the
+ * silicon changes, all code that access the lapic_tpr must be modified.
+ * The last version to contain such code was:
+ * $Id: mplock.s,v 1.15 1997/08/10 20:51:52 smp Exp smp $
+ */
+
+#endif /* CHEAP_TPR */
+
#ifdef GRAB_LOPRIO
/*
* Claim LOWest PRIOrity, ie. attempt to grab ALL INTerrupts.
@@ -48,44 +67,20 @@ _tryhits: 9938 2196d 44cc
/* location of saved TPR on stack */
#define TPR_TARGET 12(%esp)
-/* we assumme that the 'reserved bits' can be written with zeros */
-#ifdef CHEAP_TPR
-
/* after 1st acquire of lock we attempt to grab all hardware INTs */
-#define GRAB_HWI \
- movl $ALLHWI_LEVEL, TPR_TARGET /* task prio to 'all HWI' */
-
-#define GRAB_HWI_2 \
- movl $ALLHWI_LEVEL, lapic_tpr /* task prio to 'all HWI' */
+#define GRAB_HWI movl $ALLHWI_LEVEL, TPR_TARGET
+#define GRAB_HWI_2 movl $ALLHWI_LEVEL, lapic_tpr /* CHEAP_TPR */
/* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
-#define ARB_HWI \
- movl $LOPRIO_LEVEL, lapic_tpr /* task prio to 'arbitrate' */
-
-#else /** CHEAP_TPR */
-
-#define GRAB_HWI \
- andl $~APIC_TPR_PRIO, TPR_TARGET /* task prio to 'all HWI' */
+#define ARB_HWI movl $LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */
-#define GRAB_HWI_2 \
- andl $~APIC_TPR_PRIO, lapic_tpr /* task prio to 'all HWI' */
+#else /* GRAB_LOPRIO */
-#define ARB_HWI \
- movl lapic_tpr, %eax ; /* TPR */ \
- andl $~APIC_TPR_PRIO, %eax ; /* clear TPR field */ \
- orl $LOPRIO_LEVEL, %eax ; /* prio to arbitrate */ \
- movl %eax, lapic_tpr ; /* set it */ \
- movl (%edx), %eax /* reload %eax with lock */
+#define GRAB_HWI /* nop */
+#define GRAB_HWI_2 /* nop */
+#define ARB_HWI /* nop */
-#endif /** CHEAP_TPR */
-
-#else /** GRAB_LOPRIO */
-
-#define GRAB_HWI /* nop */
-#define GRAB_HWI_2 /* nop */
-#define ARB_HWI /* nop */
-
-#endif /** GRAB_LOPRIO */
+#endif /* GRAB_LOPRIO */
.text
@@ -308,22 +303,11 @@ NON_GPROF_ENTRY(get_mplock)
pushl %edx
/* block all HW INTs via Task Priority Register */
-#ifdef CHEAP_TPR
pushl lapic_tpr /* save current TPR */
pushfl /* save current EFLAGS */
testl $(1<<9), (%esp) /* test EI bit */
jnz 1f /* INTs currently enabled */
- movl $TPR_BLOCK_HWI, lapic_tpr
-#else
- movl lapic_tpr, %eax /* get current TPR */
- pushl %eax /* save current TPR */
- pushfl /* save current EFLAGS */
- testl $(1<<9), (%esp) /* test EI bit */
- jnz 1f /* INTs currently enabled */
- andl $~APIC_TPR_PRIO, %eax /* clear task priority field */
- orl $TPR_BLOCK_HWI, %eax /* only allow IPIs */
- movl %eax, lapic_tpr
-#endif /** CHEAP_TPR */
+ movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
sti /* allow IPI (and only IPI) INTs */
1:
pushl $_mp_lock
@@ -338,6 +322,40 @@ NON_GPROF_ENTRY(get_mplock)
ret
/***********************************************************************
+ * void try_mplock()
+ * -----------------
+ * reg %eax == 1 if success
+ */
+
+NON_GPROF_ENTRY(try_mplock)
+ pushl %ecx
+ pushl %edx
+ pushl $_mp_lock
+ call _MPtrylock
+ add $4, %esp
+ popl %edx
+ popl %ecx
+ ret
+
+/***********************************************************************
+ * void rel_mplock()
+ * -----------------
+ * All registers preserved
+ */
+
+NON_GPROF_ENTRY(rel_mplock)
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushl $_mp_lock
+ call _MPrellock
+ add $4, %esp
+ popl %edx
+ popl %ecx
+ popl %eax
+ ret
+
+/***********************************************************************
* void get_isrlock()
* -----------------
* no registers preserved, assummed the calling ISR does!
@@ -352,20 +370,11 @@ NON_GPROF_ENTRY(get_mplock)
NON_GPROF_ENTRY(get_isrlock)
/* block all HW INTs via Task Priority Register */
-#ifdef CHEAP_TPR
pushl lapic_tpr /* save current TPR */
pushfl /* save current EFLAGS */
- movl $TPR_BLOCK_HWI, lapic_tpr
-#else
- movl lapic_tpr, %eax /* get current TPR */
- pushl %eax /* save current TPR */
- pushfl /* save current EFLAGS */
- andl $~APIC_TPR_PRIO, %eax /* clear task priority field */
- orl $TPR_BLOCK_HWI, %eax /* only allow IPIs */
- movl %eax, lapic_tpr
-#endif /** CHEAP_TPR */
+ movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
sti /* allow IPI (and only IPI) INTs */
-1:
+
pushl $_mp_lock
call _MPgetlock
add $4, %esp
@@ -376,64 +385,154 @@ NON_GPROF_ENTRY(get_isrlock)
/***********************************************************************
- * void try_mplock()
+ * void try_isrlock()
* -----------------
+ * no registers preserved, assummed the calling ISR does!
* reg %eax == 1 if success
*/
-NON_GPROF_ENTRY(try_mplock)
- pushl %ecx
- pushl %edx
+NON_GPROF_ENTRY(try_isrlock)
pushl $_mp_lock
call _MPtrylock
add $4, %esp
- popl %edx
- popl %ecx
ret
+
/***********************************************************************
- * void try_isrlock()
+ * void rel_isrlock()
* -----------------
* no registers preserved, assummed the calling ISR does!
- * reg %eax == 1 if success
*/
-NON_GPROF_ENTRY(try_isrlock)
+NON_GPROF_ENTRY(rel_isrlock)
+ pushl $_mp_lock
+ call _MPrellock
+ add $4, %esp
+ ret
+
+
+/***********************************************************************
+ * FPU locks
+ */
+
+NON_GPROF_ENTRY(get_fpu_lock)
+ pushl lapic_tpr
+ pushfl
+ movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
+ sti
+ pushl $_mp_lock
+ call _MPgetlock
+ add $4, %esp
+ popfl
+ popl lapic_tpr
+ ret
+
+#ifdef notneeded
+NON_GPROF_ENTRY(try_fpu_lock)
pushl $_mp_lock
call _MPtrylock
add $4, %esp
ret
+NON_GPROF_ENTRY(rel_fpu_lock)
+ pushl $_mp_lock
+ call _MPrellock
+ add $4, %esp
+ ret
+#endif /* notneeded */
+
/***********************************************************************
- * void rel_mplock()
- * -----------------
- * All registers preserved
+ * align locks
*/
-NON_GPROF_ENTRY(rel_mplock)
- pushl %eax
- pushl %ecx
- pushl %edx
+NON_GPROF_ENTRY(get_align_lock)
+ pushl lapic_tpr
+ pushfl
+ movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
+ sti
+ pushl $_mp_lock
+ call _MPgetlock
+ add $4, %esp
+ popfl
+ popl lapic_tpr
+ ret
+
+#ifdef notneeded
+NON_GPROF_ENTRY(try_align_lock)
+ pushl $_mp_lock
+ call _MPtrylock
+ add $4, %esp
+ ret
+
+NON_GPROF_ENTRY(rel_align_lock)
pushl $_mp_lock
call _MPrellock
add $4, %esp
- popl %edx
- popl %ecx
- popl %eax
ret
+#endif /* notneeded */
+
/***********************************************************************
- * void rel_isrlock()
- * -----------------
- * no registers preserved, assummed the calling ISR does!
+ * syscall locks
*/
-NON_GPROF_ENTRY(rel_isrlock)
+NON_GPROF_ENTRY(get_syscall_lock)
+ pushl lapic_tpr
+ pushfl
+ movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
+ sti
+ pushl $_mp_lock
+ call _MPgetlock
+ add $4, %esp
+ popfl
+ popl lapic_tpr
+ ret
+
+#ifdef notneeded
+NON_GPROF_ENTRY(try_syscall_lock)
+ pushl $_mp_lock
+ call _MPtrylock
+ add $4, %esp
+ ret
+
+NON_GPROF_ENTRY(rel_syscall_lock)
+ pushl $_mp_lock
+ call _MPrellock
+ add $4, %esp
+ ret
+#endif /* notneeded */
+
+
+/***********************************************************************
+ * int0x80_syscall locks
+ */
+
+NON_GPROF_ENTRY(get_int0x80_syscall_lock)
+ pushl lapic_tpr
+ pushfl
+ movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
+ sti
+ pushl $_mp_lock
+ call _MPgetlock
+ add $4, %esp
+ popfl
+ popl lapic_tpr
+ ret
+
+#ifdef notneeded
+NON_GPROF_ENTRY(try_int0x80_syscall_lock)
+ pushl $_mp_lock
+ call _MPtrylock
+ add $4, %esp
+ ret
+
+NON_GPROF_ENTRY(rel_int0x80_syscall_lock)
pushl $_mp_lock
call _MPrellock
add $4, %esp
ret
+#endif /* notneeded */
/***********************************************************************
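
All of the get_*_lock bodies above share one shape: save the local APIC task
priority register and EFLAGS, raise the TPR so hardware interrupts are blocked
while IPIs stay deliverable, re-enable interrupts, spin in _MPgetlock, then
restore both saved values. Under CHEAP_TPR the TPR is written with a plain
store on the assumption that its reserved bits may be written as zeros; the
#error'd branch above marks where a read-modify-write would be needed instead.
A hedged C rendering of that shape (read_eflags, write_eflags and enable_intr
are assumed helpers here, and the TPR_BLOCK_HWI value is illustrative):

extern volatile unsigned int lapic_tpr;	/* local APIC task priority */
extern int mp_lock;
extern void MPgetlock(int *lock);

#define TPR_BLOCK_HWI	0xe0		/* illustrative: mask HW INTs, not IPIs */

void
get_lock_blocking_hwi(void)
{
	unsigned int saved_tpr = lapic_tpr;	/* pushl lapic_tpr */
	unsigned int saved_fl = read_eflags();	/* pushfl */

	/* CHEAP_TPR: plain store, reserved bits written as zeros */
	lapic_tpr = TPR_BLOCK_HWI;
	enable_intr();			/* sti: allow IPI (and only IPI) INTs */
	MPgetlock(&mp_lock);
	write_eflags(saved_fl);		/* popfl */
	lapic_tpr = saved_tpr;		/* popl lapic_tpr */
}

Also worth noting: try_mplock and rel_mplock preserve the caller's registers
themselves, while the isrlock and trap-specific variants deliberately do not,
since the calling ISR has already saved them.
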
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index 2134552..f73ddc5 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -1,13 +1,16 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
+ * $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
*/
+#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** PEND_INTS, various counters */
+
#include "i386/isa/intr_machdep.h"
+
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
@@ -31,7 +34,9 @@
lock ; /* MP-safe */ \
btsl $(irq_num),iactive ; /* lazy masking */ \
jc 6f ; /* already active */ \
- TRY_ISRLOCK(irq_num) ; /* try to get lock */ \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPtrylock ; /* try to get lock */ \
+ add $4, %esp ; \
testl %eax, %eax ; /* did we get it? */ \
jnz 8f ; /* yes, enter kernel */ \
6: ; /* active or locked */ \
@@ -83,7 +88,7 @@
; \
ALIGN_TEXT ; \
1: ; \
- GET_MPLOCK /* SMP Spin lock */
+ call _get_mplock /* SMP Spin lock */
#endif /* PEND_INTS */
@@ -123,7 +128,7 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
- GET_ISRLOCK(irq_num) ; \
+ call _get_isrlock ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
movl $0, lapic_eoi ; \
@@ -137,7 +142,9 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
- REL_ISRLOCK(irq_num) ; \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@@ -210,7 +217,9 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
- REL_ISRLOCK(irq_num) ; \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp ; \
popl %es ; \
popl %ds ; \
popal ; \
diff --git a/sys/i386/isa/ipl.s b/sys/i386/isa/ipl.s
index 45b0de5..acf8621 100644
--- a/sys/i386/isa/ipl.s
+++ b/sys/i386/isa/ipl.s
@@ -36,7 +36,7 @@
*
* @(#)ipl.s
*
- * $Id: ipl.s,v 1.5 1997/07/31 05:42:06 fsmp Exp $
+ * $Id: ipl.s,v 1.5 1997/08/10 20:47:53 smp Exp smp $
*/
@@ -137,11 +137,12 @@ doreti_stop:
nop
1:
#endif /* VM86 */
-#if 0
- REL_MPLOCK
-#else
- REL_ISRLOCK(-1)
-#endif
+
+ /* release the kernel lock */
+ pushl $_mp_lock /* GIANT_LOCK */
+ call _MPrellock
+ add $4, %esp
+
.globl doreti_popl_es
doreti_popl_es:
popl %es
@@ -356,4 +357,4 @@ swi_tty:
#include "i386/isa/apic_ipl.s"
#else
#include "i386/isa/icu_ipl.s"
-#endif /* APIC_IO */
+#endif /* APIC_IO */
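
One asymmetry worth noting: entry paths now take a trap-specific named lock,
but the common doreti exit path in ipl.s releases _mp_lock directly. That is
correct only because every named lock currently resolves to the same giant
lock, exactly as the commit message says. In sketch form (wrapper names from
the diff, MPrellock as inferred above):

extern int mp_lock;
extern void get_syscall_lock(void);
extern void MPrellock(int *lock);

void
syscall_path_example(void)
{
	get_syscall_lock();	/* entry: trap-specific name */
	/* ... syscall work ... */
	MPrellock(&mp_lock);	/* exit via doreti: always the giant lock */
}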