author		fsmp <fsmp@FreeBSD.org>	1997-08-10 20:59:07 +0000
committer	fsmp <fsmp@FreeBSD.org>	1997-08-10 20:59:07 +0000
commit		ce530fb8fa4f206001fcd8ea72ed1e688fb50ae6 (patch)
tree		ccb9245bba12bfb56b21461ccd55e2ba5dfdd235 /sys/amd64
parent		1dfa4285cfa0b9ccaf496d58a56e76482ffdaaaf (diff)
Added trap-specific lock calls: get_fpu_lock, etc.
All resolve to the GIANT_LOCK at this time; the split is purely a logical partitioning.
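
[Editor's note] A minimal C sketch of how the new trap-specific entry points presumably resolve, given that the commit says they all collapse to the giant lock today. The wrapper names mirror the symbols called from the assembly below (_get_fpu_lock, _get_align_lock, _get_syscall_lock, _get_int0x80_syscall_lock); the bodies and the get_mplock() declaration are assumptions, since the real definitions live outside this diff:

	/*
	 * Assumed sketch: every trap-specific lock call funnels into the
	 * giant lock for now.  Keeping one entry point per trap class is a
	 * purely logical partitioning -- each can later be repointed at a
	 * finer-grained lock without touching the assembly call sites.
	 */
	void get_mplock(void);			/* spin until GIANT_LOCK is held (assumed) */

	void get_fpu_lock(void)             { get_mplock(); }	/* FPU (npx) trap */
	void get_align_lock(void)           { get_mplock(); }	/* alltraps/calltrap */
	void get_syscall_lock(void)         { get_mplock(); }	/* lcall 7,0 syscall */
	void get_int0x80_syscall_lock(void) { get_mplock(); }	/* int 0x80 syscall */
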
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/apic_vector.S	21
-rw-r--r--	sys/amd64/amd64/exception.S	79
-rw-r--r--	sys/amd64/amd64/exception.s	79
3 files changed, 71 insertions, 108 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 2134552..f73ddc5 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -1,13 +1,16 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
+ * $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
*/
+#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** PEND_INTS, various counters */
+
#include "i386/isa/intr_machdep.h"
+
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
@@ -31,7 +34,9 @@
lock ; /* MP-safe */ \
btsl $(irq_num),iactive ; /* lazy masking */ \
jc 6f ; /* already active */ \
- TRY_ISRLOCK(irq_num) ; /* try to get lock */ \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPtrylock ; /* try to get lock */ \
+ add $4, %esp ; \
testl %eax, %eax ; /* did we get it? */ \
jnz 8f ; /* yes, enter kernel */ \
6: ; /* active or locked */ \
@@ -83,7 +88,7 @@
; \
ALIGN_TEXT ; \
1: ; \
- GET_MPLOCK /* SMP Spin lock */
+ call _get_mplock /* SMP Spin lock */
#endif /* PEND_INTS */
@@ -123,7 +128,7 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
- GET_ISRLOCK(irq_num) ; \
+ call _get_isrlock ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
movl $0, lapic_eoi ; \
@@ -137,7 +142,9 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
- REL_ISRLOCK(irq_num) ; \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@@ -210,7 +217,9 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
- REL_ISRLOCK(irq_num) ; \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp ; \
popl %es ; \
popl %ds ; \
popal ; \
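
[Editor's note] The inlined sequences above ("pushl $_mp_lock ; call _MPtrylock ; add $4, %esp") are ordinary one-argument cdecl calls: the address of the giant lock is pushed, the helper is called, and the caller pops the argument. A hedged C-level view of the two helpers, with prototypes inferred from these call sites (the "testl %eax, %eax ; jnz 8f" check implies MPtrylock returns nonzero on success):

	/* Prototypes are assumptions reconstructed from the call sites. */
	int  MPtrylock(void *mp_lock);	/* try once; nonzero in %eax = lock acquired */
	void MPrellock(void *mp_lock);	/* release the giant lock */
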
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 2d81b17..1bb667c 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.35 1997/08/09 00:02:31 dyson Exp $
+ * $Id: exception.s,v 1.8 1997/08/10 20:51:52 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@@ -42,27 +42,8 @@
#ifdef SMP
-#include <machine/apic.h> /* for apic_vector.s */
-#include <machine/smptests.h> /** PEND_INTS */
-
-#ifndef PEND_INTS
-/* generic giant-lock calls */
-#define GET_MPLOCK call _get_mplock
-#define REL_MPLOCK call _rel_mplock
-#endif /* PEND_INTS */
-
-/* ISR specific giant-lock calls */
-#define GET_ISRLOCK(N) call _get_isrlock
-#define TRY_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPtrylock ; \
- add $4, %esp
-#define REL_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPrellock ; \
- add $4, %esp
-
-#define MP_INSTR_LOCK lock
+#define MP_INSTR_LOCK \
+ lock /* MP-safe */
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@@ -77,13 +58,9 @@
#else
-#define GET_MPLOCK /* NOP get Kernel Mutex */
-#define REL_MPLOCK /* NOP release mutex */
-#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
-#define REL_ISRLOCK(N) /* NOP release mutex */
-#define MP_INSTR_LOCK /* NOP instruction lock */
-#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
-#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
+#define MP_INSTR_LOCK /* NOP */
+#define IMASK_LOCK /* NOP */
+#define IMASK_UNLOCK /* NOP */
#endif /* SMP */
@@ -171,19 +148,19 @@ IDTVEC(fpu)
* interrupts, but now it is fairly easy - mask nested ones the
* same as SWI_AST's.
*/
- pushl $0 /* dummy error code */
- pushl $0 /* dummy trap type */
+ pushl $0 /* dummy error code */
+ pushl $0 /* dummy trap type */
pushal
pushl %ds
- pushl %es /* now the stack frame is a trap frame */
+ pushl %es /* now stack frame is a trap frame */
movl $KDSEL,%eax
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
movl _cpl,%eax
pushl %eax
- pushl $0 /* dummy unit to finish building intr frame */
- GET_ISRLOCK(-1)
+ pushl $0 /* dummy unit to finish intr frame */
+ call _get_fpu_lock
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@@ -209,8 +186,8 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
- GET_ISRLOCK(-1)
- FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
+ call _get_align_lock
+ FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@@ -251,26 +228,26 @@ calltrap:
*/
SUPERALIGN_TEXT
IDTVEC(syscall)
- pushfl /* save eflags in tf_err for now */
- subl $4,%esp /* skip over tf_trapno */
+ pushfl /* save eflags in tf_err for now */
+ subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
+ movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
- movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
+ movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -281,23 +258,23 @@ IDTVEC(syscall)
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
- subl $8,%esp /* skip over tf_trapno and tf_err */
+ subl $8,%esp /* skip over tf_trapno and tf_err */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
+ movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_int0x80_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -314,14 +291,14 @@ ENTRY(fork_trampoline)
* have this call a non-return function to stay in kernel mode.
* initproc has it's own fork handler, but it does return.
*/
- pushl %ebx /* arg1 */
- call %esi /* function */
+ pushl %ebx /* arg1 */
+ call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index 2d81b17..1bb667c 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.35 1997/08/09 00:02:31 dyson Exp $
+ * $Id: exception.s,v 1.8 1997/08/10 20:51:52 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@@ -42,27 +42,8 @@
#ifdef SMP
-#include <machine/apic.h> /* for apic_vector.s */
-#include <machine/smptests.h> /** PEND_INTS */
-
-#ifndef PEND_INTS
-/* generic giant-lock calls */
-#define GET_MPLOCK call _get_mplock
-#define REL_MPLOCK call _rel_mplock
-#endif /* PEND_INTS */
-
-/* ISR specific giant-lock calls */
-#define GET_ISRLOCK(N) call _get_isrlock
-#define TRY_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPtrylock ; \
- add $4, %esp
-#define REL_ISRLOCK(N) \
- pushl $_mp_lock ; \
- call _MPrellock ; \
- add $4, %esp
-
-#define MP_INSTR_LOCK lock
+#define MP_INSTR_LOCK \
+ lock /* MP-safe */
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@@ -77,13 +58,9 @@
#else
-#define GET_MPLOCK /* NOP get Kernel Mutex */
-#define REL_MPLOCK /* NOP release mutex */
-#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
-#define REL_ISRLOCK(N) /* NOP release mutex */
-#define MP_INSTR_LOCK /* NOP instruction lock */
-#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
-#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
+#define MP_INSTR_LOCK /* NOP */
+#define IMASK_LOCK /* NOP */
+#define IMASK_UNLOCK /* NOP */
#endif /* SMP */
@@ -171,19 +148,19 @@ IDTVEC(fpu)
* interrupts, but now it is fairly easy - mask nested ones the
* same as SWI_AST's.
*/
- pushl $0 /* dummy error code */
- pushl $0 /* dummy trap type */
+ pushl $0 /* dummy error code */
+ pushl $0 /* dummy trap type */
pushal
pushl %ds
- pushl %es /* now the stack frame is a trap frame */
+ pushl %es /* now stack frame is a trap frame */
movl $KDSEL,%eax
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
movl _cpl,%eax
pushl %eax
- pushl $0 /* dummy unit to finish building intr frame */
- GET_ISRLOCK(-1)
+ pushl $0 /* dummy unit to finish intr frame */
+ call _get_fpu_lock
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@@ -209,8 +186,8 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
- GET_ISRLOCK(-1)
- FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
+ call _get_align_lock
+ FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@@ -251,26 +228,26 @@ calltrap:
*/
SUPERALIGN_TEXT
IDTVEC(syscall)
- pushfl /* save eflags in tf_err for now */
- subl $4,%esp /* skip over tf_trapno */
+ pushfl /* save eflags in tf_err for now */
+ subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
+ movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
- movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
+ movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -281,23 +258,23 @@ IDTVEC(syscall)
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
- subl $8,%esp /* skip over tf_trapno and tf_err */
+ subl $8,%esp /* skip over tf_trapno and tf_err */
pushal
pushl %ds
pushl %es
- movl $KDSEL,%eax /* switch to kernel segments */
+ movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
- movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
+ movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
- GET_ISRLOCK(-1)
+ call _get_int0x80_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@@ -314,14 +291,14 @@ ENTRY(fork_trampoline)
* have this call a non-return function to stay in kernel mode.
* initproc has it's own fork handler, but it does return.
*/
- pushl %ebx /* arg1 */
- call %esi /* function */
+ pushl %ebx /* arg1 */
+ call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
- pushl $0 /* cpl to restore */
+ pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT