author     fsmp <fsmp@FreeBSD.org>  1997-08-20 05:25:48 +0000
committer  fsmp <fsmp@FreeBSD.org>  1997-08-20 05:25:48 +0000
commit     2c414e3eff799c18e5d3e8edfe37d7dc71f35c7a (patch)
tree       4060f694285dcb8b0a5d572747820da61322b0df /sys/amd64
parent     ea72b8e976791b005527662091bc146ad0bfd3b6 (diff)
Preparation for moving cpl into critical region access.

Several new fine-grained locks.
New FAST_INTR() methods:
  - separate simplelock for FAST_INTR, no more giant lock.
  - FAST_INTR()s no longer check ipending on the way out of the ISR.
sio made MP-safe (I hope).
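Below is a minimal C sketch of the locking model the message describes, for illustration only: it assumes the kernel's s_lock()/s_unlock() simplelock primitives, and the wrapper name and handler-array declarations are stand-ins, not code from this commit.

    /*
     * Sketch only: FAST_INTR() now takes its own simplelock instead of
     * the giant mp_lock, and no longer re-checks ipending on exit.
     */
    extern struct simplelock fast_intr_lock;
    extern void (*intr_handler[])(void *);	/* assumed layout */
    extern void *intr_unit[];

    static void
    fast_intr_sketch(int irq)
    {
    	s_lock(&fast_intr_lock);		/* was: giant mp_lock */
    	(*intr_handler[irq])(intr_unit[irq]);	/* do the work ASAP */
    	s_unlock(&fast_intr_lock);
    	/* no ipending re-check on the way out of the ISR */
    }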
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/apic_vector.S |  80
-rw-r--r--  sys/amd64/amd64/exception.S   |  56
-rw-r--r--  sys/amd64/amd64/exception.s   |  56
-rw-r--r--  sys/amd64/amd64/mp_machdep.c  |  21
-rw-r--r--  sys/amd64/amd64/mptable.c     |  21
-rw-r--r--  sys/amd64/amd64/trap.c        |  10
-rw-r--r--  sys/amd64/include/mptable.h   |  21
7 files changed, 207 insertions(+), 58 deletions(-)
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index f73ddc5..f2a8bce 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
+ * $Id: apic_vector.s,v 1.15 1997/08/10 20:58:57 fsmp Exp $
*/
@@ -11,12 +11,41 @@
#include "i386/isa/intr_machdep.h"
+#ifdef FAST_SIMPLELOCK
+
+#define GET_FAST_INTR_LOCK \
+ pushl $_fast_intr_lock ; /* address of lock */ \
+ call _s_lock ; /* MP-safe */ \
+ addl $4,%esp
+
+#define REL_FAST_INTR_LOCK \
+ pushl $_fast_intr_lock ; /* address of lock */ \
+ call _s_unlock ; /* MP-safe */ \
+ addl $4,%esp
+
+#else /* FAST_SIMPLELOCK */
+
+#define GET_FAST_INTR_LOCK \
+ call _get_isrlock
+#define REL_FAST_INTR_LOCK \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp
+
+#endif /* FAST_SIMPLELOCK */
+
+#define REL_ISR_LOCK \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp
+
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
+
/*
* 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
*/
@@ -115,6 +144,8 @@
 * Macros for interrupt entry, call to handler, and exit.
*/
+#ifdef FAST_WITHOUTCPL
+
#define FAST_INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
@@ -128,11 +159,45 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
- call _get_isrlock ; \
+ GET_FAST_INTR_LOCK ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
+ addl $4, %esp ; \
movl $0, lapic_eoi ; \
+ lock ; \
+ incl _cnt+V_INTR ; /* book-keeping can wait */ \
+ movl _intr_countp + (irq_num) * 4, %eax ; \
+ lock ; \
+ incl (%eax) ; \
+ MEXITCOUNT ; \
+ REL_FAST_INTR_LOCK ; \
+ MAYBE_POPL_ES ; \
+ popl %ds ; \
+ popl %edx ; \
+ popl %ecx ; \
+ popl %eax ; \
+ iret
+
+#else
+
+#define FAST_INTR(irq_num, vec_name) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ pushl %eax ; /* save only call-used registers */ \
+ pushl %ecx ; \
+ pushl %edx ; \
+ pushl %ds ; \
+ MAYBE_PUSHL_ES ; \
+ movl $KDSEL,%eax ; \
+ movl %ax,%ds ; \
+ MAYBE_MOVW_AX_ES ; \
+ FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
+ GET_FAST_INTR_LOCK ; \
+ pushl _intr_unit + (irq_num) * 4 ; \
+ call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
addl $4,%esp ; \
+ movl $0, lapic_eoi ; \
incl _cnt+V_INTR ; /* book-keeping can wait */ \
movl _intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
@@ -142,9 +207,7 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
- pushl $_mp_lock ; /* GIANT_LOCK */ \
- call _MPrellock ; \
- add $4, %esp ; \
+ REL_FAST_INTR_LOCK ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@@ -178,6 +241,9 @@ IDTVEC(vec_name) ; \
MEXITCOUNT ; \
jmp _doreti
+#endif /** FAST_WITHOUTCPL */
+
+
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
@@ -217,9 +283,7 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
- pushl $_mp_lock ; /* GIANT_LOCK */ \
- call _MPrellock ; \
- add $4, %esp ; \
+ REL_ISR_LOCK ; \
popl %es ; \
popl %ds ; \
popal ; \
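The "lock ; incl" pairs added to the FAST_INTR() body above make the counter updates SMP-atomic. As a C illustration (an assumed helper, not something this commit defines), the book-keeping amounts to:

    /* Sketch: "lock ; incl mem" as a GCC inline-asm helper. */
    static __inline void
    atomic_incl(volatile unsigned int *p)
    {
    	__asm __volatile("lock; incl %0" : "+m" (*p));
    }

    /* so the FAST_INTR() book-keeping is roughly: */
    /*	atomic_incl(&cnt.v_intr);	*/
    /*	atomic_incl(intr_countp[irq]);	*/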
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 6305724..09635fa 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.37 1997/08/10 20:59:06 fsmp Exp $
+ * $Id: exception.s,v 1.38 1997/08/10 21:18:01 fsmp Exp $
*/
#include "npx.h" /* NNPX */
@@ -42,8 +42,12 @@
#ifdef SMP
-#define MP_INSTR_LOCK \
- lock /* MP-safe */
+#define MPLOCKED lock ;
+
+#define FPU_LOCK call _get_fpu_lock
+#define ALIGN_LOCK call _get_align_lock
+#define SYSCALL_LOCK call _get_syscall_lock
+#define ALTSYSCALL_LOCK call _get_altsyscall_lock
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@@ -56,11 +60,25 @@
call _s_unlock ; /* MP-safe */ \
addl $4,%esp
-#else
+/* protects cpl updates as a critical region */
+#define CPL_LOCK \
+ pushl $_cpl_lock ; /* address of lock */ \
+ call _s_lock ; /* MP-safe */ \
+ addl $4,%esp
+
+#define CPL_UNLOCK \
+ pushl $_cpl_lock ; /* address of lock */ \
+ call _s_unlock ; /* MP-safe */ \
+ addl $4,%esp
-#define MP_INSTR_LOCK /* NOP */
-#define IMASK_LOCK /* NOP */
-#define IMASK_UNLOCK /* NOP */
+#else /* SMP */
+
+#define MPLOCKED /* NOP */
+
+#define FPU_LOCK /* NOP */
+#define ALIGN_LOCK /* NOP */
+#define SYSCALL_LOCK /* NOP */
+#define ALTSYSCALL_LOCK /* NOP */
#endif /* SMP */
@@ -160,10 +178,8 @@ IDTVEC(fpu)
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
-#ifdef SMP
- call _get_fpu_lock
-#endif /* SMP */
- incl _cnt+V_TRAP
+ MPLOCKED incl _cnt+V_TRAP
+ FPU_LOCK
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
call _npxintr
@@ -188,11 +204,9 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
-#ifdef SMP
- call _get_align_lock
-#endif /* SMP */
+ ALIGN_LOCK
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
- incl _cnt+V_TRAP
+ MPLOCKED incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@@ -244,10 +258,8 @@ IDTVEC(syscall)
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
-#ifdef SMP
- call _get_syscall_lock
-#endif /* SMP */
- incl _cnt+V_SYSCALL
+ SYSCALL_LOCK
+ MPLOCKED incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
@@ -273,10 +285,8 @@ IDTVEC(int0x80_syscall)
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
-#ifdef SMP
- call _get_int0x80_syscall_lock
-#endif /* SMP */
- incl _cnt+V_SYSCALL
+ ALTSYSCALL_LOCK
+ MPLOCKED incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index 6305724..09635fa 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.37 1997/08/10 20:59:06 fsmp Exp $
+ * $Id: exception.s,v 1.38 1997/08/10 21:18:01 fsmp Exp $
*/
#include "npx.h" /* NNPX */
@@ -42,8 +42,12 @@
#ifdef SMP
-#define MP_INSTR_LOCK \
- lock /* MP-safe */
+#define MPLOCKED lock ;
+
+#define FPU_LOCK call _get_fpu_lock
+#define ALIGN_LOCK call _get_align_lock
+#define SYSCALL_LOCK call _get_syscall_lock
+#define ALTSYSCALL_LOCK call _get_altsyscall_lock
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@@ -56,11 +60,25 @@
call _s_unlock ; /* MP-safe */ \
addl $4,%esp
-#else
+/* protects cpl updates as a critical region */
+#define CPL_LOCK \
+ pushl $_cpl_lock ; /* address of lock */ \
+ call _s_lock ; /* MP-safe */ \
+ addl $4,%esp
+
+#define CPL_UNLOCK \
+ pushl $_cpl_lock ; /* address of lock */ \
+ call _s_unlock ; /* MP-safe */ \
+ addl $4,%esp
-#define MP_INSTR_LOCK /* NOP */
-#define IMASK_LOCK /* NOP */
-#define IMASK_UNLOCK /* NOP */
+#else /* SMP */
+
+#define MPLOCKED /* NOP */
+
+#define FPU_LOCK /* NOP */
+#define ALIGN_LOCK /* NOP */
+#define SYSCALL_LOCK /* NOP */
+#define ALTSYSCALL_LOCK /* NOP */
#endif /* SMP */
@@ -160,10 +178,8 @@ IDTVEC(fpu)
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
-#ifdef SMP
- call _get_fpu_lock
-#endif /* SMP */
- incl _cnt+V_TRAP
+ MPLOCKED incl _cnt+V_TRAP
+ FPU_LOCK
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
call _npxintr
@@ -188,11 +204,9 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
-#ifdef SMP
- call _get_align_lock
-#endif /* SMP */
+ ALIGN_LOCK
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
- incl _cnt+V_TRAP
+ MPLOCKED incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@@ -244,10 +258,8 @@ IDTVEC(syscall)
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
-#ifdef SMP
- call _get_syscall_lock
-#endif /* SMP */
- incl _cnt+V_SYSCALL
+ SYSCALL_LOCK
+ MPLOCKED incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
@@ -273,10 +285,8 @@ IDTVEC(int0x80_syscall)
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
-#ifdef SMP
- call _get_int0x80_syscall_lock
-#endif /* SMP */
- incl _cnt+V_SYSCALL
+ ALTSYSCALL_LOCK
+ MPLOCKED incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
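CPL_LOCK/CPL_UNLOCK are defined here but not yet used; the commit message calls this preparation. Once cpl moves into the critical region, an update would presumably be bracketed like the sketch below (assumed usage, not part of this diff):

    extern struct simplelock cpl_lock;
    extern unsigned int cpl;

    static void
    set_cpl_bits_sketch(unsigned int mask)
    {
    	s_lock(&cpl_lock);	/* CPL_LOCK */
    	cpl |= mask;		/* now safe against other CPUs */
    	s_unlock(&cpl_lock);	/* CPL_UNLOCK */
    }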
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index ce107d9..e5fb310 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.40 1997/08/09 23:01:03 fsmp Exp $
+ * $Id: mp_machdep.c,v 1.41 1997/08/10 19:32:38 fsmp Exp $
*/
#include "opt_smp.h"
@@ -1418,6 +1418,10 @@ default_mp_table(int type)
/*
* initialize all the SMP locks
*/
+
+/* lock the com (tty) data structures */
+struct simplelock com_lock;
+
static void
init_locks(void)
{
@@ -1427,8 +1431,23 @@ init_locks(void)
*/
mp_lock = 0x00000001;
+ /* ISR uses its own "giant lock" */
+ isr_lock = 0x00000000;
+
+ /* serializes FAST_INTR() accesses */
+ s_lock_init((struct simplelock*)&fast_intr_lock);
+
+ /* serializes INTR() accesses */
+ s_lock_init((struct simplelock*)&intr_lock);
+
/* locks the IO APIC and apic_imen accesses */
s_lock_init((struct simplelock*)&imen_lock);
+
+ /* locks cpl accesses */
+ s_lock_init((struct simplelock*)&cpl_lock);
+
+ /* locks com (tty) data/hardware accesses: a FASTINTR() */
+ s_lock_init((struct simplelock*)&com_lock);
}
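com_lock serializes the sio (tty) driver's data and hardware accesses from its FAST_INTR() handler. The driver side is outside sys/amd64 and not shown in this diff; its use of the lock would presumably look like this sketch (the COM_LOCK wrappers and handler name are assumptions):

    extern struct simplelock com_lock;

    #define COM_LOCK()	s_lock(&com_lock)
    #define COM_UNLOCK()	s_unlock(&com_lock)

    static void
    siointr_sketch(void *arg)
    {
    	COM_LOCK();	/* keep other CPUs off the chip registers */
    	/* ... the driver's FAST_INTR() handler body ... */
    	COM_UNLOCK();
    }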
diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c
index ce107d9..e5fb310 100644
--- a/sys/amd64/amd64/mptable.c
+++ b/sys/amd64/amd64/mptable.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.40 1997/08/09 23:01:03 fsmp Exp $
+ * $Id: mp_machdep.c,v 1.41 1997/08/10 19:32:38 fsmp Exp $
*/
#include "opt_smp.h"
@@ -1418,6 +1418,10 @@ default_mp_table(int type)
/*
* initialize all the SMP locks
*/
+
+/* lock the com (tty) data structures */
+struct simplelock com_lock;
+
static void
init_locks(void)
{
@@ -1427,8 +1431,23 @@ init_locks(void)
*/
mp_lock = 0x00000001;
+ /* ISR uses its own "giant lock" */
+ isr_lock = 0x00000000;
+
+ /* serializes FAST_INTR() accesses */
+ s_lock_init((struct simplelock*)&fast_intr_lock);
+
+ /* serializes INTR() accesses */
+ s_lock_init((struct simplelock*)&intr_lock);
+
/* locks the IO APIC and apic_imen accesses */
s_lock_init((struct simplelock*)&imen_lock);
+
+ /* locks cpl accesses */
+ s_lock_init((struct simplelock*)&cpl_lock);
+
+ /* locks com (tty) data/hardware accesses: a FASTINTR() */
+ s_lock_init((struct simplelock*)&com_lock);
}
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 72df964..de901c0 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
- * $Id: trap.c,v 1.104 1997/08/12 19:07:42 dyson Exp $
+ * $Id: trap.c,v 1.1 1997/08/19 20:26:01 smp Exp smp $
*/
/*
@@ -778,6 +778,14 @@ trap_fatal(frame)
printf("bio ");
if (cpl == 0)
printf("none");
+#ifdef SMP
+/**
+ * XXX FIXME:
+ * we probably SHOULD have stopped the other CPUs before now!
+ * another CPU COULD have been touching cpl at this moment...
+ */
+ printf(" <- SMP: XXX");
+#endif
printf("\n");
#ifdef KDB
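The XXX comment above notes that the other CPUs should already have been stopped before cpl is printed, since another CPU could still be modifying it. A follow-up fix would presumably quiesce them first, along these lines (stop_other_cpus() is a placeholder name, not something this commit provides):

    #ifdef SMP
    	stop_other_cpus();	/* placeholder: IPI the rest into a spin loop */
    #endif
    	/* cpl can no longer change underneath the diagnostic printfs */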
diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h
index ce107d9..e5fb310 100644
--- a/sys/amd64/include/mptable.h
+++ b/sys/amd64/include/mptable.h
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.40 1997/08/09 23:01:03 fsmp Exp $
+ * $Id: mp_machdep.c,v 1.41 1997/08/10 19:32:38 fsmp Exp $
*/
#include "opt_smp.h"
@@ -1418,6 +1418,10 @@ default_mp_table(int type)
/*
* initialize all the SMP locks
*/
+
+/* lock the com (tty) data structures */
+struct simplelock com_lock;
+
static void
init_locks(void)
{
@@ -1427,8 +1431,23 @@ init_locks(void)
*/
mp_lock = 0x00000001;
+ /* ISR uses its own "giant lock" */
+ isr_lock = 0x00000000;
+
+ /* serializes FAST_INTR() accesses */
+ s_lock_init((struct simplelock*)&fast_intr_lock);
+
+ /* serializes INTR() accesses */
+ s_lock_init((struct simplelock*)&intr_lock);
+
/* locks the IO APIC and apic_imen accesses */
s_lock_init((struct simplelock*)&imen_lock);
+
+ /* locks cpl accesses */
+ s_lock_init((struct simplelock*)&cpl_lock);
+
+ /* locks com (tty) data/hardware accesses: a FASTINTR() */
+ s_lock_init((struct simplelock*)&com_lock);
}