summaryrefslogtreecommitdiffstats
path: root/sys/i386/isa/apic_vector.s
diff options
context:
space:
mode:
authorfsmp <fsmp@FreeBSD.org>1997-08-20 05:25:48 +0000
committerfsmp <fsmp@FreeBSD.org>1997-08-20 05:25:48 +0000
commit2c414e3eff799c18e5d3e8edfe37d7dc71f35c7a (patch)
tree4060f694285dcb8b0a5d572747820da61322b0df /sys/i386/isa/apic_vector.s
parentea72b8e976791b005527662091bc146ad0bfd3b6 (diff)
downloadFreeBSD-src-2c414e3eff799c18e5d3e8edfe37d7dc71f35c7a.zip
FreeBSD-src-2c414e3eff799c18e5d3e8edfe37d7dc71f35c7a.tar.gz
Preparation for moving cpl into critical region access.
Several new fine-grained locks. New FAST_INTR() methods: - separate simplelock for FAST_INTR, no more giant lock. - FAST_INTR()s no longer checks ipending on way out of ISR. sio made MP-safe (I hope).
Diffstat (limited to 'sys/i386/isa/apic_vector.s')
-rw-r--r--sys/i386/isa/apic_vector.s80
1 files changed, 72 insertions, 8 deletions
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index f73ddc5..f2a8bce 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
+ * $Id: apic_vector.s,v 1.15 1997/08/10 20:58:57 fsmp Exp $
*/
@@ -11,12 +11,41 @@
#include "i386/isa/intr_machdep.h"
+#ifdef FAST_SIMPLELOCK
+
+#define GET_FAST_INTR_LOCK \
+ pushl $_fast_intr_lock ; /* address of lock */ \
+ call _s_lock ; /* MP-safe */ \
+ addl $4,%esp
+
+#define REL_FAST_INTR_LOCK \
+ pushl $_fast_intr_lock ; /* address of lock */ \
+ call _s_unlock ; /* MP-safe */ \
+ addl $4,%esp
+
+#else /* FAST_SIMPLELOCK */
+
+#define GET_FAST_INTR_LOCK \
+ call _get_isrlock
+#define REL_FAST_INTR_LOCK \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp
+
+#endif /* FAST_SIMPLELOCK */
+
+#define REL_ISR_LOCK \
+ pushl $_mp_lock ; /* GIANT_LOCK */ \
+ call _MPrellock ; \
+ add $4, %esp
+
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
+
/*
* 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
*/
@@ -115,6 +144,8 @@
* Macros for interrupt interrupt entry, call to handler, and exit.
*/
+#ifdef FAST_WITHOUTCPL
+
#define FAST_INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
@@ -128,11 +159,45 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
- call _get_isrlock ; \
+ GET_FAST_INTR_LOCK ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
+ addl $4, %esp ; \
movl $0, lapic_eoi ; \
+ lock ; \
+ incl _cnt+V_INTR ; /* book-keeping can wait */ \
+ movl _intr_countp + (irq_num) * 4, %eax ; \
+ lock ; \
+ incl (%eax) ; \
+ MEXITCOUNT ; \
+ REL_FAST_INTR_LOCK ; \
+ MAYBE_POPL_ES ; \
+ popl %ds ; \
+ popl %edx ; \
+ popl %ecx ; \
+ popl %eax ; \
+ iret
+
+#else
+
+#define FAST_INTR(irq_num, vec_name) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ pushl %eax ; /* save only call-used registers */ \
+ pushl %ecx ; \
+ pushl %edx ; \
+ pushl %ds ; \
+ MAYBE_PUSHL_ES ; \
+ movl $KDSEL,%eax ; \
+ movl %ax,%ds ; \
+ MAYBE_MOVW_AX_ES ; \
+ FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
+ GET_FAST_INTR_LOCK ; \
+ pushl _intr_unit + (irq_num) * 4 ; \
+ call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
addl $4,%esp ; \
+ movl $0, lapic_eoi ; \
incl _cnt+V_INTR ; /* book-keeping can wait */ \
movl _intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
@@ -142,9 +207,7 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
- pushl $_mp_lock ; /* GIANT_LOCK */ \
- call _MPrellock ; \
- add $4, %esp ; \
+ REL_FAST_INTR_LOCK ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@@ -178,6 +241,9 @@ IDTVEC(vec_name) ; \
MEXITCOUNT ; \
jmp _doreti
+#endif /** FAST_WITHOUTCPL */
+
+
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
@@ -217,9 +283,7 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
- pushl $_mp_lock ; /* GIANT_LOCK */ \
- call _MPrellock ; \
- add $4, %esp ; \
+ REL_ISR_LOCK ; \
popl %es ; \
popl %ds ; \
popal ; \
OpenPOWER on IntegriCloud