summaryrefslogtreecommitdiffstats
path: root/sys/i386/isa
diff options
context:
space:
mode:
authortegge <tegge@FreeBSD.org>1998-03-03 22:56:30 +0000
committertegge <tegge@FreeBSD.org>1998-03-03 22:56:30 +0000
commit9f3982f0f6d7493912022ff7b37436e9d976fb84 (patch)
tree2f6bf35c42516c9c0cdb317a15559bbb0da91450 /sys/i386/isa
parentbeae57c5b35bad7c8aa9705208f9552264588380 (diff)
downloadFreeBSD-src-9f3982f0f6d7493912022ff7b37436e9d976fb84.zip
FreeBSD-src-9f3982f0f6d7493912022ff7b37436e9d976fb84.tar.gz
When entering the apic version of slow interrupt handler, level
interrupts are masked, and EOI is sent iff the corresponding ISR bit is set in the local apic. If the CPU cannot obtain the interrupt service lock (currently the global kernel lock) the interrupt is forwarded to the CPU holding that lock. Clock interrupts now have higher priority than other slow interrupts.
Diffstat (limited to 'sys/i386/isa')
-rw-r--r--sys/i386/isa/apic_ipl.s5
-rw-r--r--sys/i386/isa/apic_vector.s343
-rw-r--r--sys/i386/isa/intr_machdep.c38
-rw-r--r--sys/i386/isa/intr_machdep.h6
-rw-r--r--sys/i386/isa/ipl.s26
-rw-r--r--sys/i386/isa/nmi.c38
6 files changed, 389 insertions, 67 deletions
diff --git a/sys/i386/isa/apic_ipl.s b/sys/i386/isa/apic_ipl.s
index 7635445..bc38b47 100644
--- a/sys/i386/isa/apic_ipl.s
+++ b/sys/i386/isa/apic_ipl.s
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: apic_ipl.s,v 1.16 1997/09/07 22:02:28 fsmp Exp $
+ * $Id: apic_ipl.s,v 1.17 1997/12/15 02:18:33 tegge Exp $
*/
@@ -195,6 +195,7 @@ _vec8254:
lock /* MP-safe */
andl %eax, iactive
MEXITCOUNT
+ APIC_ITRACE(apic_itrace_splz, 0, APIC_ITRACE_SPLZ)
movl _Xintr8254, %eax
jmp %eax /* XXX might need _Xfastintr# */
@@ -212,6 +213,7 @@ vec8:
lock /* MP-safe */
andl $~IRQ_BIT(8), iactive /* lazy masking */
MEXITCOUNT
+ APIC_ITRACE(apic_itrace_splz, 8, APIC_ITRACE_SPLZ)
jmp _Xintr8 /* XXX might need _Xfastintr8 */
/*
@@ -229,6 +231,7 @@ __CONCAT(vec,irq_num): ; \
lock ; /* MP-safe */ \
andl $~IRQ_BIT(irq_num), iactive ; /* lazy masking */ \
MEXITCOUNT ; \
+ APIC_ITRACE(apic_itrace_splz, irq_num, APIC_ITRACE_SPLZ) ; \
jmp __CONCAT(_Xintr,irq_num)
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index 4603440..05cf190 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.25 1998/01/15 07:33:58 gibbs Exp $
+ * $Id: apic_vector.s,v 1.26 1998/03/03 20:55:24 tegge Exp $
*/
@@ -166,43 +166,64 @@ IDTVEC(vec_name) ; \
popal ; \
addl $4+4,%esp
-/*
- * Test to see whether we are handling an edge or level triggered INT.
- * Level-triggered INTs must still be masked as we don't clear the source,
- * and the EOI cycle would cause redundant INTs to occur.
- */
-#define MASK_LEVEL_IRQ(irq_num) \
- testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
- jz 8f ; /* edge, don't mask */ \
+#define MASK_IRQ(irq_num) \
IMASK_LOCK ; /* into critical reg */ \
+ testl $IRQ_BIT(irq_num), _apic_imen ; \
+ jne 7f ; /* masked, don't mask */ \
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
movl _ioapic, %ecx ; /* ioapic[0] addr */ \
movl $REDTBL_IDX(irq_num), (%ecx) ; /* write the index */ \
movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
orl $IOART_INTMASK, %eax ; /* set the mask */ \
movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
- IMASK_UNLOCK ; \
-8:
+7: ; /* already masked */ \
+ IMASK_UNLOCK
+/*
+ * Test to see whether we are handling an edge or level triggered INT.
+ * Level-triggered INTs must still be masked as we don't clear the source,
+ * and the EOI cycle would cause redundant INTs to occur.
+ */
+#define MASK_LEVEL_IRQ(irq_num) \
+ testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
+ jz 9f ; /* edge, don't mask */ \
+ MASK_IRQ(irq_num) ; \
+9:
+
+#ifdef APIC_INTR_REORDER
+#define EOI_IRQ(irq_num) \
+ movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
+ movl (%eax), %eax ; \
+ testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
+ jz 9f ; /* not active */ \
+ movl $0, lapic_eoi ; \
+ APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
+9:
+
+#else
+#define EOI_IRQ(irq_num) \
+ testl $IRQ_BIT(irq_num), lapic_isr1; \
+ jz 9f ; /* not active */ \
+ movl $0, lapic_eoi; \
+ APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
+9:
+#endif
+
+
/*
 * Test to see if the source is currently masked, clear if so.
*/
#define UNMASK_IRQ(irq_num) \
IMASK_LOCK ; /* into critical reg */ \
testl $IRQ_BIT(irq_num), _apic_imen ; \
- jne 7f ; /* bit set, masked */ \
- testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
- jz 9f ; /* edge, don't EOI */ \
- movl $0, lapic_eoi ; /* should be safe */ \
- jmp 9f ; /* skip unmasking */ \
-7: \
+ je 7f ; /* bit clear, not masked */ \
andl $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */ \
movl _ioapic,%ecx ; /* ioapic[0]addr */ \
movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \
movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
-9: ; \
+7: ; \
IMASK_UNLOCK
#ifdef INTR_SIMPLELOCK
@@ -213,11 +234,75 @@ IDTVEC(vec_name) ; \
#define ENLOCK \
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
- jz 1f
+ jz 3f
#define DELOCK ISR_RELLOCK
#define LATELOCK
#endif
+#ifdef APIC_INTR_DIAGNOSTIC
+#ifdef APIC_INTR_DIAGNOSTIC_IRQ
+log_intr_event:
+ pushf
+ cli
+ pushl $CNAME(apic_itrace_debuglock)
+ call _s_lock_np
+ addl $4, %esp
+ movl CNAME(apic_itrace_debugbuffer_idx), %ecx
+ andl $32767, %ecx
+ movl _cpuid, %eax
+ shll $8, %eax
+ orl 8(%esp), %eax
+ movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
+ incl %ecx
+ andl $32767, %ecx
+ movl %ecx, CNAME(apic_itrace_debugbuffer_idx)
+ pushl $CNAME(apic_itrace_debuglock)
+ call _s_unlock_np
+ addl $4, %esp
+ popf
+ ret
+
+
+#define APIC_ITRACE(name, irq_num, id) \
+ lock ; /* MP-safe */ \
+ incl CNAME(name) + (irq_num) * 4 ; \
+ pushl %eax ; \
+ pushl %ecx ; \
+ pushl %edx ; \
+ movl $(irq_num), %eax ; \
+ cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \
+ jne 7f ; \
+ pushl $id ; \
+ call log_intr_event ; \
+ addl $4, %esp ; \
+7: ; \
+ popl %edx ; \
+ popl %ecx ; \
+ popl %eax
+#else
+#define APIC_ITRACE(name, irq_num, id) \
+ lock ; /* MP-safe */ \
+ incl CNAME(name) + (irq_num) * 4
+#endif
+
+#define APIC_ITRACE_ENTER 1
+#define APIC_ITRACE_EOI 2
+#define APIC_ITRACE_TRYISRLOCK 3
+#define APIC_ITRACE_GOTISRLOCK 4
+#define APIC_ITRACE_ENTER2 5
+#define APIC_ITRACE_LEAVE 6
+#define APIC_ITRACE_UNMASK 7
+#define APIC_ITRACE_ACTIVE 8
+#define APIC_ITRACE_MASKED 9
+#define APIC_ITRACE_NOISRLOCK 10
+#define APIC_ITRACE_MASKED2 11
+#define APIC_ITRACE_SPLZ 12
+#define APIC_ITRACE_DORETI 13
+
+#else
+#define APIC_ITRACE(name, irq_num, id)
+#endif
+
#ifdef CPL_AND_CML
#define INTR(irq_num, vec_name) \
@@ -230,12 +315,18 @@ IDTVEC(vec_name) ; \
movl %ax, %ds ; \
movl %ax, %es ; \
; \
+ APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
lock ; /* MP-safe */ \
btsl $(irq_num), iactive ; /* lazy masking */ \
jc 1f ; /* already active */ \
; \
+ MASK_LEVEL_IRQ(irq_num) ; \
+ EOI_IRQ(irq_num) ; \
+0: ; \
+ APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
ENLOCK ; \
; \
+ APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 2f ; /* this INT masked */ \
@@ -244,7 +335,6 @@ IDTVEC(vec_name) ; \
orl $IRQ_BIT(irq_num), _cil ; \
AVCPL_UNLOCK ; \
; \
-;;; movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
@@ -263,39 +353,67 @@ __CONCAT(Xresume,irq_num): ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
incl _inside_intr ; \
+ APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
sti ; \
call *_intr_handler + (irq_num) * 4 ; \
cli ; \
+ APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
decl _inside_intr ; \
; \
lock ; andl $~IRQ_BIT(irq_num), iactive ; \
lock ; andl $~IRQ_BIT(irq_num), _cil ; \
UNMASK_IRQ(irq_num) ; \
+ APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
sti ; /* doreti repeats cli/sti */ \
MEXITCOUNT ; \
LATELOCK ; \
jmp _doreti ; \
; \
ALIGN_TEXT ; \
-1: ; /* active or locked */ \
- MASK_LEVEL_IRQ(irq_num) ; \
- movl $0, lapic_eoi ; /* do the EOI */ \
-; \
+1: ; /* active */ \
+ APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
-; \
+ MASK_IRQ(irq_num) ; \
+ EOI_IRQ(irq_num) ; \
+ btsl $(irq_num), iactive ; /* still active */ \
+ jnc 0b ; /* retry */ \
POP_FRAME ; \
iret ; \
; \
ALIGN_TEXT ; \
2: ; /* masked by cpl|cml */ \
+ APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
+ orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
DELOCK ; /* XXX this is going away... */ \
- jmp 1b
+ POP_FRAME ; \
+ iret ; \
+ ALIGN_TEXT ; \
+3: ; /* other cpu has isr lock */ \
+ APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
+ AVCPL_LOCK ; /* MP-safe */ \
+ orl $IRQ_BIT(irq_num), _ipending ; \
+ testl $IRQ_BIT(irq_num), _cpl ; \
+ jne 4f ; /* this INT masked */ \
+ testl $IRQ_BIT(irq_num), _cml ; \
+ jne 4f ; /* this INT masked */ \
+ orl $IRQ_BIT(irq_num), _cil ; \
+ AVCPL_UNLOCK ; \
+ call forward_irq ; /* forward irq to lock holder */ \
+ POP_FRAME ; /* and return */ \
+ iret ; \
+ ALIGN_TEXT ; \
+4: ; /* blocked */ \
+ APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
+ AVCPL_UNLOCK ; \
+ POP_FRAME ; /* and return */ \
+ iret
#else /* CPL_AND_CML */
+
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
@@ -306,20 +424,25 @@ IDTVEC(vec_name) ; \
movl %ax, %ds ; \
movl %ax, %es ; \
; \
+ APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
lock ; /* MP-safe */ \
btsl $(irq_num), iactive ; /* lazy masking */ \
jc 1f ; /* already active */ \
; \
+ MASK_LEVEL_IRQ(irq_num) ; \
+ EOI_IRQ(irq_num) ; \
+0: ; \
+ APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
- jz 1f ; /* no */ \
+ jz 3f ; /* no */ \
; \
+ APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 2f ; /* this INT masked */ \
AVCPL_UNLOCK ; \
; \
-;;; movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
@@ -334,36 +457,60 @@ __CONCAT(Xresume,irq_num): ; \
pushl %eax ; \
orl _intr_mask + (irq_num) * 4, %eax ; \
movl %eax, _cpl ; \
+ andl $~IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
+ APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
sti ; \
call *_intr_handler + (irq_num) * 4 ; \
cli ; \
+ APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
; \
lock ; andl $~IRQ_BIT(irq_num), iactive ; \
UNMASK_IRQ(irq_num) ; \
+ APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
sti ; /* doreti repeats cli/sti */ \
MEXITCOUNT ; \
jmp _doreti ; \
; \
ALIGN_TEXT ; \
-1: ; /* active or locked */ \
- MASK_LEVEL_IRQ(irq_num) ; \
- movl $0, lapic_eoi ; /* do the EOI */ \
-; \
+1: ; /* active */ \
+ APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
-; \
+ MASK_IRQ(irq_num) ; \
+ EOI_IRQ(irq_num) ; \
+ btsl $(irq_num), iactive ; /* still active */ \
+ jnc 0b ; /* retry */ \
POP_FRAME ; \
- iret ; \
-; \
+ iret ; /* XXX: iactive bit might be 0 now */ \
ALIGN_TEXT ; \
-2: ; /* masked by cpl */ \
+2: ; /* masked by cpl, leave iactive set */ \
+ APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
+ orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
- jmp 1b
+ POP_FRAME ; \
+ iret ; \
+ ALIGN_TEXT ; \
+3: ; /* other cpu has isr lock */ \
+ APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
+ AVCPL_LOCK ; /* MP-safe */ \
+ orl $IRQ_BIT(irq_num), _ipending ; \
+ testl $IRQ_BIT(irq_num), _cpl ; \
+ jne 4f ; /* this INT masked */ \
+ AVCPL_UNLOCK ; \
+ call forward_irq ; /* forward irq to lock holder */ \
+ POP_FRAME ; /* and return */ \
+ iret ; \
+ ALIGN_TEXT ; \
+4: ; /* blocked */ \
+ APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
+ AVCPL_UNLOCK ; \
+ POP_FRAME ; /* and return */ \
+ iret
#endif /* CPL_AND_CML */
@@ -515,6 +662,8 @@ _Xcpuast:
movl _cpl, %eax
#endif
pushl %eax
+ lock
+ orl $SWI_AST_PENDING, _ipending
AVCPL_UNLOCK
lock
incb _intr_nesting_level
@@ -522,9 +671,6 @@ _Xcpuast:
pushl $0
- lock
- orl $SWI_AST_PENDING, _ipending
-
movl _cpuid, %eax
lock
btrl %eax, _checkstate_pending_ast
@@ -536,6 +682,113 @@ _Xcpuast:
POP_FRAME
iret
+
+/*
+ * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
+ */
+
+ .text
+ SUPERALIGN_TEXT
+ .globl _Xforward_irq
+_Xforward_irq:
+ PUSH_FRAME
+ movl $KDSEL, %eax
+ movl %ax, %ds /* use KERNEL data segment */
+ movl %ax, %es
+
+ movl $0, lapic_eoi /* End Of Interrupt to APIC */
+
+ FAKE_MCOUNT(12*4(%esp))
+
+ ISR_TRYLOCK
+ testl %eax,%eax /* Did we get the lock ? */
+ jz 1f /* No */
+
+ lock
+ incl CNAME(forward_irq_hitcnt)
+ cmpb $4, _intr_nesting_level
+ jae 2f
+
+ jmp 3f
+
+ AVCPL_LOCK
+#ifdef CPL_AND_CML
+ movl _cml, %eax
+#else
+ movl _cpl, %eax
+#endif
+ pushl %eax
+ AVCPL_UNLOCK
+ lock
+ incb _intr_nesting_level
+ sti
+
+ pushl $0
+
+ MEXITCOUNT
+ jmp _doreti /* Handle forwarded interrupt */
+4:
+ lock
+ decb _intr_nesting_level
+ ISR_RELLOCK
+ MEXITCOUNT
+ addl $8, %esp
+ POP_FRAME
+ iret
+1:
+ lock
+ incl CNAME(forward_irq_misscnt)
+ call forward_irq /* Oops, we've lost the isr lock */
+ MEXITCOUNT
+ POP_FRAME
+ iret
+2:
+ lock
+ incl CNAME(forward_irq_toodeepcnt)
+3:
+ ISR_RELLOCK
+ MEXITCOUNT
+ POP_FRAME
+ iret
+
+/*
+ *
+ */
+forward_irq:
+ MCOUNT
+ cmpl $0,_invltlb_ok
+ jz 4f
+
+ cmpl $0, CNAME(forward_irq_enabled)
+ jz 4f
+
+ movl _mp_lock,%eax
+ cmpl $FREE_LOCK,%eax
+ jne 1f
+	movl $0, %eax		/* Pick CPU #0 if no one has lock */
+1:
+ shrl $24,%eax
+ movl _cpu_num_to_apic_id(,%eax,4),%ecx
+ shll $24,%ecx
+ movl lapic_icr_hi, %eax
+ andl $~APIC_ID_MASK, %eax
+ orl %ecx, %eax
+ movl %eax, lapic_icr_hi
+
+2:
+ movl lapic_icr_lo, %eax
+ andl $APIC_DELSTAT_MASK,%eax
+ jnz 2b
+ movl lapic_icr_lo, %eax
+ andl $APIC_RESV2_MASK, %eax
+ orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
+ movl %eax, lapic_icr_lo
+3:
+ movl lapic_icr_lo, %eax
+ andl $APIC_DELSTAT_MASK,%eax
+ jnz 3b
+4:
+ ret
/*
* Executed by a CPU when it receives an Xcpustop IPI from another CPU,
@@ -702,6 +955,16 @@ _checkstate_need_ast:
.long 0
_checkstate_pending_ast:
.long 0
+ .globl CNAME(forward_irq_misscnt)
+ .globl CNAME(forward_irq_toodeepcnt)
+ .globl CNAME(forward_irq_hitcnt)
+CNAME(forward_irq_misscnt):
+ .long 0
+CNAME(forward_irq_hitcnt):
+ .long 0
+CNAME(forward_irq_toodeepcnt):
+ .long 0
+
.globl _apic_pin_trigger
_apic_pin_trigger:
diff --git a/sys/i386/isa/intr_machdep.c b/sys/i386/isa/intr_machdep.c
index 4a593ea7..0cdbfc0 100644
--- a/sys/i386/isa/intr_machdep.c
+++ b/sys/i386/isa/intr_machdep.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa.c 7.2 (Berkeley) 5/13/91
- * $Id: intr_machdep.c,v 1.7 1997/09/28 15:48:34 mckay Exp $
+ * $Id: intr_machdep.c,v 1.8 1998/02/09 06:08:30 eivind Exp $
*/
#include "opt_auto_eoi.h"
@@ -444,19 +444,30 @@ icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
vector = TPR_FAST_INTS + intr;
setidt(vector, fastintr[intr],
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-
- /*
- * XXX MULTIPLE_IOAPICSXXX
- * Reprogram the vector in the IO APIC.
- */
- select = (intr * 2) + IOAPIC_REDTBL0;
- value = io_apic_read(0, select) & ~IOART_INTVEC;
- io_apic_write(0, select, value | vector);
}
- else
- setidt(TPR_SLOW_INTS + intr, slowintr[intr],
+ else {
+ vector = TPR_SLOW_INTS + intr;
+#ifdef APIC_INTR_REORDER
+#ifdef APIC_INTR_HIGHPRI_CLOCK
+ /* XXX: Hack (kludge?) for more accurate clock. */
+ if (intr == 0 || intr == 8) {
+ vector = TPR_FAST_INTS + intr;
+ }
+#endif
+#endif
+ setidt(vector, slowintr[intr],
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-
+ }
+#ifdef APIC_INTR_REORDER
+ set_lapic_isrloc(intr, vector);
+#endif
+ /*
+ * XXX MULTIPLE_IOAPICSXXX
+ * Reprogram the vector in the IO APIC.
+ */
+ select = (intr * 2) + IOAPIC_REDTBL0;
+ value = io_apic_read(0, select) & ~IOART_INTVEC;
+ io_apic_write(0, select, value | vector);
#else
setidt(ICU_OFFSET + intr,
flags & INTR_FAST ? fastintr[intr] : slowintr[intr],
@@ -505,6 +516,9 @@ icu_unset(intr, handler)
setidt(flags & INTR_FAST ? TPR_FAST_INTS + intr : TPR_SLOW_INTS + intr,
slowintr[intr], SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#else /* FAST_HI */
+#ifdef APIC_INTR_REORDER
+ set_lapic_isrloc(intr, ICU_OFFSET + intr);
+#endif
setidt(ICU_OFFSET + intr, slowintr[intr], SDT_SYS386IGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
#endif /* FAST_HI */
diff --git a/sys/i386/isa/intr_machdep.h b/sys/i386/isa/intr_machdep.h
index 7714c0d..97122e6 100644
--- a/sys/i386/isa/intr_machdep.h
+++ b/sys/i386/isa/intr_machdep.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa_device.h 7.1 (Berkeley) 5/9/91
- * $Id: intr_machdep.h,v 1.9 1998/02/13 06:59:22 bde Exp $
+ * $Id: intr_machdep.h,v 1.10 1998/03/03 20:55:24 tegge Exp $
*/
#ifndef _I386_ISA_INTR_MACHDEP_H_
@@ -116,6 +116,9 @@
/* IPI to generate an additional software trap at the target CPU */
#define XCPUAST_OFFSET (ICU_OFFSET + 48)
+/* IPI to signal the CPU holding the ISR lock that another IRQ has appeared */
+#define XFORWARD_IRQ_OFFSET (ICU_OFFSET + 49)
+
/* IPI to signal CPUs to stop and wait for another CPU to restart them */
#define XCPUSTOP_OFFSET (ICU_OFFSET + 128)
@@ -174,6 +177,7 @@ inthand_t
Xcpucheckstate, /* Check cpu state */
#endif
Xcpuast, /* Additional software trap on other cpu */
+ Xforward_irq, /* Forward irq to cpu holding ISR lock */
Xcpustop, /* CPU stops & waits for another CPU to restart it */
Xspuriousint; /* handle APIC "spurious INTs" */
diff --git a/sys/i386/isa/ipl.s b/sys/i386/isa/ipl.s
index 38e6934..bd586ee 100644
--- a/sys/i386/isa/ipl.s
+++ b/sys/i386/isa/ipl.s
@@ -36,7 +36,7 @@
*
* @(#)ipl.s
*
- * $Id: ipl.s,v 1.18 1997/10/13 00:01:53 fsmp Exp $
+ * $Id: ipl.s,v 1.19 1997/12/15 02:18:35 tegge Exp $
*/
@@ -263,6 +263,9 @@ doreti_unpend:
cli
#ifdef SMP
pushl %edx /* preserve %edx */
+#ifdef APIC_INTR_DIAGNOSTIC
+ pushl %ecx
+#endif
pushl %eax /* preserve %eax */
ICPL_LOCK
#ifdef CPL_AND_CML
@@ -271,11 +274,32 @@ doreti_unpend:
popl _cpl
#endif
FAST_ICPL_UNLOCK
+#ifdef APIC_INTR_DIAGNOSTIC
+ popl %ecx
+#endif
popl %edx
#else
movl %eax,_cpl
#endif
MEXITCOUNT
+#ifdef APIC_INTR_DIAGNOSTIC
+ lock
+ incl CNAME(apic_itrace_doreti)(,%ecx,4)
+#ifdef APIC_INTR_DIAGNOSTIC_IRQ
+ cmpl $APIC_INTR_DIAGNOSTIC_IRQ,%ecx
+ jne 9f
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushl $APIC_ITRACE_DORETI
+ call log_intr_event
+ addl $4,%esp
+ popl %edx
+ popl %ecx
+ popl %eax
+9:
+#endif
+#endif
jmp %edx
ALIGN_TEXT
diff --git a/sys/i386/isa/nmi.c b/sys/i386/isa/nmi.c
index 4a593ea7..0cdbfc0 100644
--- a/sys/i386/isa/nmi.c
+++ b/sys/i386/isa/nmi.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa.c 7.2 (Berkeley) 5/13/91
- * $Id: intr_machdep.c,v 1.7 1997/09/28 15:48:34 mckay Exp $
+ * $Id: intr_machdep.c,v 1.8 1998/02/09 06:08:30 eivind Exp $
*/
#include "opt_auto_eoi.h"
@@ -444,19 +444,30 @@ icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
vector = TPR_FAST_INTS + intr;
setidt(vector, fastintr[intr],
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-
- /*
- * XXX MULTIPLE_IOAPICSXXX
- * Reprogram the vector in the IO APIC.
- */
- select = (intr * 2) + IOAPIC_REDTBL0;
- value = io_apic_read(0, select) & ~IOART_INTVEC;
- io_apic_write(0, select, value | vector);
}
- else
- setidt(TPR_SLOW_INTS + intr, slowintr[intr],
+ else {
+ vector = TPR_SLOW_INTS + intr;
+#ifdef APIC_INTR_REORDER
+#ifdef APIC_INTR_HIGHPRI_CLOCK
+ /* XXX: Hack (kludge?) for more accurate clock. */
+ if (intr == 0 || intr == 8) {
+ vector = TPR_FAST_INTS + intr;
+ }
+#endif
+#endif
+ setidt(vector, slowintr[intr],
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-
+ }
+#ifdef APIC_INTR_REORDER
+ set_lapic_isrloc(intr, vector);
+#endif
+ /*
+ * XXX MULTIPLE_IOAPICSXXX
+ * Reprogram the vector in the IO APIC.
+ */
+ select = (intr * 2) + IOAPIC_REDTBL0;
+ value = io_apic_read(0, select) & ~IOART_INTVEC;
+ io_apic_write(0, select, value | vector);
#else
setidt(ICU_OFFSET + intr,
flags & INTR_FAST ? fastintr[intr] : slowintr[intr],
@@ -505,6 +516,9 @@ icu_unset(intr, handler)
setidt(flags & INTR_FAST ? TPR_FAST_INTS + intr : TPR_SLOW_INTS + intr,
slowintr[intr], SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#else /* FAST_HI */
+#ifdef APIC_INTR_REORDER
+ set_lapic_isrloc(intr, ICU_OFFSET + intr);
+#endif
setidt(ICU_OFFSET + intr, slowintr[intr], SDT_SYS386IGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
#endif /* FAST_HI */
OpenPOWER on IntegriCloud