author:    peter <peter@FreeBSD.org>    1997-04-26 11:46:25 +0000
committer: peter <peter@FreeBSD.org>    1997-04-26 11:46:25 +0000
commit:    6323aa10bffe459912ba8b2f8592c7ac4ffd8705 (patch)
tree:      bf48960e09e26f0de373de093c89322724bbdd64 /sys/i386/isa/vector.s
parent:    96efe480c0c091aecb2f359675c74aca30f36a4a (diff)
Man the liferafts! Here comes the long awaited SMP -> -current merge!
There are various options documented in i386/conf/LINT, with more to come over the next few days. The kernel should run pretty much "as before" without the options that activate SMP mode.

There are a handful of known "loose ends" that need to be fixed, but they have been put off since the SMP kernel is in moderately good condition at the moment.

This commit is the result of 14 months of tinkering and testing by many people. A special thanks to Steve Passe for implementing the APIC code!
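All the new code below is gated on kernel configuration options. As a rough sketch of how those options reach an assembly file like this one: config(8) turns "options FOO" lines in the kernel configuration into generated opt_*.h headers, which is what the new #include "opt_smp.h" picks up. Which symbols land in opt_smp.h specifically is an assumption here, not something this commit shows:

    /*
     * Illustrative sketch of a generated opt_smp.h -- NOT the literal
     * file.  config(8) emits one #define per "options" line in the
     * kernel configuration (see i386/conf/LINT for the documentation).
     */
    #define SMP     1               /* build the SMP-capable kernel */
    #define APIC_IO 1               /* route interrupts via the I/O APIC */
    /* experimental knobs that also appear in this diff: */
    /* #define TEST_CPUHITS 1 */    /* count interrupt arrivals per CPU */
    /* #define IPI_INTS     1 */    /* IPI vectors on "irqs" 24-27 */
    /* #define XFAST_IPI32  1 */    /* fast IPI handler at vector 32 */

With none of these defined, the preprocessor reduces the file to essentially its pre-merge form, matching the "as before" claim above.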
Diffstat (limited to 'sys/i386/isa/vector.s')
-rw-r--r--  sys/i386/isa/vector.s | 291
1 file changed, 280 insertions(+), 11 deletions(-)
diff --git a/sys/i386/isa/vector.s b/sys/i386/isa/vector.s
index fa2fd42..270ae20 100644
--- a/sys/i386/isa/vector.s
+++ b/sys/i386/isa/vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id$
+ * $Id: vector.s,v 1.25 1997/02/22 09:37:23 peter Exp $
*/
/*
@@ -8,6 +8,12 @@
*/
#include "opt_auto_eoi.h"
+#include "opt_smp.h"
+
+#if defined(SMP)
+#include <machine/smpasm.h> /* this includes <machine/apic.h> */
+#include <machine/smptests.h> /** TEST_CPUHITS */
+#endif /* SMP */
#include <i386/isa/icu.h>
#ifdef PC98
@@ -22,6 +28,111 @@
#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
#endif
+
+#if defined(SMP)
+
+#define GET_MPLOCK call _get_mplock
+#define REL_MPLOCK call _rel_mplock
+
+#else
+
+#define GET_MPLOCK /* NOP get Kernel Mutex */
+#define REL_MPLOCK /* NOP release mutex */
+
+#endif /* SMP */
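GET_MPLOCK/REL_MPLOCK are the heart of the scheme: every interrupt handler below is bracketed by a single global kernel lock, so only one CPU runs this kernel code at a time. The real _get_mplock/_rel_mplock are implemented elsewhere and are not part of this diff; conceptually they behave like a global spinlock. A minimal C sketch (using a modern GCC atomic builtin for brevity -- the 1997 code would have used an xchg-based sequence, and the real lock may well be recursive):

    /* Conceptual model of the global MP lock -- not the real source. */
    static volatile int mp_lock;            /* 0 == free (assumed encoding) */

    void
    get_mplock(void)
    {
            /* spin until the lock is acquired atomically */
            while (__sync_lock_test_and_set(&mp_lock, 1) != 0)
                    ;                       /* another CPU is in the kernel */
    }

    void
    rel_mplock(void)
    {
            __sync_lock_release(&mp_lock);  /* free it; a waiter may enter */
    }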
+
+
+#if defined(APIC_IO)
+
+#if defined(SMP) && defined(TEST_CPUHITS)
+
+#undef GET_MPLOCK
+#define GET_MPLOCK \
+ call _get_mplock ; \
+ GETCPUID(%eax) ; \
+ incl _cpuhits(,%eax,4)
+
+#endif /* SMP && TEST_CPUHITS */
+
+#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
+#define IRQ_BIT(irq_num) (1 << (irq_num))
+
+#define ENABLE_APIC \
+ movl _apic_base, %eax ; \
+ movl $0, APIC_EOI(%eax)
+
+#define ENABLE_ICU1 ENABLE_APIC
+#define ENABLE_ICU1_AND_2 ENABLE_APIC
+
+#define MASK_IRQ(irq_num,icu) \
+ orl $IRQ_BIT(irq_num),_imen ; /* set the mask bit */ \
+ movl _io_apic_base,%ecx ; /* io apic addr */ \
+ movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \
+ movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
+ orl $IOART_INTMASK,%eax ; /* set the mask */ \
+ movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */
+
+#define UNMASK_IRQ(irq_num,icu) \
+ andl $~IRQ_BIT(irq_num),_imen ; /* clear mask bit */ \
+ movl _io_apic_base,%ecx ; /* io apic addr */ \
+ movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \
+ movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
+ andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
+ movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */
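Both of these macros drive the I/O APIC through its two-register indirect interface: the index of a redirection-table register (REDTBL_IDX) is written to the select register at _io_apic_base, and the selected register is then read-modified-written through the data window at IOAPIC_WINDOW. In C terms (a sketch mirroring MASK_IRQ above; the 0x10 window offset and the bit-16 mask bit follow the standard I/O APIC register layout and are assumptions here, not values taken from this diff):

    #define IOWIN_OFFSET 0x10               /* data window, per the datasheet */
    #define INTMASK_BIT  0x00010000         /* mask bit in the low redtbl dword */

    extern volatile unsigned *io_apic_base; /* maps the select register */
    extern unsigned imen;                   /* software copy of masked IRQs */

    void
    ioapic_mask_irq(int irq)
    {
            imen |= 1u << irq;                      /* record the mask bit */
            io_apic_base[0] = 0x10 + irq * 2;       /* REDTBL_IDX: select entry */
            io_apic_base[IOWIN_OFFSET / 4] |= INTMASK_BIT;  /* RMW via window */
    }

Unmasking is the same read-modify-write with the bit cleared instead of set.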
+
+#define TEST_IRQ(irq_num,reg) \
+ testl $IRQ_BIT(irq_num),%eax
+
+#define SET_IPENDING(irq_num) \
+ orl $IRQ_BIT(irq_num),_ipending
+
+/*
+ * 'lazy masking' code submitted by: Bruce Evans <bde@zeta.org.au>
+ */
+#define MAYBE_MASK_IRQ(irq_num,icu) \
+ testl $IRQ_BIT(irq_num),iactive ; /* lazy masking */ \
+ je 1f ; /* NOT currently active */ \
+ MASK_IRQ(irq_num,icu) ; \
+ ENABLE_APIC ; \
+ SET_IPENDING(irq_num) ; \
+ REL_MPLOCK ; /* SMP release global lock */ \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp ; \
+ iret ; \
+; \
+ ALIGN_TEXT ; \
+1: ; \
+ orl $IRQ_BIT(irq_num),iactive
+
+#define MAYBE_UNMASK_IRQ(irq_num,icu) \
+ andl $~IRQ_BIT(irq_num),iactive ; \
+ testl $IRQ_BIT(irq_num),_imen ; \
+ je 3f ; \
+ UNMASK_IRQ(irq_num,icu) ; \
+3:
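The point of the lazy scheme: in the common case an IRQ is never masked at the I/O APIC at all. Only if a second instance arrives while the first is still marked active in iactive does the handler pay for a real mask, EOI the late arrival, and park it in _ipending for _doreti to replay. The control flow of the two macros, transcribed into C (mask_irq()/unmask_irq()/apic_eoi()/rel_mplock() stand in for the macros above):

    extern volatile unsigned iactive, ipending, imen;
    extern void mask_irq(int), unmask_irq(int), apic_eoi(void), rel_mplock(void);

    void
    maybe_mask_irq(int irq)
    {
            if (iactive & (1u << irq)) {    /* handler already running */
                    mask_irq(irq);          /* now really mask at the APIC */
                    apic_eoi();
                    ipending |= 1u << irq;  /* _doreti replays it later */
                    rel_mplock();
                    return;                 /* asm pops the frame and irets */
            }
            iactive |= 1u << irq;           /* claim it; fall into the handler */
    }

    void
    maybe_unmask_irq(int irq)
    {
            iactive &= ~(1u << irq);        /* no longer active */
            if (imen & (1u << irq))         /* lazily masked while we ran? */
                    unmask_irq(irq);        /* undo it before returning */
    }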
+
+#else /* APIC_IO */
+
+#define MASK_IRQ(irq_num,icu) \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
+
+#define UNMASK_IRQ(irq_num,icu) \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
+
+#define TEST_IRQ(irq_num,reg) \
+ testb $IRQ_BIT(irq_num),%reg
+
+#define SET_IPENDING(irq_num) \
+ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num)
+
#define ICU_EOI 0x20 /* XXX - define elsewhere */
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
@@ -50,6 +161,15 @@
OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
#endif
+#define MAYBE_MASK_IRQ(irq_num,icu) \
+ MASK_IRQ(irq_num,icu)
+
+#define MAYBE_UNMASK_IRQ(irq_num,icu) \
+ UNMASK_IRQ(irq_num,icu)
+
+#endif /* APIC_IO */
+
+
#ifdef FAST_INTR_HANDLER_USES_ES
#define ACTUALLY_PUSHED 1
#define MAYBE_MOVW_AX_ES movl %ax,%es
@@ -125,6 +245,7 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
+ GET_MPLOCK ; /* SMP Spin lock */ \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
@@ -138,6 +259,7 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
+ REL_MPLOCK ; /* SMP release global lock */ \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@@ -183,13 +305,11 @@ IDTVEC(vec_name) ; \
movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
movl %ax,%ds ; /* ... early for obsolete reasons */ \
movl %ax,%es ; \
- movb _imen + IRQ_BYTE(irq_num),%al ; \
- orb $IRQ_BIT(irq_num),%al ; \
- movb %al,_imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET ; \
+ GET_MPLOCK ; /* SMP Spin lock */ \
+ MAYBE_MASK_IRQ(irq_num,icu) ; \
enable_icus ; \
movl _cpl,%eax ; \
- testb $IRQ_BIT(irq_num),%reg ; \
+ TEST_IRQ(irq_num,reg) ; \
jne 2f ; \
incb _intr_nesting_level ; \
__CONCAT(Xresume,irq_num): ; \
@@ -205,10 +325,7 @@ __CONCAT(Xresume,irq_num): ; \
sti ; \
call *_intr_handler + (irq_num) * 4 ; \
cli ; /* must unmask _imen and icu atomically */ \
- movb _imen + IRQ_BYTE(irq_num),%al ; \
- andb $~IRQ_BIT(irq_num),%al ; \
- movb %al,_imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET ; \
+ MAYBE_UNMASK_IRQ(irq_num,icu) ; \
sti ; /* XXX _doreti repeats the cli/sti */ \
MEXITCOUNT ; \
/* We could usually avoid the following jmp by inlining some of */ \
@@ -218,13 +335,66 @@ __CONCAT(Xresume,irq_num): ; \
ALIGN_TEXT ; \
2: ; \
/* XXX skip mcounting here to avoid double count */ \
- orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \
+ SET_IPENDING(irq_num) ; \
+ REL_MPLOCK ; /* SMP release global lock */ \
popl %es ; \
popl %ds ; \
popal ; \
addl $4+4,%esp ; \
iret
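The 2: label is the existing blocked-interrupt path, now SMP-aware: when the IRQ's bit is set in the current priority mask _cpl, the interrupt is merely recorded in _ipending and the global lock is dropped before the iret; splx()/_doreti will run the handler once the priority level falls. Roughly, in C (names from the diff; a simplification of the asm above):

    if (cpl & (1u << irq)) {        /* IRQ blocked at the current spl */
            ipending |= 1u << irq;  /* remember it for later ... */
            rel_mplock();           /* ... release the giant lock ... */
            return;                 /* ... and iret; splx() replays it */
    }
    /* otherwise fall into Xresume<irq_num> and call the handler */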
+
+#if defined(APIC_IO) && defined(IPI_INTS)
+/*
+ * A simple IPI_INTR() macro based on a heavily cut down FAST_INTR().
+ * Call its handler, EOI, and return.
+ */
+#define IPI_INTR(irq_num, vec_name) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ pushl %eax ; /* save only call-used registers */ \
+ pushl %ecx ; \
+ pushl %edx ; \
+ pushl %ds ; \
+ MAYBE_PUSHL_ES ; \
+ movl $KDSEL,%eax ; \
+ movl %ax,%ds ; \
+ MAYBE_MOVW_AX_ES ; \
+ pushl _intr_unit + (irq_num) * 4 ; \
+ call *_intr_handler + (irq_num) * 4 ; \
+ ENABLE_APIC ; \
+ addl $4,%esp ; \
+ incl _cnt+V_INTR ; /* book-keeping can wait */ \
+ movl _intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ MAYBE_POPL_ES ; \
+ popl %ds ; \
+ popl %edx ; \
+ popl %ecx ; \
+ popl %eax ; \
+ iret
+#endif /* APIC_IO && IPI_INTS */
+
+#if defined(XFAST_IPI32)
+ .text
+ SUPERALIGN_TEXT
+ .globl _Xfastipi32
+_Xfastipi32:
+ pushl %eax
+ movl %cr3, %eax
+ movl %eax, %cr3
+ pushl %ds
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ incl _ipihits
+ movl _apic_base, %eax
+ movl $0, APIC_EOI(%eax)
+ popl %ds
+ popl %eax
+ iret
+#endif /* XFAST_IPI32 */
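_Xfastipi32 is about as small as an IPI handler can get. The interesting pair is movl %cr3,%eax / movl %eax,%cr3: rewriting %cr3 with its current value flushes the TLB (all non-global entries), so this looks like an experimental TLB-shootdown IPI -- it bumps _ipihits, EOIs the local APIC, and returns. The same trick expressed in C with GCC inline assembly (a sketch; the kernel's own helper for this is not shown in the diff):

    /* Flush the TLB by reloading %cr3 with its current value. */
    static __inline void
    tlb_flush(void)
    {
            unsigned long cr3;

            __asm __volatile("movl %%cr3,%0; movl %0,%%cr3"
                : "=r" (cr3) : : "memory");
    }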
+
MCOUNT_LABEL(bintr)
FAST_INTR(0,fastintr0, ENABLE_ICU1)
FAST_INTR(1,fastintr1, ENABLE_ICU1)
@@ -242,6 +412,16 @@ MCOUNT_LABEL(bintr)
FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+#if defined(APIC_IO)
+ FAST_INTR(16,fastintr16, ENABLE_ICU1_AND_2)
+ FAST_INTR(17,fastintr17, ENABLE_ICU1_AND_2)
+ FAST_INTR(18,fastintr18, ENABLE_ICU1_AND_2)
+ FAST_INTR(19,fastintr19, ENABLE_ICU1_AND_2)
+ FAST_INTR(20,fastintr20, ENABLE_ICU1_AND_2)
+ FAST_INTR(21,fastintr21, ENABLE_ICU1_AND_2)
+ FAST_INTR(22,fastintr22, ENABLE_ICU1_AND_2)
+ FAST_INTR(23,fastintr23, ENABLE_ICU1_AND_2)
+#endif /* APIC_IO */
INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al)
INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al)
INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al)
@@ -258,6 +438,22 @@ MCOUNT_LABEL(bintr)
INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah)
INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah)
INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+#if defined(APIC_IO)
+ INTR(16,intr16, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+ INTR(17,intr17, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+ INTR(18,intr18, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+ INTR(19,intr19, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+ INTR(20,intr20, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+ INTR(21,intr21, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+ INTR(22,intr22, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+ INTR(23,intr23, IO_ICU2, ENABLE_ICU1_AND_2, ah)
+#if defined(IPI_INTS)
+ IPI_INTR(24, ipi24)
+ IPI_INTR(25, ipi25)
+ IPI_INTR(26, ipi26)
+ IPI_INTR(27, ipi27)
+#endif /* IPI_INTS */
+#endif /* APIC_IO */
MCOUNT_LABEL(eintr)
.data
@@ -267,17 +463,74 @@ ihandlers: /* addresses of interrupt handlers */
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
+#if defined(APIC_IO)
+ .long Xresume16, Xresume17, Xresume18, Xresume19
+ .long Xresume20, Xresume21, Xresume22, Xresume23
+ .long 0, 0, 0, 0, swi_tty, swi_net, _softclock, swi_ast
+#else
.long swi_tty, swi_net, 0, 0, 0, 0, 0, 0
.long 0, 0, 0, 0, 0, 0, _softclock, swi_ast
+#endif /* APIC_IO */
+
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
+
+#if defined(APIC_IO)
+#if defined(IPI_INTS)
+ /* these 4 IPI slots are counted as HARDWARE INTs, ie NHWI, above */
+#else
+ .long 0, 0, 0, 0 /* padding */
+#endif /* IPI_INTS */
+ .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK
+#else
.long SWI_TTY_MASK, SWI_NET_MASK, 0, 0, 0, 0, 0, 0
.long 0, 0, 0, 0, 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+#endif /* APIC_IO */
+
.globl _intr_nesting_level
_intr_nesting_level:
.byte 0
.space 3
+#if defined(APIC_IO)
+
+ .globl _ivectors
+_ivectors:
+ .long _Xintr0, _Xintr1, _Xintr2, _Xintr3
+ .long _Xintr4, _Xintr5, _Xintr6, _Xintr7
+ .long _Xintr8, _Xintr9, _Xintr10, _Xintr11
+ .long _Xintr12, _Xintr13, _Xintr14, _Xintr15
+ .long _Xintr16, _Xintr17, _Xintr18, _Xintr19
+ .long _Xintr20, _Xintr21, _Xintr22, _Xintr23
+#if defined(IPI_INTS)
+ .long _Xipi24, _Xipi25, _Xipi26, _Xipi27
+#endif /* IPI_INTS */
+
+/* active flag for lazy masking */
+iactive:
+ .long 0
+
+#if defined(XFAST_IPI32)
+ .globl _ipihits
+_ipihits:
+ .long 0
+#endif /* XFAST_IPI32 */
+
+#if defined(TEST_CPUHITS)
+ .globl _cpuhits
+_cpuhits:
+#if !defined(NCPU)
+/**
+ * FIXME: need a way to pass NCPU to .s files.
+ * NCPU currently defined in smp.h IF NOT defined in opt_smp.h.
+ */
+#define NCPU 4
+#endif /* NCPU */
+ .space NCPU*4
+#endif /* TEST_CPUHITS */
+
+#endif /* APIC_IO */
+
/*
* Interrupt counters and names. The format of these and the label names
* must agree with what vmstat expects. The tables are indexed by device
@@ -309,6 +562,22 @@ _intrnames:
.asciz "stray irq13"
.asciz "stray irq14"
.asciz "stray irq15"
+#if defined(APIC_IO)
+ .asciz "stray irq16"
+ .asciz "stray irq17"
+ .asciz "stray irq18"
+ .asciz "stray irq19"
+ .asciz "stray irq20"
+ .asciz "stray irq21"
+ .asciz "stray irq22"
+ .asciz "stray irq23"
+#if defined(IPI_INTS)
+ .asciz "stray irq24"
+ .asciz "stray irq25"
+ .asciz "stray irq26"
+ .asciz "stray irq27"
+#endif /* IPI_INTS */
+#endif /* APIC_IO */
_eintrnames:
.text