author:    fsmp <fsmp@FreeBSD.org>    1997-05-26 17:58:27 +0000
committer: fsmp <fsmp@FreeBSD.org>    1997-05-26 17:58:27 +0000
commit:    be6c5ef7bc636a1678c8fd766997c08bbe726a9c (patch)
tree:      463eaef10905a846f22931c31aaae5adb52f7ede /sys/i386/isa/vector.s
parent:    7571227e557be0d10a456413c0ba31a7d2acc414 (diff)
Split vector.s into UP and SMP specific files:
- vector.s      <- stub called by i386/exception.s
- icu_vector.s  <- UP
- apic_vector.s <- SMP

Split icu.s into UP and SMP specific files:
- ipl.s         <- stub called by i386/exception.s (formerly icu.s)
- icu_ipl.s     <- UP
- apic_ipl.s    <- SMP

This was done in preparation for massive changes to the SMP INTerrupt
mechanisms. More fine tuning, such as merging ipl.s into exception.s, may
be appropriate.
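After the split, vector.s is reduced to a build-time selector: exception.s keeps vectoring through vector.s, and vector.s simply includes whichever implementation matches the kernel configuration. A minimal sketch of the selection logic in the new stub, mirroring the include hunk at the end of this diff (the UP entry code lives in icu_vector.s, the SMP/APIC code in apic_vector.s):

    #ifdef APIC_IO
    #include "i386/isa/apic_vector.s"   /* SMP: interrupts routed via the I/O APIC */
    #else
    #include "i386/isa/icu_vector.s"    /* UP: traditional 8259 ICU handlers */
    #endif /* APIC_IO */

The icu.s split follows the same pattern: ipl.s remains the stub referenced by exception.s, with icu_ipl.s and apic_ipl.s carrying the UP and SMP interrupt-priority code respectively.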
Diffstat (limited to 'sys/i386/isa/vector.s')
-rw-r--r--   sys/i386/isa/vector.s   430
1 files changed, 15 insertions, 415 deletions
diff --git a/sys/i386/isa/vector.s b/sys/i386/isa/vector.s
index e20d9f5..1fc5668 100644
--- a/sys/i386/isa/vector.s
+++ b/sys/i386/isa/vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: vector.s,v 1.28 1997/04/28 01:47:55 fsmp Exp $
+ * $Id: vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $
*/
/*
@@ -8,11 +8,6 @@
*/
#include "opt_auto_eoi.h"
-#include "opt_smp.h"
-
-#if defined(SMP)
-#include <machine/smpasm.h> /* this includes <machine/apic.h> */
-#endif /* SMP */
#include <i386/isa/icu.h>
#ifdef PC98
@@ -21,144 +16,6 @@
#include <i386/isa/isa.h>
#endif
-#ifdef PC98
-#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */
-#else
-#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
-#endif
-
-
-#if defined(SMP)
-
-#define GET_MPLOCK call _get_mplock
-#define REL_MPLOCK call _rel_mplock
-
-#else
-
-#define GET_MPLOCK /* NOP get Kernel Mutex */
-#define REL_MPLOCK /* NOP release mutex */
-
-#endif /* SMP */
-
-
-#if defined(APIC_IO)
-
-#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
-#define IRQ_BIT(irq_num) (1 << (irq_num))
-
-#define ENABLE_APIC \
- movl _apic_base, %eax ; \
- movl $0, APIC_EOI(%eax)
-
-#define ENABLE_ICU1 ENABLE_APIC
-#define ENABLE_ICU1_AND_2 ENABLE_APIC
-
-#define MASK_IRQ(irq_num,icu) \
- orl $IRQ_BIT(irq_num),_imen ; /* set the mask bit */ \
- movl _io_apic_base,%ecx ; /* io apic addr */ \
- movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \
- movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
- orl $IOART_INTMASK,%eax ; /* set the mask */ \
- movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */
-
-#define UNMASK_IRQ(irq_num,icu) \
- andl $~IRQ_BIT(irq_num),_imen ; /* clear mask bit */ \
- movl _io_apic_base,%ecx ; /* io apic addr */ \
- movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \
- movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
- andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
- movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */
-
-#define TEST_IRQ(irq_num,reg) \
- testl $IRQ_BIT(irq_num),%eax
-
-#define SET_IPENDING(irq_num) \
- orl $IRQ_BIT(irq_num),_ipending
-
-/*
- * 'lazy masking' code submitted by: Bruce Evans <bde@zeta.org.au>
- */
-#define MAYBE_MASK_IRQ(irq_num,icu) \
- testl $IRQ_BIT(irq_num),iactive ; /* lazy masking */ \
- je 1f ; /* NOT currently active */ \
- MASK_IRQ(irq_num,icu) ; \
- ENABLE_APIC ; \
- SET_IPENDING(irq_num) ; \
- REL_MPLOCK ; /* SMP release global lock */ \
- popl %es ; \
- popl %ds ; \
- popal ; \
- addl $4+4,%esp ; \
- iret ; \
-; \
- ALIGN_TEXT ; \
-1: ; \
- orl $IRQ_BIT(irq_num),iactive
-
-#define MAYBE_UNMASK_IRQ(irq_num,icu) \
- andl $~IRQ_BIT(irq_num),iactive ; \
- testl $IRQ_BIT(irq_num),_imen ; \
- je 3f ; \
- UNMASK_IRQ(irq_num,icu) ; \
-3:
-
-#else /* APIC_IO */
-
-#define MASK_IRQ(irq_num,icu) \
- movb _imen + IRQ_BYTE(irq_num),%al ; \
- orb $IRQ_BIT(irq_num),%al ; \
- movb %al,_imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET
-
-#define UNMASK_IRQ(irq_num,icu) \
- movb _imen + IRQ_BYTE(irq_num),%al ; \
- andb $~IRQ_BIT(irq_num),%al ; \
- movb %al,_imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET
-
-#define TEST_IRQ(irq_num,reg) \
- testb $IRQ_BIT(irq_num),%reg
-
-#define SET_IPENDING(irq_num) \
- orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num)
-
-#define ICU_EOI 0x20 /* XXX - define elsewhere */
-
-#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
-#define IRQ_BYTE(irq_num) ((irq_num) / 8)
-
-#ifdef AUTO_EOI_1
-#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
-#define OUTB_ICU1
-#else
-#define ENABLE_ICU1 \
- movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
- OUTB_ICU1 /* ... to clear in service bit */
-#define OUTB_ICU1 \
- outb %al,$IO_ICU1
-#endif
-
-#ifdef AUTO_EOI_2
-/*
- * The data sheet says no auto-EOI on slave, but it sometimes works.
- */
-#define ENABLE_ICU1_AND_2 ENABLE_ICU1
-#else
-#define ENABLE_ICU1_AND_2 \
- movb $ICU_EOI,%al ; /* as above */ \
- outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
- OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
-#endif
-
-#define MAYBE_MASK_IRQ(irq_num,icu) \
- MASK_IRQ(irq_num,icu)
-
-#define MAYBE_UNMASK_IRQ(irq_num,icu) \
- UNMASK_IRQ(irq_num,icu)
-
-#endif /* APIC_IO */
-
-
#ifdef FAST_INTR_HANDLER_USES_ES
#define ACTUALLY_PUSHED 1
#define MAYBE_MOVW_AX_ES movl %ax,%es
@@ -176,6 +33,16 @@
#define MAYBE_PUSHL_ES
#endif
+ .data
+ ALIGN_DATA
+
+ .globl _intr_nesting_level
+_intr_nesting_level:
+ .byte 0
+ .space 3
+
+ .text
+
/*
* Macros for interrupt interrupt entry, call to handler, and exit.
*
@@ -221,275 +88,8 @@
* loading segregs.
*/
-#define FAST_INTR(irq_num, vec_name, enable_icus) \
- .text ; \
- SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
- pushl %eax ; /* save only call-used registers */ \
- pushl %ecx ; \
- pushl %edx ; \
- pushl %ds ; \
- MAYBE_PUSHL_ES ; \
- movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
- MAYBE_MOVW_AX_ES ; \
- FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
- GET_MPLOCK ; /* SMP Spin lock */ \
- pushl _intr_unit + (irq_num) * 4 ; \
- call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
- enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
- addl $4,%esp ; \
- incl _cnt+V_INTR ; /* book-keeping can wait */ \
- movl _intr_countp + (irq_num) * 4,%eax ; \
- incl (%eax) ; \
- movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \
- notl %eax ; \
- andl _ipending,%eax ; \
- jne 2f ; /* yes, maybe handle them */ \
-1: ; \
- MEXITCOUNT ; \
- REL_MPLOCK ; /* SMP release global lock */ \
- MAYBE_POPL_ES ; \
- popl %ds ; \
- popl %edx ; \
- popl %ecx ; \
- popl %eax ; \
- iret ; \
-; \
- ALIGN_TEXT ; \
-2: ; \
- cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \
- jae 1b ; /* no, return */ \
- movl _cpl,%eax ; \
- /* XXX next line is probably unnecessary now. */ \
- movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \
- incb _intr_nesting_level ; /* ... really limit it ... */ \
- sti ; /* ... to do this as early as possible */ \
- MAYBE_POPL_ES ; /* discard most of thin frame ... */ \
- popl %ecx ; /* ... original %ds ... */ \
- popl %edx ; \
- xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \
- pushal ; /* build fat frame (grrr) ... */ \
- pushl %ecx ; /* ... actually %ds ... */ \
- pushl %es ; \
- movl $KDSEL,%eax ; \
- movl %ax,%es ; \
- movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
- movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \
- movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
- pushl %eax ; \
- subl $4,%esp ; /* junk for unit number */ \
- MEXITCOUNT ; \
- jmp _doreti
-
-#define INTR(irq_num, vec_name, icu, enable_icus, reg) \
- .text ; \
- SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; /* save our data and extra segments ... */ \
- pushl %es ; \
- movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
- movl %ax,%ds ; /* ... early for obsolete reasons */ \
- movl %ax,%es ; \
- GET_MPLOCK ; /* SMP Spin lock */ \
- MAYBE_MASK_IRQ(irq_num,icu) ; \
- enable_icus ; \
- movl _cpl,%eax ; \
- TEST_IRQ(irq_num,reg) ; \
- jne 2f ; \
- incb _intr_nesting_level ; \
-__CONCAT(Xresume,irq_num): ; \
- FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \
- incl _cnt+V_INTR ; /* tally interrupts */ \
- movl _intr_countp + (irq_num) * 4,%eax ; \
- incl (%eax) ; \
- movl _cpl,%eax ; \
- pushl %eax ; \
- pushl _intr_unit + (irq_num) * 4 ; \
- orl _intr_mask + (irq_num) * 4,%eax ; \
- movl %eax,_cpl ; \
- sti ; \
- call *_intr_handler + (irq_num) * 4 ; \
- cli ; /* must unmask _imen and icu atomically */ \
- MAYBE_UNMASK_IRQ(irq_num,icu) ; \
- sti ; /* XXX _doreti repeats the cli/sti */ \
- MEXITCOUNT ; \
- /* We could usually avoid the following jmp by inlining some of */ \
- /* _doreti, but it's probably better to use less cache. */ \
- jmp _doreti ; \
-; \
- ALIGN_TEXT ; \
-2: ; \
- /* XXX skip mcounting here to avoid double count */ \
- SET_IPENDING(irq_num) ; \
- REL_MPLOCK ; /* SMP release global lock */ \
- popl %es ; \
- popl %ds ; \
- popal ; \
- addl $4+4,%esp ; \
- iret
-
-#if defined(APIC_IO)
- .text
- SUPERALIGN_TEXT
- .globl _Xinvltlb
-_Xinvltlb:
- pushl %eax
- movl %cr3, %eax
- movl %eax, %cr3
- ss
- movl _apic_base, %eax
- ss
- movl $0, APIC_EOI(%eax)
- popl %eax
- iret
-#endif /* APIC_IO */
-
-MCOUNT_LABEL(bintr)
- FAST_INTR(0,fastintr0, ENABLE_ICU1)
- FAST_INTR(1,fastintr1, ENABLE_ICU1)
- FAST_INTR(2,fastintr2, ENABLE_ICU1)
- FAST_INTR(3,fastintr3, ENABLE_ICU1)
- FAST_INTR(4,fastintr4, ENABLE_ICU1)
- FAST_INTR(5,fastintr5, ENABLE_ICU1)
- FAST_INTR(6,fastintr6, ENABLE_ICU1)
- FAST_INTR(7,fastintr7, ENABLE_ICU1)
- FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
- FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
- FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
- FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
- FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
- FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
- FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
- FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
-#if defined(APIC_IO)
- FAST_INTR(16,fastintr16, ENABLE_ICU1_AND_2)
- FAST_INTR(17,fastintr17, ENABLE_ICU1_AND_2)
- FAST_INTR(18,fastintr18, ENABLE_ICU1_AND_2)
- FAST_INTR(19,fastintr19, ENABLE_ICU1_AND_2)
- FAST_INTR(20,fastintr20, ENABLE_ICU1_AND_2)
- FAST_INTR(21,fastintr21, ENABLE_ICU1_AND_2)
- FAST_INTR(22,fastintr22, ENABLE_ICU1_AND_2)
- FAST_INTR(23,fastintr23, ENABLE_ICU1_AND_2)
-#endif /* APIC_IO */
- INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al)
- INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al)
- INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al)
- INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al)
- INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al)
- INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al)
- INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al)
- INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al)
- INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah)
-#if defined(APIC_IO)
- INTR(16,intr16, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(17,intr17, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(18,intr18, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(19,intr19, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(20,intr20, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(21,intr21, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(22,intr22, IO_ICU2, ENABLE_ICU1_AND_2, ah)
- INTR(23,intr23, IO_ICU2, ENABLE_ICU1_AND_2, ah)
-#endif /* APIC_IO */
-MCOUNT_LABEL(eintr)
-
- .data
-ihandlers: /* addresses of interrupt handlers */
- /* actually resumption addresses for HWI's */
- .long Xresume0, Xresume1, Xresume2, Xresume3
- .long Xresume4, Xresume5, Xresume6, Xresume7
- .long Xresume8, Xresume9, Xresume10, Xresume11
- .long Xresume12, Xresume13, Xresume14, Xresume15
-#if defined(APIC_IO)
- .long Xresume16, Xresume17, Xresume18, Xresume19
- .long Xresume20, Xresume21, Xresume22, Xresume23
+#ifdef APIC_IO
+#include "i386/isa/apic_vector.s"
#else
- .long 0, 0, 0, 0, 0, 0, 0, 0
-#endif /* APIC_IO */
- .long 0, 0, 0, 0, swi_tty, swi_net, _softclock, swi_ast
-
-imasks: /* masks for interrupt handlers */
- .space NHWI*4 /* padding; HWI masks are elsewhere */
-
-#if !defined(APIC_IO) /* Less padding for APIC_IO, NHWI is higher */
- .long 0, 0, 0, 0, 0, 0, 0, 0
-#endif /* APIC_IO */
- .long 0, 0, 0, 0
- .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK
-
- .globl _intr_nesting_level
-_intr_nesting_level:
- .byte 0
- .space 3
-
-#if defined(APIC_IO)
-
- .globl _ivectors
-_ivectors:
- .long _Xintr0, _Xintr1, _Xintr2, _Xintr3
- .long _Xintr4, _Xintr5, _Xintr6, _Xintr7
- .long _Xintr8, _Xintr9, _Xintr10, _Xintr11
- .long _Xintr12, _Xintr13, _Xintr14, _Xintr15
- .long _Xintr16, _Xintr17, _Xintr18, _Xintr19
- .long _Xintr20, _Xintr21, _Xintr22, _Xintr23
-
-/* active flag for lazy masking */
-iactive:
- .long 0
-
-#endif /* APIC_IO */
-
-/*
- * Interrupt counters and names. The format of these and the label names
- * must agree with what vmstat expects. The tables are indexed by device
- * ids so that we don't have to move the names around as devices are
- * attached.
- */
-#include "vector.h"
- .globl _intrcnt, _eintrcnt
-_intrcnt:
- .space (NR_DEVICES + ICU_LEN) * 4
-_eintrcnt:
-
- .globl _intrnames, _eintrnames
-_intrnames:
- .ascii DEVICE_NAMES
- .asciz "stray irq0"
- .asciz "stray irq1"
- .asciz "stray irq2"
- .asciz "stray irq3"
- .asciz "stray irq4"
- .asciz "stray irq5"
- .asciz "stray irq6"
- .asciz "stray irq7"
- .asciz "stray irq8"
- .asciz "stray irq9"
- .asciz "stray irq10"
- .asciz "stray irq11"
- .asciz "stray irq12"
- .asciz "stray irq13"
- .asciz "stray irq14"
- .asciz "stray irq15"
-#if defined(APIC_IO)
- .asciz "stray irq16"
- .asciz "stray irq17"
- .asciz "stray irq18"
- .asciz "stray irq19"
- .asciz "stray irq20"
- .asciz "stray irq21"
- .asciz "stray irq22"
- .asciz "stray irq23"
-#endif /* APIC_IO */
-_eintrnames:
-
- .text
+#include "i386/isa/icu_vector.s"
+#endif /* APIC_IO */