summaryrefslogtreecommitdiffstats
path: root/sys/amd64/isa
diff options
context:
space:
mode:
authordillon <dillon@FreeBSD.org>2002-03-27 05:39:23 +0000
committerdillon <dillon@FreeBSD.org>2002-03-27 05:39:23 +0000
commitdc5aafeb94ddee4f835e390dffaecbb0eec5d5e2 (patch)
tree8233f61cf29e01829b91c6a5cf27defe60e6b8d8 /sys/amd64/isa
parent9b5143f94f573dc8954cb0913f3edb055e6caf0f (diff)
downloadFreeBSD-src-dc5aafeb94ddee4f835e390dffaecbb0eec5d5e2.zip
FreeBSD-src-dc5aafeb94ddee4f835e390dffaecbb0eec5d5e2.tar.gz
Compromise for critical*()/cpu_critical*() recommit. Cleanup the interrupt
disablement assumptions in kern_fork.c by adding another API call,
cpu_critical_fork_exit(). Cleanup the td_savecrit field by moving it
from MI to MD. Temporarily move cpu_critical*() from <arch>/include/cpufunc.h
to <arch>/<arch>/critical.c (stage-2 will clean this up).

Implement interrupt deferral for i386 that allows interrupts to remain
enabled inside critical sections. This also fixes an IPI interlock bug,
and requires uses of icu_lock to be enclosed in a true interrupt disablement.

This is the stage-1 commit. Stage-2 will occur after stage-1 has stabilized,
and will move cpu_critical*() into its own header file(s) + other things.
This commit may break non-i386 architectures in trivial ways. This should
be temporary.

Reviewed by: core
Approved by: core
Diffstat (limited to 'sys/amd64/isa')
-rw-r--r--  sys/amd64/isa/atpic_vector.S | 221
-rw-r--r--  sys/amd64/isa/clock.c        | 12
-rw-r--r--  sys/amd64/isa/icu_vector.S   | 221
-rw-r--r--  sys/amd64/isa/icu_vector.s   | 221
-rw-r--r--  sys/amd64/isa/intr_machdep.c | 46
-rw-r--r--  sys/amd64/isa/intr_machdep.h | 14
-rw-r--r--  sys/amd64/isa/nmi.c          | 46
-rw-r--r--  sys/amd64/isa/npx.c          | 12
8 files changed, 619 insertions, 174 deletions
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index 4e10cc2..01a804b 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -16,17 +16,23 @@
#define ICU_EOI 0x20 /* XXX - define elsewhere */
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_LBIT(irq_num) (1 << (irq_num))
#define IRQ_BYTE(irq_num) ((irq_num) >> 3)
#ifdef AUTO_EOI_1
+
#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
+
#else
+
#define ENABLE_ICU1 \
movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
OUTB_ICU1 /* ... to clear in service bit */
+
#define OUTB_ICU1 \
outb %al,$IO_ICU1
+
#endif
#ifdef AUTO_EOI_2
@@ -34,48 +40,127 @@
* The data sheet says no auto-EOI on slave, but it sometimes works.
*/
#define ENABLE_ICU1_AND_2 ENABLE_ICU1
+
#else
+
#define ENABLE_ICU1_AND_2 \
movb $ICU_EOI,%al ; /* as above */ \
outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
+
#endif
+#define PUSH_FRAME \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ pushal ; /* 8 ints */ \
+ pushl %ds ; /* save data and extra segments ... */ \
+ pushl %es ; \
+ pushl %fs
+
+#define PUSH_DUMMY \
+ pushfl ; /* eflags */ \
+ pushl %cs ; /* cs */ \
+ pushl 12(%esp) ; /* original caller eip */ \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ subl $11*4,%esp
+
+#define POP_FRAME \
+ popl %fs ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp
+
+#define POP_DUMMY \
+ addl $16*4,%esp
+
+#define MASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
+
+#define UNMASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
/*
* Macros for interrupt interrupt entry, call to handler, and exit.
*/
-#define FAST_INTR(irq_num, vec_name, enable_icus) \
+#define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; \
- pushl %es ; \
- pushl %fs ; \
+ PUSH_FRAME ; \
mov $KDSEL,%ax ; \
mov %ax,%ds ; \
mov %ax,%es ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+; \
+ movl $1,PCPU(INT_PENDING) ; \
+ orl $IRQ_LBIT(irq_num),PCPU(FPENDING) ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_CRITNEST(%ebx) ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
- call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
- enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
+ call *intr_handler + (irq_num) * 4 ; \
addl $4,%esp ; \
+ enable_icus ; \
incl cnt+V_INTR ; /* book-keeping can wait */ \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
+ decl TD_CRITNEST(%ebx) ; \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 2f ; \
+; \
+ call unpend ; \
+2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- call critical_exit ; \
+10: ; \
MEXITCOUNT ; \
jmp doreti
+/*
+ * Restart a fast interrupt that was held up by a critical section.
+ * This routine is called from unpend(). unpend() ensures we are
+ * in a critical section and deals with the interrupt nesting level
+ * for us. If we previously masked the irq, we have to unmask it.
+ *
+ * We have a choice. We can regenerate the irq using the 'int'
+ * instruction or we can create a dummy frame and call the interrupt
+ * handler directly. I've chosen to use the dummy-frame method.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+; \
+ pushl %ebp ; \
+ movl %esp, %ebp ; \
+ PUSH_DUMMY ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
+ addl $4, %esp ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ UNMASK_IRQ(icu, irq_num) ; \
+ POP_DUMMY ; \
+ popl %ebp ; \
+ ret
+
/*
* Slow, threaded interrupts.
*
@@ -85,74 +170,96 @@ IDTVEC(vec_name) ; \
* interrupt handler and don't run anything. We could just do an
* iret. FIXME.
*/
-#define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
+#define INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; /* save our data and extra segments ... */ \
- pushl %es ; \
- pushl %fs ; \
+ PUSH_FRAME ; \
mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
mov %ax,%ds ; \
mov %ax,%es ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
+; \
maybe_extra_ipending ; \
- movb imen + IRQ_BYTE(irq_num),%al ; \
- orb $IRQ_BIT(irq_num),%al ; \
- movb %al,imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET ; \
+ MASK_IRQ(icu, irq_num) ; \
enable_icus ; \
+; \
movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+ movl $1,PCPU(INT_PENDING); \
+ orl $IRQ_LBIT(irq_num),PCPU(IPENDING) ; \
+ jmp 10f ; \
+1: ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 9f ; \
+ call unpend ; \
+9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
addl $4, %esp ; /* discard the parameter */ \
+; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
MEXITCOUNT ; \
- /* We could usually avoid the following jmp by inlining some of */ \
- /* doreti, but it's probably better to use less cache. */ \
- jmp doreti /* and catch up inside doreti */
+ jmp doreti
MCOUNT_LABEL(bintr)
- FAST_INTR(0,fastintr0, ENABLE_ICU1)
- FAST_INTR(1,fastintr1, ENABLE_ICU1)
- FAST_INTR(2,fastintr2, ENABLE_ICU1)
- FAST_INTR(3,fastintr3, ENABLE_ICU1)
- FAST_INTR(4,fastintr4, ENABLE_ICU1)
- FAST_INTR(5,fastintr5, ENABLE_ICU1)
- FAST_INTR(6,fastintr6, ENABLE_ICU1)
- FAST_INTR(7,fastintr7, ENABLE_ICU1)
- FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
- FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
- FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
- FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
- FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
- FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
- FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
- FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+ FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
- INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
- INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
- INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
- INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
- INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
- INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
- INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
- INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
- INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+ INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
+ INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
+ INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
+ INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
+ INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
+ INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
+ INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
+ INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
+ INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
+ FAST_UNPEND(0,fastunpend0, IO_ICU1)
+ FAST_UNPEND(1,fastunpend1, IO_ICU1)
+ FAST_UNPEND(2,fastunpend2, IO_ICU1)
+ FAST_UNPEND(3,fastunpend3, IO_ICU1)
+ FAST_UNPEND(4,fastunpend4, IO_ICU1)
+ FAST_UNPEND(5,fastunpend5, IO_ICU1)
+ FAST_UNPEND(6,fastunpend6, IO_ICU1)
+ FAST_UNPEND(7,fastunpend7, IO_ICU1)
+ FAST_UNPEND(8,fastunpend8, IO_ICU2)
+ FAST_UNPEND(9,fastunpend9, IO_ICU2)
+ FAST_UNPEND(10,fastunpend10, IO_ICU2)
+ FAST_UNPEND(11,fastunpend11, IO_ICU2)
+ FAST_UNPEND(12,fastunpend12, IO_ICU2)
+ FAST_UNPEND(13,fastunpend13, IO_ICU2)
+ FAST_UNPEND(14,fastunpend14, IO_ICU2)
+ FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr)
+
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index 49516028..810fbe7 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -995,6 +995,7 @@ cpu_initclocks()
int apic_8254_trial;
void *clkdesc;
#endif /* APIC_IO */
+ register_t crit;
if (statclock_disable) {
/*
@@ -1029,9 +1030,11 @@ cpu_initclocks()
inthand_add("clk", apic_8254_intr, (driver_intr_t *)clkintr, NULL,
INTR_TYPE_CLK | INTR_FAST, &clkdesc);
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTREN(1 << apic_8254_intr);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
#else /* APIC_IO */
@@ -1042,9 +1045,11 @@ cpu_initclocks()
*/
inthand_add("clk", 0, (driver_intr_t *)clkintr, NULL,
INTR_TYPE_CLK | INTR_FAST, NULL);
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTREN(IRQ0);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
#endif /* APIC_IO */
@@ -1067,6 +1072,7 @@ cpu_initclocks()
inthand_add("rtc", 8, (driver_intr_t *)rtcintr, NULL,
INTR_TYPE_CLK | INTR_FAST, NULL);
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
#ifdef APIC_IO
INTREN(APIC_IRQ8);
@@ -1074,6 +1080,7 @@ cpu_initclocks()
INTREN(IRQ8);
#endif /* APIC_IO */
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
writertc(RTC_STATUSB, rtc_statusb);
@@ -1090,9 +1097,12 @@ cpu_initclocks()
* on the IO APIC.
* Workaround: Limited variant of mixed mode.
*/
+
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << apic_8254_intr);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
inthand_remove(clkdesc);
printf("APIC_IO: Broken MP table detected: "
"8254 is not connected to "
@@ -1115,9 +1125,11 @@ cpu_initclocks()
inthand_add("clk", apic_8254_intr,
(driver_intr_t *)clkintr, NULL,
INTR_TYPE_CLK | INTR_FAST, NULL);
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTREN(1 << apic_8254_intr);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
}
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index 4e10cc2..01a804b 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -16,17 +16,23 @@
#define ICU_EOI 0x20 /* XXX - define elsewhere */
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_LBIT(irq_num) (1 << (irq_num))
#define IRQ_BYTE(irq_num) ((irq_num) >> 3)
#ifdef AUTO_EOI_1
+
#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
+
#else
+
#define ENABLE_ICU1 \
movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
OUTB_ICU1 /* ... to clear in service bit */
+
#define OUTB_ICU1 \
outb %al,$IO_ICU1
+
#endif
#ifdef AUTO_EOI_2
@@ -34,48 +40,127 @@
* The data sheet says no auto-EOI on slave, but it sometimes works.
*/
#define ENABLE_ICU1_AND_2 ENABLE_ICU1
+
#else
+
#define ENABLE_ICU1_AND_2 \
movb $ICU_EOI,%al ; /* as above */ \
outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
+
#endif
+#define PUSH_FRAME \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ pushal ; /* 8 ints */ \
+ pushl %ds ; /* save data and extra segments ... */ \
+ pushl %es ; \
+ pushl %fs
+
+#define PUSH_DUMMY \
+ pushfl ; /* eflags */ \
+ pushl %cs ; /* cs */ \
+ pushl 12(%esp) ; /* original caller eip */ \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ subl $11*4,%esp
+
+#define POP_FRAME \
+ popl %fs ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp
+
+#define POP_DUMMY \
+ addl $16*4,%esp
+
+#define MASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
+
+#define UNMASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
/*
* Macros for interrupt interrupt entry, call to handler, and exit.
*/
-#define FAST_INTR(irq_num, vec_name, enable_icus) \
+#define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; \
- pushl %es ; \
- pushl %fs ; \
+ PUSH_FRAME ; \
mov $KDSEL,%ax ; \
mov %ax,%ds ; \
mov %ax,%es ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+; \
+ movl $1,PCPU(INT_PENDING) ; \
+ orl $IRQ_LBIT(irq_num),PCPU(FPENDING) ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_CRITNEST(%ebx) ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
- call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
- enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
+ call *intr_handler + (irq_num) * 4 ; \
addl $4,%esp ; \
+ enable_icus ; \
incl cnt+V_INTR ; /* book-keeping can wait */ \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
+ decl TD_CRITNEST(%ebx) ; \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 2f ; \
+; \
+ call unpend ; \
+2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- call critical_exit ; \
+10: ; \
MEXITCOUNT ; \
jmp doreti
+/*
+ * Restart a fast interrupt that was held up by a critical section.
+ * This routine is called from unpend(). unpend() ensures we are
+ * in a critical section and deals with the interrupt nesting level
+ * for us. If we previously masked the irq, we have to unmask it.
+ *
+ * We have a choice. We can regenerate the irq using the 'int'
+ * instruction or we can create a dummy frame and call the interrupt
+ * handler directly. I've chosen to use the dummy-frame method.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+; \
+ pushl %ebp ; \
+ movl %esp, %ebp ; \
+ PUSH_DUMMY ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
+ addl $4, %esp ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ UNMASK_IRQ(icu, irq_num) ; \
+ POP_DUMMY ; \
+ popl %ebp ; \
+ ret
+
/*
* Slow, threaded interrupts.
*
@@ -85,74 +170,96 @@ IDTVEC(vec_name) ; \
* interrupt handler and don't run anything. We could just do an
* iret. FIXME.
*/
-#define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
+#define INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; /* save our data and extra segments ... */ \
- pushl %es ; \
- pushl %fs ; \
+ PUSH_FRAME ; \
mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
mov %ax,%ds ; \
mov %ax,%es ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
+; \
maybe_extra_ipending ; \
- movb imen + IRQ_BYTE(irq_num),%al ; \
- orb $IRQ_BIT(irq_num),%al ; \
- movb %al,imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET ; \
+ MASK_IRQ(icu, irq_num) ; \
enable_icus ; \
+; \
movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+ movl $1,PCPU(INT_PENDING); \
+ orl $IRQ_LBIT(irq_num),PCPU(IPENDING) ; \
+ jmp 10f ; \
+1: ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 9f ; \
+ call unpend ; \
+9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
addl $4, %esp ; /* discard the parameter */ \
+; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
MEXITCOUNT ; \
- /* We could usually avoid the following jmp by inlining some of */ \
- /* doreti, but it's probably better to use less cache. */ \
- jmp doreti /* and catch up inside doreti */
+ jmp doreti
MCOUNT_LABEL(bintr)
- FAST_INTR(0,fastintr0, ENABLE_ICU1)
- FAST_INTR(1,fastintr1, ENABLE_ICU1)
- FAST_INTR(2,fastintr2, ENABLE_ICU1)
- FAST_INTR(3,fastintr3, ENABLE_ICU1)
- FAST_INTR(4,fastintr4, ENABLE_ICU1)
- FAST_INTR(5,fastintr5, ENABLE_ICU1)
- FAST_INTR(6,fastintr6, ENABLE_ICU1)
- FAST_INTR(7,fastintr7, ENABLE_ICU1)
- FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
- FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
- FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
- FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
- FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
- FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
- FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
- FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+ FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
- INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
- INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
- INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
- INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
- INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
- INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
- INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
- INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
- INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+ INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
+ INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
+ INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
+ INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
+ INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
+ INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
+ INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
+ INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
+ INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
+ FAST_UNPEND(0,fastunpend0, IO_ICU1)
+ FAST_UNPEND(1,fastunpend1, IO_ICU1)
+ FAST_UNPEND(2,fastunpend2, IO_ICU1)
+ FAST_UNPEND(3,fastunpend3, IO_ICU1)
+ FAST_UNPEND(4,fastunpend4, IO_ICU1)
+ FAST_UNPEND(5,fastunpend5, IO_ICU1)
+ FAST_UNPEND(6,fastunpend6, IO_ICU1)
+ FAST_UNPEND(7,fastunpend7, IO_ICU1)
+ FAST_UNPEND(8,fastunpend8, IO_ICU2)
+ FAST_UNPEND(9,fastunpend9, IO_ICU2)
+ FAST_UNPEND(10,fastunpend10, IO_ICU2)
+ FAST_UNPEND(11,fastunpend11, IO_ICU2)
+ FAST_UNPEND(12,fastunpend12, IO_ICU2)
+ FAST_UNPEND(13,fastunpend13, IO_ICU2)
+ FAST_UNPEND(14,fastunpend14, IO_ICU2)
+ FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr)
+
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index 4e10cc2..01a804b 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -16,17 +16,23 @@
#define ICU_EOI 0x20 /* XXX - define elsewhere */
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_LBIT(irq_num) (1 << (irq_num))
#define IRQ_BYTE(irq_num) ((irq_num) >> 3)
#ifdef AUTO_EOI_1
+
#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
+
#else
+
#define ENABLE_ICU1 \
movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
OUTB_ICU1 /* ... to clear in service bit */
+
#define OUTB_ICU1 \
outb %al,$IO_ICU1
+
#endif
#ifdef AUTO_EOI_2
@@ -34,48 +40,127 @@
* The data sheet says no auto-EOI on slave, but it sometimes works.
*/
#define ENABLE_ICU1_AND_2 ENABLE_ICU1
+
#else
+
#define ENABLE_ICU1_AND_2 \
movb $ICU_EOI,%al ; /* as above */ \
outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
+
#endif
+#define PUSH_FRAME \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ pushal ; /* 8 ints */ \
+ pushl %ds ; /* save data and extra segments ... */ \
+ pushl %es ; \
+ pushl %fs
+
+#define PUSH_DUMMY \
+ pushfl ; /* eflags */ \
+ pushl %cs ; /* cs */ \
+ pushl 12(%esp) ; /* original caller eip */ \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ subl $11*4,%esp
+
+#define POP_FRAME \
+ popl %fs ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp
+
+#define POP_DUMMY \
+ addl $16*4,%esp
+
+#define MASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
+
+#define UNMASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
/*
* Macros for interrupt interrupt entry, call to handler, and exit.
*/
-#define FAST_INTR(irq_num, vec_name, enable_icus) \
+#define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; \
- pushl %es ; \
- pushl %fs ; \
+ PUSH_FRAME ; \
mov $KDSEL,%ax ; \
mov %ax,%ds ; \
mov %ax,%es ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+; \
+ movl $1,PCPU(INT_PENDING) ; \
+ orl $IRQ_LBIT(irq_num),PCPU(FPENDING) ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_CRITNEST(%ebx) ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
- call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
- enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
+ call *intr_handler + (irq_num) * 4 ; \
addl $4,%esp ; \
+ enable_icus ; \
incl cnt+V_INTR ; /* book-keeping can wait */ \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
+ decl TD_CRITNEST(%ebx) ; \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 2f ; \
+; \
+ call unpend ; \
+2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- call critical_exit ; \
+10: ; \
MEXITCOUNT ; \
jmp doreti
+/*
+ * Restart a fast interrupt that was held up by a critical section.
+ * This routine is called from unpend(). unpend() ensures we are
+ * in a critical section and deals with the interrupt nesting level
+ * for us. If we previously masked the irq, we have to unmask it.
+ *
+ * We have a choice. We can regenerate the irq using the 'int'
+ * instruction or we can create a dummy frame and call the interrupt
+ * handler directly. I've chosen to use the dummy-frame method.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+; \
+ pushl %ebp ; \
+ movl %esp, %ebp ; \
+ PUSH_DUMMY ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
+ addl $4, %esp ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ UNMASK_IRQ(icu, irq_num) ; \
+ POP_DUMMY ; \
+ popl %ebp ; \
+ ret
+
/*
* Slow, threaded interrupts.
*
@@ -85,74 +170,96 @@ IDTVEC(vec_name) ; \
* interrupt handler and don't run anything. We could just do an
* iret. FIXME.
*/
-#define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
+#define INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; /* save our data and extra segments ... */ \
- pushl %es ; \
- pushl %fs ; \
+ PUSH_FRAME ; \
mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
mov %ax,%ds ; \
mov %ax,%es ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
+; \
maybe_extra_ipending ; \
- movb imen + IRQ_BYTE(irq_num),%al ; \
- orb $IRQ_BIT(irq_num),%al ; \
- movb %al,imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET ; \
+ MASK_IRQ(icu, irq_num) ; \
enable_icus ; \
+; \
movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+ movl $1,PCPU(INT_PENDING); \
+ orl $IRQ_LBIT(irq_num),PCPU(IPENDING) ; \
+ jmp 10f ; \
+1: ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 9f ; \
+ call unpend ; \
+9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
addl $4, %esp ; /* discard the parameter */ \
+; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
MEXITCOUNT ; \
- /* We could usually avoid the following jmp by inlining some of */ \
- /* doreti, but it's probably better to use less cache. */ \
- jmp doreti /* and catch up inside doreti */
+ jmp doreti
MCOUNT_LABEL(bintr)
- FAST_INTR(0,fastintr0, ENABLE_ICU1)
- FAST_INTR(1,fastintr1, ENABLE_ICU1)
- FAST_INTR(2,fastintr2, ENABLE_ICU1)
- FAST_INTR(3,fastintr3, ENABLE_ICU1)
- FAST_INTR(4,fastintr4, ENABLE_ICU1)
- FAST_INTR(5,fastintr5, ENABLE_ICU1)
- FAST_INTR(6,fastintr6, ENABLE_ICU1)
- FAST_INTR(7,fastintr7, ENABLE_ICU1)
- FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
- FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
- FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
- FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
- FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
- FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
- FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
- FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+ FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
- INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
- INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
- INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
- INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
- INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
- INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
- INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
- INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
- INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+ INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
+ INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
+ INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
+ INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
+ INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
+ INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
+ INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
+ INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
+ INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
+ FAST_UNPEND(0,fastunpend0, IO_ICU1)
+ FAST_UNPEND(1,fastunpend1, IO_ICU1)
+ FAST_UNPEND(2,fastunpend2, IO_ICU1)
+ FAST_UNPEND(3,fastunpend3, IO_ICU1)
+ FAST_UNPEND(4,fastunpend4, IO_ICU1)
+ FAST_UNPEND(5,fastunpend5, IO_ICU1)
+ FAST_UNPEND(6,fastunpend6, IO_ICU1)
+ FAST_UNPEND(7,fastunpend7, IO_ICU1)
+ FAST_UNPEND(8,fastunpend8, IO_ICU2)
+ FAST_UNPEND(9,fastunpend9, IO_ICU2)
+ FAST_UNPEND(10,fastunpend10, IO_ICU2)
+ FAST_UNPEND(11,fastunpend11, IO_ICU2)
+ FAST_UNPEND(12,fastunpend12, IO_ICU2)
+ FAST_UNPEND(13,fastunpend13, IO_ICU2)
+ FAST_UNPEND(14,fastunpend14, IO_ICU2)
+ FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr)
+
diff --git a/sys/amd64/isa/intr_machdep.c b/sys/amd64/isa/intr_machdep.c
index cfc162b..59c739e 100644
--- a/sys/amd64/isa/intr_machdep.c
+++ b/sys/amd64/isa/intr_machdep.c
@@ -117,6 +117,27 @@ static inthand_t *fastintr[ICU_LEN] = {
#endif /* APIC_IO */
};
+static unpendhand_t *fastunpend[ICU_LEN] = {
+ &IDTVEC(fastunpend0), &IDTVEC(fastunpend1),
+ &IDTVEC(fastunpend2), &IDTVEC(fastunpend3),
+ &IDTVEC(fastunpend4), &IDTVEC(fastunpend5),
+ &IDTVEC(fastunpend6), &IDTVEC(fastunpend7),
+ &IDTVEC(fastunpend8), &IDTVEC(fastunpend9),
+ &IDTVEC(fastunpend10), &IDTVEC(fastunpend11),
+ &IDTVEC(fastunpend12), &IDTVEC(fastunpend13),
+ &IDTVEC(fastunpend14), &IDTVEC(fastunpend15),
+#if defined(APIC_IO)
+ &IDTVEC(fastunpend16), &IDTVEC(fastunpend17),
+ &IDTVEC(fastunpend18), &IDTVEC(fastunpend19),
+ &IDTVEC(fastunpend20), &IDTVEC(fastunpend21),
+ &IDTVEC(fastunpend22), &IDTVEC(fastunpend23),
+ &IDTVEC(fastunpend24), &IDTVEC(fastunpend25),
+ &IDTVEC(fastunpend26), &IDTVEC(fastunpend27),
+ &IDTVEC(fastunpend28), &IDTVEC(fastunpend29),
+ &IDTVEC(fastunpend30), &IDTVEC(fastunpend31),
+#endif /* APIC_IO */
+};
+
static inthand_t *slowintr[ICU_LEN] = {
&IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
&IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
@@ -291,13 +312,16 @@ isa_nmi(cd)
void icu_reinit()
{
int i;
+ register_t crit;
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
init_i8259();
for(i=0;i<ICU_LEN;i++)
if(intr_handler[i] != isa_strayintr)
INTREN(1<<i);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
/*
@@ -309,13 +333,16 @@ void
isa_defaultirq()
{
int i;
+ register_t crit;
/* icu vectors */
for (i = 0; i < ICU_LEN; i++)
icu_unset(i, (driver_intr_t *)NULL);
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
init_i8259();
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
@@ -476,6 +503,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
int vector;
u_int32_t value; /* the window register is 32 bits */
#endif /* FAST_HI */
+ register_t crit;
#if defined(APIC_IO)
if ((u_int)intr >= ICU_LEN) /* no 8259 SLAVE to ignore */
@@ -488,6 +516,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
return (EBUSY);
#endif
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
intr_handler[intr] = handler;
intr_unit[intr] = arg;
@@ -530,6 +559,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
#endif /* FAST_HI */
INTREN(1 << intr);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
return (0);
}
@@ -543,10 +573,12 @@ icu_unset(intr, handler)
int intr;
driver_intr_t *handler;
{
+ register_t crit;
if ((u_int)intr >= ICU_LEN || handler != intr_handler[intr])
return (EINVAL);
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << intr);
intr_countp[intr] = &intrcnt[1 + intr];
@@ -564,6 +596,7 @@ icu_unset(intr, handler)
GSEL(GCODE_SEL, SEL_KPL));
#endif /* FAST_HI */
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
return (0);
}
@@ -578,19 +611,25 @@ SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
static void
ithread_enable(int vector)
{
+ register_t crit;
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTREN(1 << vector);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
static void
ithread_disable(int vector)
{
+ register_t crit;
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << vector);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
int
@@ -672,3 +711,10 @@ inthand_remove(void *cookie)
return (ithread_remove_handler(cookie));
}
+
+void
+call_fast_unpend(int irq)
+{
+ fastunpend[irq]();
+}
+
diff --git a/sys/amd64/isa/intr_machdep.h b/sys/amd64/isa/intr_machdep.h
index d674630..8e1a828 100644
--- a/sys/amd64/isa/intr_machdep.h
+++ b/sys/amd64/isa/intr_machdep.h
@@ -140,6 +140,7 @@
* Type of the first (asm) part of an interrupt handler.
*/
typedef void inthand_t(u_int cs, u_int ef, u_int esp, u_int ss);
+typedef void unpendhand_t __P((void));
#define IDTVEC(name) __CONCAT(X,name)
@@ -163,6 +164,18 @@ inthand_t
IDTVEC(intr4), IDTVEC(intr5), IDTVEC(intr6), IDTVEC(intr7),
IDTVEC(intr8), IDTVEC(intr9), IDTVEC(intr10), IDTVEC(intr11),
IDTVEC(intr12), IDTVEC(intr13), IDTVEC(intr14), IDTVEC(intr15);
+unpendhand_t
+ IDTVEC(fastunpend0), IDTVEC(fastunpend1), IDTVEC(fastunpend2),
+ IDTVEC(fastunpend3), IDTVEC(fastunpend4), IDTVEC(fastunpend5),
+ IDTVEC(fastunpend6), IDTVEC(fastunpend7), IDTVEC(fastunpend8),
+ IDTVEC(fastunpend9), IDTVEC(fastunpend10), IDTVEC(fastunpend11),
+ IDTVEC(fastunpend12), IDTVEC(fastunpend13), IDTVEC(fastunpend14),
+ IDTVEC(fastunpend15), IDTVEC(fastunpend16), IDTVEC(fastunpend17),
+ IDTVEC(fastunpend18), IDTVEC(fastunpend19), IDTVEC(fastunpend20),
+ IDTVEC(fastunpend21), IDTVEC(fastunpend22), IDTVEC(fastunpend23),
+ IDTVEC(fastunpend24), IDTVEC(fastunpend25), IDTVEC(fastunpend26),
+ IDTVEC(fastunpend27), IDTVEC(fastunpend28), IDTVEC(fastunpend29),
+ IDTVEC(fastunpend30), IDTVEC(fastunpend31);
#if defined(SMP) || defined(APIC_IO)
inthand_t
@@ -227,6 +240,7 @@ int inthand_add(const char *name, int irq, driver_intr_t handler, void *arg,
enum intr_type flags, void **cookiep);
int inthand_remove(void *cookie);
void sched_ithd(void *dummy);
+void call_fast_unpend(int irq);
#endif /* LOCORE */
diff --git a/sys/amd64/isa/nmi.c b/sys/amd64/isa/nmi.c
index cfc162b..59c739e 100644
--- a/sys/amd64/isa/nmi.c
+++ b/sys/amd64/isa/nmi.c
@@ -117,6 +117,27 @@ static inthand_t *fastintr[ICU_LEN] = {
#endif /* APIC_IO */
};
+static unpendhand_t *fastunpend[ICU_LEN] = {
+ &IDTVEC(fastunpend0), &IDTVEC(fastunpend1),
+ &IDTVEC(fastunpend2), &IDTVEC(fastunpend3),
+ &IDTVEC(fastunpend4), &IDTVEC(fastunpend5),
+ &IDTVEC(fastunpend6), &IDTVEC(fastunpend7),
+ &IDTVEC(fastunpend8), &IDTVEC(fastunpend9),
+ &IDTVEC(fastunpend10), &IDTVEC(fastunpend11),
+ &IDTVEC(fastunpend12), &IDTVEC(fastunpend13),
+ &IDTVEC(fastunpend14), &IDTVEC(fastunpend15),
+#if defined(APIC_IO)
+ &IDTVEC(fastunpend16), &IDTVEC(fastunpend17),
+ &IDTVEC(fastunpend18), &IDTVEC(fastunpend19),
+ &IDTVEC(fastunpend20), &IDTVEC(fastunpend21),
+ &IDTVEC(fastunpend22), &IDTVEC(fastunpend23),
+ &IDTVEC(fastunpend24), &IDTVEC(fastunpend25),
+ &IDTVEC(fastunpend26), &IDTVEC(fastunpend27),
+ &IDTVEC(fastunpend28), &IDTVEC(fastunpend29),
+ &IDTVEC(fastunpend30), &IDTVEC(fastunpend31),
+#endif /* APIC_IO */
+};
+
static inthand_t *slowintr[ICU_LEN] = {
&IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
&IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
@@ -291,13 +312,16 @@ isa_nmi(cd)
void icu_reinit()
{
int i;
+ register_t crit;
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
init_i8259();
for(i=0;i<ICU_LEN;i++)
if(intr_handler[i] != isa_strayintr)
INTREN(1<<i);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
/*
@@ -309,13 +333,16 @@ void
isa_defaultirq()
{
int i;
+ register_t crit;
/* icu vectors */
for (i = 0; i < ICU_LEN; i++)
icu_unset(i, (driver_intr_t *)NULL);
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
init_i8259();
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
@@ -476,6 +503,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
int vector;
u_int32_t value; /* the window register is 32 bits */
#endif /* FAST_HI */
+ register_t crit;
#if defined(APIC_IO)
if ((u_int)intr >= ICU_LEN) /* no 8259 SLAVE to ignore */
@@ -488,6 +516,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
return (EBUSY);
#endif
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
intr_handler[intr] = handler;
intr_unit[intr] = arg;
@@ -530,6 +559,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
#endif /* FAST_HI */
INTREN(1 << intr);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
return (0);
}
@@ -543,10 +573,12 @@ icu_unset(intr, handler)
int intr;
driver_intr_t *handler;
{
+ register_t crit;
if ((u_int)intr >= ICU_LEN || handler != intr_handler[intr])
return (EINVAL);
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << intr);
intr_countp[intr] = &intrcnt[1 + intr];
@@ -564,6 +596,7 @@ icu_unset(intr, handler)
GSEL(GCODE_SEL, SEL_KPL));
#endif /* FAST_HI */
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
return (0);
}
@@ -578,19 +611,25 @@ SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
static void
ithread_enable(int vector)
{
+ register_t crit;
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTREN(1 << vector);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
static void
ithread_disable(int vector)
{
+ register_t crit;
+ crit = intr_disable();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << vector);
mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
}
int
@@ -672,3 +711,10 @@ inthand_remove(void *cookie)
return (ithread_remove_handler(cookie));
}
+
+void
+call_fast_unpend(int irq)
+{
+ fastunpend[irq]();
+}
+
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
index 43b012c..22a0282 100644
--- a/sys/amd64/isa/npx.c
+++ b/sys/amd64/isa/npx.c
@@ -429,9 +429,15 @@ no_irq13:
* XXX hack around brokenness of bus_teardown_intr(). If we left the
* irq active then we would get it instead of exception 16.
*/
- mtx_lock_spin(&icu_lock);
- INTRDIS(1 << irq_num);
- mtx_unlock_spin(&icu_lock);
+ {
+ register_t crit;
+
+ crit = intr_disable();
+ mtx_lock_spin(&icu_lock);
+ INTRDIS(1 << irq_num);
+ mtx_unlock_spin(&icu_lock);
+ intr_restore(crit);
+ }
bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res);
bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid, ioport_res);
OpenPOWER on IntegriCloud