summaryrefslogtreecommitdiffstats
path: root/sys/amd64/isa
diff options
context:
space:
mode:
authordillon <dillon@FreeBSD.org>2002-02-26 17:06:21 +0000
committerdillon <dillon@FreeBSD.org>2002-02-26 17:06:21 +0000
commit57b097e18ce888ddd81fe5d6bb4145bb08cbe650 (patch)
treea1eb3a1ea442b6b5ed8c84da1e6a766614f65cfc /sys/amd64/isa
parent1db171521516daa282d3ded476eaf5454f2b0a42 (diff)
downloadFreeBSD-src-57b097e18ce888ddd81fe5d6bb4145bb08cbe650.zip
FreeBSD-src-57b097e18ce888ddd81fe5d6bb4145bb08cbe650.tar.gz
STAGE-1 of 3 commit - allow (but do not require) interrupts to remain
enabled in critical sections and streamline critical_enter() and critical_exit(). This commit allows an architecture to leave interrupts enabled inside critical sections if it so wishes. Architectures that do not wish to do this are not affected by this change. This commit implements the feature for the I386 architecture and provides a sysctl, debug.critical_mode, which defaults to 1 (use the feature). For now you can turn the sysctl on and off at any time in order to test the architectural changes or track down bugs. This commit is just the first stage. Some areas of the code, specifically the MACHINE_CRITICAL_ENTER #ifdef'd code, is strictly temporary and will be cleaned up in the STAGE-2 commit when the critical_*() functions are moved entirely into MD files. The following changes have been made: * critical_enter() and critical_exit() for I386 now simply increment and decrement curthread->td_critnest. They no longer disable hard interrupts. When critical_exit() decrements the counter to 0 it effectively calls a routine to deal with whatever interrupts were deferred during the time the code was operating in a critical section. Other architectures are unaffected. * fork_exit() has been conditionalized to remove MD assumptions for the new code. Old code will still use the old MD assumptions in regards to hard interrupt disablement. In STAGE-2 this will be turned into a subroutine call into MD code rather than hardcoded in MI code. The new code places the burden of entering the critical section in the trampoline code where it belongs. * I386: interrupts are now enabled while we are in a critical section. The interrupt vector code has been adjusted to deal with the fact. If it detects that we are in a critical section it currently defers the interrupt by adding the appropriate bit to an interrupt mask. * In order to accomplish the deferral, icu_lock is required. This is i386-specific. 
Thus icu_lock can only be obtained by mainline i386 code while interrupts are hard disabled. This change has been made. * Because interrupts may or may not be hard disabled during a context switch, cpu_switch() can no longer simply assume that PSL_I will be in a consistent state. Therefore, it now saves and restores eflags. * FAST INTERRUPT PROVISION. Fast interrupts are currently deferred. The intention is to eventually allow them to operate either while we are in a critical section or, if we are able to restrict the use of sched_lock, while we are not holding the sched_lock. * ICU and APIC vector assembly for I386 cleaned up. The ICU code has been cleaned up to match the APIC code in regards to format and macro availability. Additionally, the code has been adjusted to deal with deferred interrupts. * Deferred interrupts use a per-cpu boolean int_pending, and masks ipending, spending, and fpending. Being per-cpu variables it is not currently necessary to lock bus cycles modifying them. Note that the same mechanism will enable preemption to be incorporated as a true software interrupt without having to further hack up the critical nesting code. * Note: the old critical_enter() code in kern/kern_switch.c is currently #ifdef to be compatible with both the old and new methodology. In STAGE-2 it will be moved entirely to MD code. Performance issues: One of the purposes of this commit is to enhance critical section performance, specifically to greatly reduce bus overhead to allow the critical section code to be used to protect per-cpu caches. These caches, such as Jeff's slab allocator work, can potentially operate very quickly making the effective savings of the new critical section code's performance very significant. The second purpose of this commit is to allow architectures to enable certain interrupts while in a critical section. Specifically, the intention is to eventually allow certain FAST interrupts to operate rather than defer. 
The third purpose of this commit is to begin to clean up the critical_enter()/critical_exit()/cpu_critical_enter()/ cpu_critical_exit() API which currently has serious cross pollution in MI code (in fork_exit() and ast() for example). The fourth purpose of this commit is to provide a framework that allows kernel-preempting software interrupts to be implemented cleanly. This is currently used for two forward interrupts in I386. Other architectures will have the choice of using this infrastructure or building the functionality directly into critical_enter()/ critical_exit(). Finally, this commit is designed to greatly improve the flexibility of various architectures to manage critical section handling, software interrupts, preemption, and other highly integrated architecture-specific details.
Diffstat (limited to 'sys/amd64/isa')
-rw-r--r--sys/amd64/isa/atpic_vector.S300
-rw-r--r--sys/amd64/isa/clock.c13
-rw-r--r--sys/amd64/isa/icu_vector.S300
-rw-r--r--sys/amd64/isa/icu_vector.s300
-rw-r--r--sys/amd64/isa/intr_machdep.c46
-rw-r--r--sys/amd64/isa/intr_machdep.h14
-rw-r--r--sys/amd64/isa/nmi.c46
-rw-r--r--sys/amd64/isa/npx.c12
8 files changed, 734 insertions, 297 deletions
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index 4e10cc2..3411c06 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -16,17 +16,23 @@
#define ICU_EOI 0x20 /* XXX - define elsewhere */
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_LBIT(irq_num) (1 << (irq_num))
#define IRQ_BYTE(irq_num) ((irq_num) >> 3)
#ifdef AUTO_EOI_1
+
#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
+
#else
-#define ENABLE_ICU1 \
- movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
+
+#define ENABLE_ICU1 \
+ movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
OUTB_ICU1 /* ... to clear in service bit */
-#define OUTB_ICU1 \
+
+#define OUTB_ICU1 \
outb %al,$IO_ICU1
+
#endif
#ifdef AUTO_EOI_2
@@ -34,48 +40,124 @@
* The data sheet says no auto-EOI on slave, but it sometimes works.
*/
#define ENABLE_ICU1_AND_2 ENABLE_ICU1
+
#else
-#define ENABLE_ICU1_AND_2 \
- movb $ICU_EOI,%al ; /* as above */ \
- outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
+
+#define ENABLE_ICU1_AND_2 \
+ movb $ICU_EOI,%al ; /* as above */ \
+ outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
+
#endif
+#define PUSH_FRAME \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ pushal ; /* 8 ints */ \
+ pushl %ds ; /* save data and extra segments ... */ \
+ pushl %es ; \
+ pushl %fs
+
+#define PUSH_DUMMY \
+ pushfl ; /* eflags */ \
+ pushl %cs ; /* cs */ \
+ pushl $0 ; /* dummy eip */ \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ subl $11*4,%esp
+
+#define POP_FRAME \
+ popl %fs ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp
+
+#define POP_DUMMY \
+ addl $16*4,%esp
+
+#define MASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
+
+#define UNMASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
/*
* Macros for interrupt interrupt entry, call to handler, and exit.
*/
-#define FAST_INTR(irq_num, vec_name, enable_icus) \
- .text ; \
- SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; \
- pushl %es ; \
- pushl %fs ; \
- mov $KDSEL,%ax ; \
- mov %ax,%ds ; \
- mov %ax,%es ; \
- mov $KPSEL,%ax ; \
- mov %ax,%fs ; \
- FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- call critical_enter ; \
- movl PCPU(CURTHREAD),%ebx ; \
- incl TD_INTR_NESTING_LEVEL(%ebx) ; \
- pushl intr_unit + (irq_num) * 4 ; \
- call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
- enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
- addl $4,%esp ; \
- incl cnt+V_INTR ; /* book-keeping can wait */ \
- movl intr_countp + (irq_num) * 4,%eax ; \
- incl (%eax) ; \
- decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- call critical_exit ; \
- MEXITCOUNT ; \
+#define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%ds ; \
+ mov %ax,%es ; \
+ mov $KPSEL,%ax ; \
+ mov %ax,%fs ; \
+ FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+ movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+; \
+ movl $1,PCPU(INT_PENDING) ; \
+ orl $IRQ_LBIT(irq_num),PCPU(FPENDING) ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_CRITNEST(%ebx) ; \
+ incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; \
+ addl $4,%esp ; \
+ enable_icus ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ decl TD_CRITNEST(%ebx) ; \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 2f ; \
+; \
+ call unpend ; \
+2: ; \
+ decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
+ MEXITCOUNT ; \
jmp doreti
+/*
+ * Restart a fast interrupt that was held up by a critical section.
+ * This routine is called from unpend(). unpend() ensures we are
+ * in a critical section and deals with the interrupt nesting level
+ * for us. If we previously masked the irq, we have to unmask it.
+ *
+ * We have a choice. We can regenerate the irq using the 'int'
+ * instruction or we can create a dummy frame and call the interrupt
+ * handler directly. I've chosen to use the dummy-frame method.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+; \
+ PUSH_DUMMY ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
+ addl $4, %esp ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ UNMASK_IRQ(icu, irq_num) ; \
+ POP_DUMMY ; \
+ ret
+
/*
* Slow, threaded interrupts.
*
@@ -85,74 +167,96 @@ IDTVEC(vec_name) ; \
* interrupt handler and don't run anything. We could just do an
* iret. FIXME.
*/
-#define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
- .text ; \
- SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; /* save our data and extra segments ... */ \
- pushl %es ; \
- pushl %fs ; \
- mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
- mov %ax,%ds ; \
- mov %ax,%es ; \
- mov $KPSEL,%ax ; \
- mov %ax,%fs ; \
- maybe_extra_ipending ; \
- movb imen + IRQ_BYTE(irq_num),%al ; \
- orb $IRQ_BIT(irq_num),%al ; \
- movb %al,imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET ; \
- enable_icus ; \
- movl PCPU(CURTHREAD),%ebx ; \
- incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+#define INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
+ mov %ax,%ds ; \
+ mov %ax,%es ; \
+ mov $KPSEL,%ax ; \
+ mov %ax,%fs ; \
+; \
+ maybe_extra_ipending ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+; \
+ movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+ movl $1,PCPU(INT_PENDING); \
+ orl $IRQ_LBIT(irq_num),PCPU(IPENDING) ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
- pushl $irq_num; /* pass the IRQ */ \
- call sched_ithd ; \
- addl $4, %esp ; /* discard the parameter */ \
- decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- MEXITCOUNT ; \
- /* We could usually avoid the following jmp by inlining some of */ \
- /* doreti, but it's probably better to use less cache. */ \
- jmp doreti /* and catch up inside doreti */
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 9f ; \
+ call unpend ; \
+9: ; \
+ pushl $irq_num; /* pass the IRQ */ \
+ call sched_ithd ; \
+ addl $4, %esp ; /* discard the parameter */ \
+; \
+ decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
+ MEXITCOUNT ; \
+ jmp doreti
MCOUNT_LABEL(bintr)
- FAST_INTR(0,fastintr0, ENABLE_ICU1)
- FAST_INTR(1,fastintr1, ENABLE_ICU1)
- FAST_INTR(2,fastintr2, ENABLE_ICU1)
- FAST_INTR(3,fastintr3, ENABLE_ICU1)
- FAST_INTR(4,fastintr4, ENABLE_ICU1)
- FAST_INTR(5,fastintr5, ENABLE_ICU1)
- FAST_INTR(6,fastintr6, ENABLE_ICU1)
- FAST_INTR(7,fastintr7, ENABLE_ICU1)
- FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
- FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
- FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
- FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
- FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
- FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
- FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
- FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+ FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
- INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
- INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
- INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
- INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
- INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
- INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
- INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
- INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
- INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+ INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
+ INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
+ INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
+ INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
+ INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
+ INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
+ INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
+ INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
+ INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
+ FAST_UNPEND(0,fastunpend0, IO_ICU1)
+ FAST_UNPEND(1,fastunpend1, IO_ICU1)
+ FAST_UNPEND(2,fastunpend2, IO_ICU1)
+ FAST_UNPEND(3,fastunpend3, IO_ICU1)
+ FAST_UNPEND(4,fastunpend4, IO_ICU1)
+ FAST_UNPEND(5,fastunpend5, IO_ICU1)
+ FAST_UNPEND(6,fastunpend6, IO_ICU1)
+ FAST_UNPEND(7,fastunpend7, IO_ICU1)
+ FAST_UNPEND(8,fastunpend8, IO_ICU2)
+ FAST_UNPEND(9,fastunpend9, IO_ICU2)
+ FAST_UNPEND(10,fastunpend10, IO_ICU2)
+ FAST_UNPEND(11,fastunpend11, IO_ICU2)
+ FAST_UNPEND(12,fastunpend12, IO_ICU2)
+ FAST_UNPEND(13,fastunpend13, IO_ICU2)
+ FAST_UNPEND(14,fastunpend14, IO_ICU2)
+ FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr)
+
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index ae56051..ee776af 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -995,6 +995,7 @@ cpu_initclocks()
int apic_8254_trial;
void *clkdesc;
#endif /* APIC_IO */
+ critical_t crit;
if (statclock_disable) {
/*
@@ -1029,9 +1030,11 @@ cpu_initclocks()
inthand_add("clk", apic_8254_intr, (driver_intr_t *)clkintr, NULL,
INTR_TYPE_CLK | INTR_FAST, &clkdesc);
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTREN(1 << apic_8254_intr);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
#else /* APIC_IO */
@@ -1042,9 +1045,11 @@ cpu_initclocks()
*/
inthand_add("clk", 0, (driver_intr_t *)clkintr, NULL,
INTR_TYPE_CLK | INTR_FAST, NULL);
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTREN(IRQ0);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
#endif /* APIC_IO */
@@ -1067,6 +1072,7 @@ cpu_initclocks()
inthand_add("rtc", 8, (driver_intr_t *)rtcintr, NULL,
INTR_TYPE_CLK | INTR_FAST, NULL);
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
#ifdef APIC_IO
INTREN(APIC_IRQ8);
@@ -1074,6 +1080,7 @@ cpu_initclocks()
INTREN(IRQ8);
#endif /* APIC_IO */
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
writertc(RTC_STATUSB, rtc_statusb);
@@ -1090,9 +1097,13 @@ cpu_initclocks()
* on the IO APIC.
* Workaround: Limited variant of mixed mode.
*/
+ critical_t crit;
+
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << apic_8254_intr);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
inthand_remove(clkdesc);
printf("APIC_IO: Broken MP table detected: "
"8254 is not connected to "
@@ -1115,9 +1126,11 @@ cpu_initclocks()
inthand_add("clk", apic_8254_intr,
(driver_intr_t *)clkintr, NULL,
INTR_TYPE_CLK | INTR_FAST, NULL);
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTREN(1 << apic_8254_intr);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
}
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index 4e10cc2..3411c06 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -16,17 +16,23 @@
#define ICU_EOI 0x20 /* XXX - define elsewhere */
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_LBIT(irq_num) (1 << (irq_num))
#define IRQ_BYTE(irq_num) ((irq_num) >> 3)
#ifdef AUTO_EOI_1
+
#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
+
#else
-#define ENABLE_ICU1 \
- movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
+
+#define ENABLE_ICU1 \
+ movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
OUTB_ICU1 /* ... to clear in service bit */
-#define OUTB_ICU1 \
+
+#define OUTB_ICU1 \
outb %al,$IO_ICU1
+
#endif
#ifdef AUTO_EOI_2
@@ -34,48 +40,124 @@
* The data sheet says no auto-EOI on slave, but it sometimes works.
*/
#define ENABLE_ICU1_AND_2 ENABLE_ICU1
+
#else
-#define ENABLE_ICU1_AND_2 \
- movb $ICU_EOI,%al ; /* as above */ \
- outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
+
+#define ENABLE_ICU1_AND_2 \
+ movb $ICU_EOI,%al ; /* as above */ \
+ outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
+
#endif
+#define PUSH_FRAME \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ pushal ; /* 8 ints */ \
+ pushl %ds ; /* save data and extra segments ... */ \
+ pushl %es ; \
+ pushl %fs
+
+#define PUSH_DUMMY \
+ pushfl ; /* eflags */ \
+ pushl %cs ; /* cs */ \
+ pushl $0 ; /* dummy eip */ \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ subl $11*4,%esp
+
+#define POP_FRAME \
+ popl %fs ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp
+
+#define POP_DUMMY \
+ addl $16*4,%esp
+
+#define MASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
+
+#define UNMASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
/*
* Macros for interrupt interrupt entry, call to handler, and exit.
*/
-#define FAST_INTR(irq_num, vec_name, enable_icus) \
- .text ; \
- SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; \
- pushl %es ; \
- pushl %fs ; \
- mov $KDSEL,%ax ; \
- mov %ax,%ds ; \
- mov %ax,%es ; \
- mov $KPSEL,%ax ; \
- mov %ax,%fs ; \
- FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- call critical_enter ; \
- movl PCPU(CURTHREAD),%ebx ; \
- incl TD_INTR_NESTING_LEVEL(%ebx) ; \
- pushl intr_unit + (irq_num) * 4 ; \
- call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
- enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
- addl $4,%esp ; \
- incl cnt+V_INTR ; /* book-keeping can wait */ \
- movl intr_countp + (irq_num) * 4,%eax ; \
- incl (%eax) ; \
- decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- call critical_exit ; \
- MEXITCOUNT ; \
+#define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%ds ; \
+ mov %ax,%es ; \
+ mov $KPSEL,%ax ; \
+ mov %ax,%fs ; \
+ FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+ movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+; \
+ movl $1,PCPU(INT_PENDING) ; \
+ orl $IRQ_LBIT(irq_num),PCPU(FPENDING) ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_CRITNEST(%ebx) ; \
+ incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; \
+ addl $4,%esp ; \
+ enable_icus ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ decl TD_CRITNEST(%ebx) ; \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 2f ; \
+; \
+ call unpend ; \
+2: ; \
+ decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
+ MEXITCOUNT ; \
jmp doreti
+/*
+ * Restart a fast interrupt that was held up by a critical section.
+ * This routine is called from unpend(). unpend() ensures we are
+ * in a critical section and deals with the interrupt nesting level
+ * for us. If we previously masked the irq, we have to unmask it.
+ *
+ * We have a choice. We can regenerate the irq using the 'int'
+ * instruction or we can create a dummy frame and call the interrupt
+ * handler directly. I've chosen to use the dummy-frame method.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+; \
+ PUSH_DUMMY ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
+ addl $4, %esp ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ UNMASK_IRQ(icu, irq_num) ; \
+ POP_DUMMY ; \
+ ret
+
/*
* Slow, threaded interrupts.
*
@@ -85,74 +167,96 @@ IDTVEC(vec_name) ; \
* interrupt handler and don't run anything. We could just do an
* iret. FIXME.
*/
-#define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
- .text ; \
- SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; /* save our data and extra segments ... */ \
- pushl %es ; \
- pushl %fs ; \
- mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
- mov %ax,%ds ; \
- mov %ax,%es ; \
- mov $KPSEL,%ax ; \
- mov %ax,%fs ; \
- maybe_extra_ipending ; \
- movb imen + IRQ_BYTE(irq_num),%al ; \
- orb $IRQ_BIT(irq_num),%al ; \
- movb %al,imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET ; \
- enable_icus ; \
- movl PCPU(CURTHREAD),%ebx ; \
- incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+#define INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
+ mov %ax,%ds ; \
+ mov %ax,%es ; \
+ mov $KPSEL,%ax ; \
+ mov %ax,%fs ; \
+; \
+ maybe_extra_ipending ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+; \
+ movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+ movl $1,PCPU(INT_PENDING); \
+ orl $IRQ_LBIT(irq_num),PCPU(IPENDING) ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
- pushl $irq_num; /* pass the IRQ */ \
- call sched_ithd ; \
- addl $4, %esp ; /* discard the parameter */ \
- decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- MEXITCOUNT ; \
- /* We could usually avoid the following jmp by inlining some of */ \
- /* doreti, but it's probably better to use less cache. */ \
- jmp doreti /* and catch up inside doreti */
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 9f ; \
+ call unpend ; \
+9: ; \
+ pushl $irq_num; /* pass the IRQ */ \
+ call sched_ithd ; \
+ addl $4, %esp ; /* discard the parameter */ \
+; \
+ decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
+ MEXITCOUNT ; \
+ jmp doreti
MCOUNT_LABEL(bintr)
- FAST_INTR(0,fastintr0, ENABLE_ICU1)
- FAST_INTR(1,fastintr1, ENABLE_ICU1)
- FAST_INTR(2,fastintr2, ENABLE_ICU1)
- FAST_INTR(3,fastintr3, ENABLE_ICU1)
- FAST_INTR(4,fastintr4, ENABLE_ICU1)
- FAST_INTR(5,fastintr5, ENABLE_ICU1)
- FAST_INTR(6,fastintr6, ENABLE_ICU1)
- FAST_INTR(7,fastintr7, ENABLE_ICU1)
- FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
- FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
- FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
- FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
- FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
- FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
- FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
- FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+ FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
- INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
- INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
- INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
- INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
- INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
- INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
- INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
- INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
- INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+ INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
+ INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
+ INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
+ INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
+ INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
+ INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
+ INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
+ INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
+ INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
+ FAST_UNPEND(0,fastunpend0, IO_ICU1)
+ FAST_UNPEND(1,fastunpend1, IO_ICU1)
+ FAST_UNPEND(2,fastunpend2, IO_ICU1)
+ FAST_UNPEND(3,fastunpend3, IO_ICU1)
+ FAST_UNPEND(4,fastunpend4, IO_ICU1)
+ FAST_UNPEND(5,fastunpend5, IO_ICU1)
+ FAST_UNPEND(6,fastunpend6, IO_ICU1)
+ FAST_UNPEND(7,fastunpend7, IO_ICU1)
+ FAST_UNPEND(8,fastunpend8, IO_ICU2)
+ FAST_UNPEND(9,fastunpend9, IO_ICU2)
+ FAST_UNPEND(10,fastunpend10, IO_ICU2)
+ FAST_UNPEND(11,fastunpend11, IO_ICU2)
+ FAST_UNPEND(12,fastunpend12, IO_ICU2)
+ FAST_UNPEND(13,fastunpend13, IO_ICU2)
+ FAST_UNPEND(14,fastunpend14, IO_ICU2)
+ FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr)
+
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index 4e10cc2..3411c06 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -16,17 +16,23 @@
#define ICU_EOI 0x20 /* XXX - define elsewhere */
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_LBIT(irq_num) (1 << (irq_num))
#define IRQ_BYTE(irq_num) ((irq_num) >> 3)
#ifdef AUTO_EOI_1
+
#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
+
#else
-#define ENABLE_ICU1 \
- movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
+
+#define ENABLE_ICU1 \
+ movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
OUTB_ICU1 /* ... to clear in service bit */
-#define OUTB_ICU1 \
+
+#define OUTB_ICU1 \
outb %al,$IO_ICU1
+
#endif
#ifdef AUTO_EOI_2
@@ -34,48 +40,124 @@
* The data sheet says no auto-EOI on slave, but it sometimes works.
*/
#define ENABLE_ICU1_AND_2 ENABLE_ICU1
+
#else
-#define ENABLE_ICU1_AND_2 \
- movb $ICU_EOI,%al ; /* as above */ \
- outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
+
+#define ENABLE_ICU1_AND_2 \
+ movb $ICU_EOI,%al ; /* as above */ \
+ outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
+
#endif
+#define PUSH_FRAME \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ pushal ; /* 8 ints */ \
+ pushl %ds ; /* save data and extra segments ... */ \
+ pushl %es ; \
+ pushl %fs
+
+#define PUSH_DUMMY \
+ pushfl ; /* eflags */ \
+ pushl %cs ; /* cs */ \
+ pushl $0 ; /* dummy eip */ \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ subl $11*4,%esp
+
+#define POP_FRAME \
+ popl %fs ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp
+
+#define POP_DUMMY \
+ addl $16*4,%esp
+
+#define MASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
+
+#define UNMASK_IRQ(icu, irq_num) \
+ movb imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,imen + IRQ_BYTE(irq_num) ; \
+ outb %al,$icu+ICU_IMR_OFFSET
/*
* Macros for interrupt interrupt entry, call to handler, and exit.
*/
-#define FAST_INTR(irq_num, vec_name, enable_icus) \
- .text ; \
- SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; \
- pushl %es ; \
- pushl %fs ; \
- mov $KDSEL,%ax ; \
- mov %ax,%ds ; \
- mov %ax,%es ; \
- mov $KPSEL,%ax ; \
- mov %ax,%fs ; \
- FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- call critical_enter ; \
- movl PCPU(CURTHREAD),%ebx ; \
- incl TD_INTR_NESTING_LEVEL(%ebx) ; \
- pushl intr_unit + (irq_num) * 4 ; \
- call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
- enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
- addl $4,%esp ; \
- incl cnt+V_INTR ; /* book-keeping can wait */ \
- movl intr_countp + (irq_num) * 4,%eax ; \
- incl (%eax) ; \
- decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- call critical_exit ; \
- MEXITCOUNT ; \
+#define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%ds ; \
+ mov %ax,%es ; \
+ mov $KPSEL,%ax ; \
+ mov %ax,%fs ; \
+ FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+ movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+; \
+ movl $1,PCPU(INT_PENDING) ; \
+ orl $IRQ_LBIT(irq_num),PCPU(FPENDING) ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_CRITNEST(%ebx) ; \
+ incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; \
+ addl $4,%esp ; \
+ enable_icus ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ decl TD_CRITNEST(%ebx) ; \
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 2f ; \
+; \
+ call unpend ; \
+2: ; \
+ decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
+ MEXITCOUNT ; \
jmp doreti
+/*
+ * Restart a fast interrupt that was held up by a critical section.
+ * This routine is called from unpend(). unpend() ensures we are
+ * in a critical section and deals with the interrupt nesting level
+ * for us. If we previously masked the irq, we have to unmask it.
+ *
+ * We have a choice. We can regenerate the irq using the 'int'
+ * instruction or we can create a dummy frame and call the interrupt
+ * handler directly. I've chosen to use the dummy-frame method.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+; \
+ PUSH_DUMMY ; \
+ pushl intr_unit + (irq_num) * 4 ; \
+ call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
+ addl $4, %esp ; \
+ incl cnt+V_INTR ; /* book-keeping can wait */ \
+ movl intr_countp + (irq_num) * 4,%eax ; \
+ incl (%eax) ; \
+ UNMASK_IRQ(icu, irq_num) ; \
+ POP_DUMMY ; \
+ ret
+
/*
* Slow, threaded interrupts.
*
@@ -85,74 +167,96 @@ IDTVEC(vec_name) ; \
* interrupt handler and don't run anything. We could just do an
* iret. FIXME.
*/
-#define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
- .text ; \
- SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
- pushl $0 ; /* dummy error code */ \
- pushl $0 ; /* dummy trap type */ \
- pushal ; \
- pushl %ds ; /* save our data and extra segments ... */ \
- pushl %es ; \
- pushl %fs ; \
- mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
- mov %ax,%ds ; \
- mov %ax,%es ; \
- mov $KPSEL,%ax ; \
- mov %ax,%fs ; \
- maybe_extra_ipending ; \
- movb imen + IRQ_BYTE(irq_num),%al ; \
- orb $IRQ_BIT(irq_num),%al ; \
- movb %al,imen + IRQ_BYTE(irq_num) ; \
- outb %al,$icu+ICU_IMR_OFFSET ; \
- enable_icus ; \
- movl PCPU(CURTHREAD),%ebx ; \
- incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+#define INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
+ mov %ax,%ds ; \
+ mov %ax,%es ; \
+ mov $KPSEL,%ax ; \
+ mov %ax,%fs ; \
+; \
+ maybe_extra_ipending ; \
+ MASK_IRQ(icu, irq_num) ; \
+ enable_icus ; \
+; \
+ movl PCPU(CURTHREAD),%ebx ; \
+ cmpl $0,TD_CRITNEST(%ebx) ; \
+ je 1f ; \
+ movl $1,PCPU(INT_PENDING); \
+ orl $IRQ_LBIT(irq_num),PCPU(IPENDING) ; \
+ jmp 10f ; \
+1: ; \
+ incl TD_INTR_NESTING_LEVEL(%ebx) ; \
+; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
- pushl $irq_num; /* pass the IRQ */ \
- call sched_ithd ; \
- addl $4, %esp ; /* discard the parameter */ \
- decl TD_INTR_NESTING_LEVEL(%ebx) ; \
- MEXITCOUNT ; \
- /* We could usually avoid the following jmp by inlining some of */ \
- /* doreti, but it's probably better to use less cache. */ \
- jmp doreti /* and catch up inside doreti */
+ cmpl $0,PCPU(INT_PENDING) ; \
+ je 9f ; \
+ call unpend ; \
+9: ; \
+ pushl $irq_num; /* pass the IRQ */ \
+ call sched_ithd ; \
+ addl $4, %esp ; /* discard the parameter */ \
+; \
+ decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+10: ; \
+ MEXITCOUNT ; \
+ jmp doreti
MCOUNT_LABEL(bintr)
- FAST_INTR(0,fastintr0, ENABLE_ICU1)
- FAST_INTR(1,fastintr1, ENABLE_ICU1)
- FAST_INTR(2,fastintr2, ENABLE_ICU1)
- FAST_INTR(3,fastintr3, ENABLE_ICU1)
- FAST_INTR(4,fastintr4, ENABLE_ICU1)
- FAST_INTR(5,fastintr5, ENABLE_ICU1)
- FAST_INTR(6,fastintr6, ENABLE_ICU1)
- FAST_INTR(7,fastintr7, ENABLE_ICU1)
- FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
- FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
- FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
- FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
- FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
- FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
- FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
- FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+ FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+ FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+ FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
- INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
- INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
- INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
- INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
- INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
- INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
- INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
- INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
- INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
- INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+ INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
+ INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
+ INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
+ INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
+ INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
+ INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
+ INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
+ INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
+ INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
+ INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
+ FAST_UNPEND(0,fastunpend0, IO_ICU1)
+ FAST_UNPEND(1,fastunpend1, IO_ICU1)
+ FAST_UNPEND(2,fastunpend2, IO_ICU1)
+ FAST_UNPEND(3,fastunpend3, IO_ICU1)
+ FAST_UNPEND(4,fastunpend4, IO_ICU1)
+ FAST_UNPEND(5,fastunpend5, IO_ICU1)
+ FAST_UNPEND(6,fastunpend6, IO_ICU1)
+ FAST_UNPEND(7,fastunpend7, IO_ICU1)
+ FAST_UNPEND(8,fastunpend8, IO_ICU2)
+ FAST_UNPEND(9,fastunpend9, IO_ICU2)
+ FAST_UNPEND(10,fastunpend10, IO_ICU2)
+ FAST_UNPEND(11,fastunpend11, IO_ICU2)
+ FAST_UNPEND(12,fastunpend12, IO_ICU2)
+ FAST_UNPEND(13,fastunpend13, IO_ICU2)
+ FAST_UNPEND(14,fastunpend14, IO_ICU2)
+ FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr)
+
diff --git a/sys/amd64/isa/intr_machdep.c b/sys/amd64/isa/intr_machdep.c
index 92bf581..616e8c3 100644
--- a/sys/amd64/isa/intr_machdep.c
+++ b/sys/amd64/isa/intr_machdep.c
@@ -117,6 +117,27 @@ static inthand_t *fastintr[ICU_LEN] = {
#endif /* APIC_IO */
};
+static unpendhand_t *fastunpend[ICU_LEN] = {
+ &IDTVEC(fastunpend0), &IDTVEC(fastunpend1),
+ &IDTVEC(fastunpend2), &IDTVEC(fastunpend3),
+ &IDTVEC(fastunpend4), &IDTVEC(fastunpend5),
+ &IDTVEC(fastunpend6), &IDTVEC(fastunpend7),
+ &IDTVEC(fastunpend8), &IDTVEC(fastunpend9),
+ &IDTVEC(fastunpend10), &IDTVEC(fastunpend11),
+ &IDTVEC(fastunpend12), &IDTVEC(fastunpend13),
+ &IDTVEC(fastunpend14), &IDTVEC(fastunpend15),
+#if defined(APIC_IO)
+ &IDTVEC(fastunpend16), &IDTVEC(fastunpend17),
+ &IDTVEC(fastunpend18), &IDTVEC(fastunpend19),
+ &IDTVEC(fastunpend20), &IDTVEC(fastunpend21),
+ &IDTVEC(fastunpend22), &IDTVEC(fastunpend23),
+ &IDTVEC(fastunpend24), &IDTVEC(fastunpend25),
+ &IDTVEC(fastunpend26), &IDTVEC(fastunpend27),
+ &IDTVEC(fastunpend28), &IDTVEC(fastunpend29),
+ &IDTVEC(fastunpend30), &IDTVEC(fastunpend31),
+#endif /* APIC_IO */
+};
+
static inthand_t *slowintr[ICU_LEN] = {
&IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
&IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
@@ -291,13 +312,16 @@ isa_nmi(cd)
void icu_reinit()
{
int i;
+ critical_t crit;
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
init_i8259();
for(i=0;i<ICU_LEN;i++)
if(intr_handler[i] != isa_strayintr)
INTREN(1<<i);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
/*
@@ -309,13 +333,16 @@ void
isa_defaultirq()
{
int i;
+ critical_t crit;
/* icu vectors */
for (i = 0; i < ICU_LEN; i++)
icu_unset(i, (driver_intr_t *)NULL);
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
init_i8259();
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
@@ -476,6 +503,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
int vector;
u_int32_t value; /* the window register is 32 bits */
#endif /* FAST_HI */
+ critical_t crit;
#if defined(APIC_IO)
if ((u_int)intr >= ICU_LEN) /* no 8259 SLAVE to ignore */
@@ -488,6 +516,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
return (EBUSY);
#endif
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
intr_handler[intr] = handler;
intr_unit[intr] = arg;
@@ -522,6 +551,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
#endif /* FAST_HI */
INTREN(1 << intr);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
return (0);
}
@@ -535,10 +565,12 @@ icu_unset(intr, handler)
int intr;
driver_intr_t *handler;
{
+ critical_t crit;
if ((u_int)intr >= ICU_LEN || handler != intr_handler[intr])
return (EINVAL);
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << intr);
intr_countp[intr] = &intrcnt[1 + intr];
@@ -556,6 +588,7 @@ icu_unset(intr, handler)
GSEL(GCODE_SEL, SEL_KPL));
#endif /* FAST_HI */
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
return (0);
}
@@ -570,19 +603,25 @@ SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
static void
ithread_enable(int vector)
{
+ critical_t crit;
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTREN(1 << vector);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
static void
ithread_disable(int vector)
{
+ critical_t crit;
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << vector);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
int
@@ -664,3 +703,10 @@ inthand_remove(void *cookie)
return (ithread_remove_handler(cookie));
}
+
+void
+call_fast_unpend(int irq)
+{
+ fastunpend[irq]();
+}
+
diff --git a/sys/amd64/isa/intr_machdep.h b/sys/amd64/isa/intr_machdep.h
index 789b02b..21d5a93 100644
--- a/sys/amd64/isa/intr_machdep.h
+++ b/sys/amd64/isa/intr_machdep.h
@@ -144,6 +144,7 @@
* Type of the first (asm) part of an interrupt handler.
*/
typedef void inthand_t __P((u_int cs, u_int ef, u_int esp, u_int ss));
+typedef void unpendhand_t __P((void));
#define IDTVEC(name) __CONCAT(X,name)
@@ -167,6 +168,18 @@ inthand_t
IDTVEC(intr4), IDTVEC(intr5), IDTVEC(intr6), IDTVEC(intr7),
IDTVEC(intr8), IDTVEC(intr9), IDTVEC(intr10), IDTVEC(intr11),
IDTVEC(intr12), IDTVEC(intr13), IDTVEC(intr14), IDTVEC(intr15);
+unpendhand_t
+ IDTVEC(fastunpend0), IDTVEC(fastunpend1), IDTVEC(fastunpend2),
+ IDTVEC(fastunpend3), IDTVEC(fastunpend4), IDTVEC(fastunpend5),
+ IDTVEC(fastunpend6), IDTVEC(fastunpend7), IDTVEC(fastunpend8),
+ IDTVEC(fastunpend9), IDTVEC(fastunpend10), IDTVEC(fastunpend11),
+ IDTVEC(fastunpend12), IDTVEC(fastunpend13), IDTVEC(fastunpend14),
+ IDTVEC(fastunpend15), IDTVEC(fastunpend16), IDTVEC(fastunpend17),
+ IDTVEC(fastunpend18), IDTVEC(fastunpend19), IDTVEC(fastunpend20),
+ IDTVEC(fastunpend21), IDTVEC(fastunpend22), IDTVEC(fastunpend23),
+ IDTVEC(fastunpend24), IDTVEC(fastunpend25), IDTVEC(fastunpend26),
+ IDTVEC(fastunpend27), IDTVEC(fastunpend28), IDTVEC(fastunpend29),
+ IDTVEC(fastunpend30), IDTVEC(fastunpend31);
#if defined(SMP) || defined(APIC_IO)
inthand_t
@@ -234,6 +247,7 @@ int inthand_add(const char *name, int irq, driver_intr_t handler, void *arg,
enum intr_type flags, void **cookiep);
int inthand_remove(void *cookie);
void sched_ithd(void *dummy);
+void call_fast_unpend(int irq);
#endif /* LOCORE */
diff --git a/sys/amd64/isa/nmi.c b/sys/amd64/isa/nmi.c
index 92bf581..616e8c3 100644
--- a/sys/amd64/isa/nmi.c
+++ b/sys/amd64/isa/nmi.c
@@ -117,6 +117,27 @@ static inthand_t *fastintr[ICU_LEN] = {
#endif /* APIC_IO */
};
+static unpendhand_t *fastunpend[ICU_LEN] = {
+ &IDTVEC(fastunpend0), &IDTVEC(fastunpend1),
+ &IDTVEC(fastunpend2), &IDTVEC(fastunpend3),
+ &IDTVEC(fastunpend4), &IDTVEC(fastunpend5),
+ &IDTVEC(fastunpend6), &IDTVEC(fastunpend7),
+ &IDTVEC(fastunpend8), &IDTVEC(fastunpend9),
+ &IDTVEC(fastunpend10), &IDTVEC(fastunpend11),
+ &IDTVEC(fastunpend12), &IDTVEC(fastunpend13),
+ &IDTVEC(fastunpend14), &IDTVEC(fastunpend15),
+#if defined(APIC_IO)
+ &IDTVEC(fastunpend16), &IDTVEC(fastunpend17),
+ &IDTVEC(fastunpend18), &IDTVEC(fastunpend19),
+ &IDTVEC(fastunpend20), &IDTVEC(fastunpend21),
+ &IDTVEC(fastunpend22), &IDTVEC(fastunpend23),
+ &IDTVEC(fastunpend24), &IDTVEC(fastunpend25),
+ &IDTVEC(fastunpend26), &IDTVEC(fastunpend27),
+ &IDTVEC(fastunpend28), &IDTVEC(fastunpend29),
+ &IDTVEC(fastunpend30), &IDTVEC(fastunpend31),
+#endif /* APIC_IO */
+};
+
static inthand_t *slowintr[ICU_LEN] = {
&IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
&IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
@@ -291,13 +312,16 @@ isa_nmi(cd)
void icu_reinit()
{
int i;
+ critical_t crit;
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
init_i8259();
for(i=0;i<ICU_LEN;i++)
if(intr_handler[i] != isa_strayintr)
INTREN(1<<i);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
/*
@@ -309,13 +333,16 @@ void
isa_defaultirq()
{
int i;
+ critical_t crit;
/* icu vectors */
for (i = 0; i < ICU_LEN; i++)
icu_unset(i, (driver_intr_t *)NULL);
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
init_i8259();
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
@@ -476,6 +503,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
int vector;
u_int32_t value; /* the window register is 32 bits */
#endif /* FAST_HI */
+ critical_t crit;
#if defined(APIC_IO)
if ((u_int)intr >= ICU_LEN) /* no 8259 SLAVE to ignore */
@@ -488,6 +516,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
return (EBUSY);
#endif
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
intr_handler[intr] = handler;
intr_unit[intr] = arg;
@@ -522,6 +551,7 @@ icu_setup(int intr, driver_intr_t *handler, void *arg, int flags)
#endif /* FAST_HI */
INTREN(1 << intr);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
return (0);
}
@@ -535,10 +565,12 @@ icu_unset(intr, handler)
int intr;
driver_intr_t *handler;
{
+ critical_t crit;
if ((u_int)intr >= ICU_LEN || handler != intr_handler[intr])
return (EINVAL);
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << intr);
intr_countp[intr] = &intrcnt[1 + intr];
@@ -556,6 +588,7 @@ icu_unset(intr, handler)
GSEL(GCODE_SEL, SEL_KPL));
#endif /* FAST_HI */
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
return (0);
}
@@ -570,19 +603,25 @@ SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
static void
ithread_enable(int vector)
{
+ critical_t crit;
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTREN(1 << vector);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
static void
ithread_disable(int vector)
{
+ critical_t crit;
+ crit = cpu_critical_enter();
mtx_lock_spin(&icu_lock);
INTRDIS(1 << vector);
mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
}
int
@@ -664,3 +703,10 @@ inthand_remove(void *cookie)
return (ithread_remove_handler(cookie));
}
+
+void
+call_fast_unpend(int irq)
+{
+ fastunpend[irq]();
+}
+
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
index 07bd2f5..abccf06 100644
--- a/sys/amd64/isa/npx.c
+++ b/sys/amd64/isa/npx.c
@@ -429,9 +429,15 @@ no_irq13:
* XXX hack around brokenness of bus_teardown_intr(). If we left the
* irq active then we would get it instead of exception 16.
*/
- mtx_lock_spin(&icu_lock);
- INTRDIS(1 << irq_num);
- mtx_unlock_spin(&icu_lock);
+ {
+ critical_t crit;
+
+ crit = cpu_critical_enter();
+ mtx_lock_spin(&icu_lock);
+ INTRDIS(1 << irq_num);
+ mtx_unlock_spin(&icu_lock);
+ cpu_critical_exit(crit);
+ }
bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res);
bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid, ioport_res);
OpenPOWER on IntegriCloud