summary refs log tree commit diff stats
path: root/sys/i386/isa/ipl.s
diff options
context:
space:
mode:
author dillon <dillon@FreeBSD.org> 2000-03-28 07:16:37 +0000
committer dillon <dillon@FreeBSD.org> 2000-03-28 07:16:37 +0000
commit 689641c1ea53b9b5f18384314b488d0369596cf4 (patch)
tree 19fb936349a321ee7ee797aaae79a14fa3863394 /sys/i386/isa/ipl.s
parent 9c60490f9c6bfc1cca909053f05b8c1553e089fb (diff)
download FreeBSD-src-689641c1ea53b9b5f18384314b488d0369596cf4.zip
FreeBSD-src-689641c1ea53b9b5f18384314b488d0369596cf4.tar.gz
Commit major SMP cleanups and move the BGL (big giant lock) in the
syscall path inward. A system call may select whether it needs the MP lock or not (the default being that it does need it).

A great deal of conditional SMP code for various dead-ended experiments has been removed. 'cil' and 'cml' have been removed entirely, and the locking around the cpl has been removed.

The conditional separately-locked fast-interrupt code has been removed, meaning that interrupts must hold the CPL now (but they pretty much had to anyway). Another reason for doing this is that the original separate-lock for interrupts just doesn't apply to the interrupt thread mechanism being contemplated.

Modifications to the cpl may now ONLY occur while holding the MP lock. For example, if an otherwise MP-safe syscall needs to mess with the cpl, it must hold the MP lock for the duration and must (as usual) save/restore the cpl in a nested fashion.

This is precursor work for the real meat coming later: avoiding having to hold the MP lock for common syscalls and I/Os and interrupt threads. It is expected that the spl mechanisms and new interrupt threading mechanisms will be able to run in tandem, allowing a slow piecemeal transition to occur.

This patch should result in a moderate performance improvement due to the considerable amount of code that has been removed from the critical path, especially the simplification of the spl*() calls. The real performance gains will come later.

Approved by: jkh
Reviewed by: current, bde (exception.s)
Some work taken from: luoqi's patch
Diffstat (limited to 'sys/i386/isa/ipl.s')
-rw-r--r-- sys/i386/isa/ipl.s 119
1 file changed, 28 insertions, 91 deletions
diff --git a/sys/i386/isa/ipl.s b/sys/i386/isa/ipl.s
index 7c1fca1..980257f 100644
--- a/sys/i386/isa/ipl.s
+++ b/sys/i386/isa/ipl.s
@@ -43,6 +43,10 @@
/*
* AT/386
* Vector interrupt control section
+ *
+ * cpl - Current interrupt disable mask
+ * *_imask - Interrupt masks for various spl*() functions
+ * ipending - Pending interrupts (set when a masked interrupt occurs)
*/
.data
@@ -67,9 +71,6 @@ _softnet_imask: .long SWI_NET_MASK
.globl _softtty_imask
_softtty_imask: .long SWI_TTY_MASK
- .globl _astpending
-_astpending: .long 0
-
/* pending interrupts blocked by splxxx() */
.globl _ipending
_ipending: .long 0
@@ -91,29 +92,12 @@ _netisrs:
.text
-#ifdef SMP
-#ifdef notnow
-#define TEST_CIL \
- cmpl $0x0100, _cil ; \
- jne 1f ; \
- cmpl $0, _inside_intr ; \
- jne 1f ; \
- int $3 ; \
-1:
-#else
-#define TEST_CIL
-#endif
-#endif
-
/*
* Handle return from interrupts, traps and syscalls.
*/
SUPERALIGN_TEXT
.type _doreti,@function
_doreti:
-#ifdef SMP
- TEST_CIL
-#endif
FAKE_MCOUNT(_bintr) /* init "from" _bintr -> _doreti */
addl $4,%esp /* discard unit number */
popl %eax /* cpl or cml to restore */
@@ -128,32 +112,17 @@ doreti_next:
* handlers is limited by the number of bits in cpl).
*/
#ifdef SMP
- TEST_CIL
cli /* early to prevent INT deadlock */
- pushl %eax /* preserve cpl while getting lock */
- ICPL_LOCK
- popl %eax
doreti_next2:
#endif
movl %eax,%ecx
-#ifdef CPL_AND_CML
- orl _cpl, %ecx /* add cpl to cml */
-#endif
notl %ecx /* set bit = unmasked level */
#ifndef SMP
cli
#endif
andl _ipending,%ecx /* set bit = unmasked pending INT */
jne doreti_unpend
-#ifdef SMP
- TEST_CIL
-#endif
-#ifdef CPL_AND_CML
- movl %eax, _cml
-#else
movl %eax,_cpl
-#endif
- FAST_ICPL_UNLOCK /* preserves %eax */
MPLOCKED decb _intr_nesting_level
/* Check for ASTs that can be handled now. */
@@ -166,19 +135,27 @@ doreti_next2:
cmpl $1,_in_vm86call
jne doreti_ast
+ /*
+ * doreti_exit - release MP lock, pop registers, iret.
+ *
+ * Note that the syscall trap shortcuts to doreti_syscall_ret.
+ * The segment register pop is a special case, since it may
+ * fault if (for example) a sigreturn specifies bad segment
+ * registers. The fault is handled in trap.c
+ */
+
doreti_exit:
MEXITCOUNT
#ifdef SMP
-#ifdef INTR_SIMPLELOCK
-#error code needed here to decide which lock to release, INTR or giant
-#endif
/* release the kernel lock */
movl $_mp_lock, %edx /* GIANT_LOCK */
call _MPrellock_edx
#endif /* SMP */
.globl doreti_popl_fs
+ .globl doreti_syscall_ret
+doreti_syscall_ret:
doreti_popl_fs:
popl %fs
.globl doreti_popl_es
@@ -215,22 +192,13 @@ doreti_popl_fs_fault:
doreti_unpend:
/*
* Enabling interrupts is safe because we haven't restored cpl yet.
- * The locking from the "btrl" test is probably no longer necessary.
- * We won't miss any new pending interrupts because we will check
- * for them again.
+ * %ecx contains the next probable ready interrupt (~cpl & ipending)
*/
#ifdef SMP
- TEST_CIL
- /* we enter with cpl locked */
- bsfl %ecx, %ecx /* slow, but not worth optimizing */
+ bsfl %ecx, %ecx /* locate the next dispatchable int */
lock
- btrl %ecx, _ipending
+ btrl %ecx, _ipending /* is it really still pending? */
jnc doreti_next2 /* some intr cleared memory copy */
- cmpl $NHWI, %ecx
- jae 1f
- btsl %ecx, _cil
-1:
- FAST_ICPL_UNLOCK /* preserves %eax */
sti /* late to prevent INT deadlock */
#else
sti
@@ -238,8 +206,9 @@ doreti_unpend:
btrl %ecx,_ipending
jnc doreti_next /* some intr cleared memory copy */
#endif /* SMP */
-
/*
+ * Execute handleable interrupt
+ *
* Set up JUMP to _ihandlers[%ecx] for HWIs.
* Set up CALL of _ihandlers[%ecx] for SWIs.
* This is a bit early for the SMP case - we have to push %ecx and
@@ -247,25 +216,10 @@ doreti_unpend:
*/
movl _ihandlers(,%ecx,4),%edx
cmpl $NHWI,%ecx
- jae doreti_swi
- cli
+ jae doreti_swi /* software interrupt handling */
+ cli /* else hardware int handling */
#ifdef SMP
- pushl %edx /* preserve %edx */
-#ifdef APIC_INTR_DIAGNOSTIC
- pushl %ecx
-#endif
- pushl %eax /* preserve %eax */
- ICPL_LOCK
-#ifdef CPL_AND_CML
- popl _cml
-#else
- popl _cpl
-#endif
- FAST_ICPL_UNLOCK
-#ifdef APIC_INTR_DIAGNOSTIC
- popl %ecx
-#endif
- popl %edx
+ movl %eax,_cpl /* same as non-smp case right now */
#else
movl %eax,_cpl
#endif
@@ -292,9 +246,6 @@ doreti_unpend:
ALIGN_TEXT
doreti_swi:
-#ifdef SMP
- TEST_CIL
-#endif
pushl %eax
/*
* At least the SWI_CLOCK handler has to run at a possibly strictly
@@ -304,29 +255,18 @@ doreti_swi:
* in dying interrupt frames and about 12 HWIs nested in active
* interrupt frames. There are only 4 different SWIs and the HWI
* and SWI masks limit the nesting further.
+ *
+ * The SMP case is currently the same as the non-SMP case.
*/
#ifdef SMP
- orl imasks(,%ecx,4), %eax
- pushl %ecx /* preserve for use by _swi_generic */
- pushl %edx /* save handler entry point */
- cli /* prevent INT deadlock */
- pushl %eax /* save cpl|cml */
- ICPL_LOCK
-#ifdef CPL_AND_CML
- popl _cml /* restore cml */
-#else
- popl _cpl /* restore cpl */
-#endif
- FAST_ICPL_UNLOCK
- sti
- popl %edx /* restore handler entry point */
- popl %ecx
+ orl imasks(,%ecx,4), %eax /* or in imasks */
+ movl %eax,_cpl /* set cpl for call */
#else
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
#endif
call %edx
- popl %eax
+ popl %eax /* cpl to restore */
jmp doreti_next
ALIGN_TEXT
@@ -336,9 +276,6 @@ doreti_ast:
movl $T_ASTFLT,TF_TRAPNO(%esp)
call _trap
subl %eax,%eax /* recover cpl|cml */
-#ifdef CPL_AND_CML
- movl %eax, _cpl
-#endif
movb $1,_intr_nesting_level /* for doreti_next to decrement */
jmp doreti_next
OpenPOWER on IntegriCloud