author    bde <bde@FreeBSD.org>  1999-07-10 15:28:01 +0000
committer bde <bde@FreeBSD.org>  1999-07-10 15:28:01 +0000
commit    76b661148982d686a345ce7e8af6b837139288a6 (patch)
tree      984225551b54fbef3e436d6ac171c797ff70ad9f
parent    49a30aa645e3f361acf747fdefab3e56b508deec (diff)
Go back to the old (icu.s rev.1.7 1993) way of keeping the AST-pending
bit separate from ipending, since this is simpler and/or necessary for
SMP and may even be better for UP.

Reviewed by:	alc, luoqi, tegge
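In essence, the AST request moves out of the shared ipending word (where it
was bit 31, SWI_AST) into a dedicated astpending flag. A minimal C sketch of
the before/after representation, for orientation only -- the kernel does this
in assembly and via the aston() macro; the identifiers mirror the kernel's,
but the functions here are illustrative, not the committed code:

	#include <stdint.h>

	/* Old scheme: the AST request was bit 31 of the shared SWI word. */
	#define SWI_AST          31
	#define SWI_AST_PENDING  (1U << SWI_AST)
	static volatile uint32_t ipending;

	static void request_ast_old(void)
	{
		/* Read-modify-write on a word shared with other SWI
		 * bits: needs a locked orl on SMP. */
		__sync_fetch_and_or(&ipending, SWI_AST_PENDING);
	}

	/* Restored scheme: a whole word of its own. */
	static volatile int astpending;

	static void request_ast_new(void)
	{
		astpending = 1;	/* plain aligned store; no lock prefix */
	}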
-rw-r--r--	sys/amd64/amd64/apic_vector.S	9
-rw-r--r--	sys/amd64/amd64/cpu_switch.S	12
-rw-r--r--	sys/amd64/amd64/exception.S	45
-rw-r--r--	sys/amd64/amd64/exception.s	45
-rw-r--r--	sys/amd64/amd64/swtch.s	12
-rw-r--r--	sys/amd64/include/cpu.h	9
-rw-r--r--	sys/amd64/isa/atpic_vector.S	6
-rw-r--r--	sys/amd64/isa/icu_ipl.S	4
-rw-r--r--	sys/amd64/isa/icu_ipl.s	4
-rw-r--r--	sys/amd64/isa/icu_vector.S	6
-rw-r--r--	sys/amd64/isa/icu_vector.s	6
-rw-r--r--	sys/i386/i386/apic_vector.s	9
-rw-r--r--	sys/i386/i386/exception.s	45
-rw-r--r--	sys/i386/i386/swtch.s	12
-rw-r--r--	sys/i386/include/asnames.h	3
-rw-r--r--	sys/i386/include/cpu.h	9
-rw-r--r--	sys/i386/include/ipl.h	13
-rw-r--r--	sys/i386/isa/apic_ipl.s	4
-rw-r--r--	sys/i386/isa/apic_vector.s	9
-rw-r--r--	sys/i386/isa/atpic_vector.s	6
-rw-r--r--	sys/i386/isa/icu_ipl.s	4
-rw-r--r--	sys/i386/isa/icu_vector.s	6
-rw-r--r--	sys/i386/isa/ipl.s	84
-rw-r--r--	sys/i386/isa/ipl_funcs.c	18
24 files changed, 98 insertions, 282 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 7acd37c..b99b93a 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.40 1999/06/16 03:53:52 tegge Exp $
+ * $Id: apic_vector.s,v 1.41 1999/07/03 06:33:47 alc Exp $
*/
@@ -705,8 +705,7 @@ _Xcpuast:
movl _cpl, %eax
#endif
pushl %eax
- lock
- orl $SWI_AST_PENDING, _ipending
+ movl $1, _astpending /* XXX */
AVCPL_UNLOCK
lock
incb _intr_nesting_level
@@ -976,13 +975,13 @@ _ihandlers:
* apic_ipl.s: splz_unpend
*/
.long _swi_null, swi_net, _swi_null, _swi_null
- .long _swi_vm, _swi_null, _softclock, swi_ast
+ .long _swi_vm, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
- .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0
/* active flag for lazy masking */
iactive:
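The table hunks above retire slot 31: ihandlers[31], which used to let splz()
dispatch swi_ast, now holds the null handler with an empty mask, which is also
why the "can't happen" SWI_AST checks in the *_ipl.s files below can go. A
hypothetical C rendering of that dispatch path -- the real code is the bit-scan
loop in apic_ipl.s, so the scan order and locking here are shape only:

	#include <stdint.h>

	typedef void (*swihand_t)(void);

	extern swihand_t ihandlers[32];		/* slot 31 is now swi_null */
	extern volatile uint32_t ipending, cpl;

	void splz_sketch(void)
	{
		uint32_t pend;

		while ((pend = ipending & ~cpl) != 0) {
			int n = __builtin_ctz(pend);	/* lowest set bit */
			ipending &= ~(1U << n);	/* locked btrl in the kernel */
			ihandlers[n]();		/* ex-AST slot: no-op */
		}
	}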
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 82c9737..553b9a7 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.83 1999/07/03 06:33:24 alc Exp $
+ * $Id: swtch.s,v 1.84 1999/07/09 04:15:42 jlemon Exp $
*/
#include "npx.h"
@@ -310,10 +310,6 @@ _idle:
*
* XXX: we had damn well better be sure we had it before doing this!
*/
- CPL_LOCK /* XXX */
- MPLOCKED andl $~SWI_AST_MASK, _ipending /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
movl $FREE_LOCK, %eax
movl %eax, _mp_lock
@@ -357,18 +353,12 @@ idle_loop:
3:
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
call _get_mplock
- CPL_LOCK /* XXX */
- movl $SWI_AST_MASK, _cpl /* XXX Disallow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
cmpl $0,_whichqs /* normal queue */
CROSSJUMP(jne, nortqr, je)
cmpl $0,_whichidqs /* 'idle' queue */
CROSSJUMP(jne, idqr, je)
- CPL_LOCK /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
call _rel_mplock
jmp idle_loop
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 6479e55..dce98e8 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.61 1999/06/01 18:19:36 jlemon Exp $
+ * $Id: exception.s,v 1.62 1999/06/27 21:31:43 alc Exp $
*/
#include "npx.h"
@@ -142,9 +142,8 @@ IDTVEC(fpu)
/*
* Handle like an interrupt (except for accounting) so that we can
* call npx_intr to clear the error. It would be better to handle
- * npx interrupts as traps. This used to be difficult for nested
- * interrupts, but now it is fairly easy - mask nested ones the
- * same as SWI_AST's.
+ * npx interrupts as traps. Nested interrupts would probably have
+ * to be converted to ASTs.
*/
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
@@ -166,13 +165,9 @@ IDTVEC(fpu)
#ifdef CPL_AND_CML
movl _cml,%eax
pushl %eax /* save original cml */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
#else
movl _cpl,%eax
pushl %eax /* save original cpl */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* CPL_AND_CML */
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
@@ -181,8 +176,6 @@ IDTVEC(fpu)
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
incl _cnt+V_TRAP
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* SMP */
call _npx_intr
@@ -218,15 +211,9 @@ calltrap:
ALIGN_LOCK
ECPL_LOCK
#ifdef CPL_AND_CML
- movl _cml,%eax
- movl %eax,%ebx /* keep orig. cml here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
+ movl _cml,%ebx /* keep orig. cml here during trap() */
#else
- movl _cpl,%eax
- movl %eax,%ebx /* keep orig. cpl here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
+ movl _cpl,%ebx /* keep orig. cpl here during trap() */
#endif
ECPL_UNLOCK
call _trap
@@ -270,17 +257,6 @@ IDTVEC(syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
@@ -311,17 +287,6 @@ IDTVEC(int0x80_syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index 6479e55..dce98e8 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.61 1999/06/01 18:19:36 jlemon Exp $
+ * $Id: exception.s,v 1.62 1999/06/27 21:31:43 alc Exp $
*/
#include "npx.h"
@@ -142,9 +142,8 @@ IDTVEC(fpu)
/*
* Handle like an interrupt (except for accounting) so that we can
* call npx_intr to clear the error. It would be better to handle
- * npx interrupts as traps. This used to be difficult for nested
- * interrupts, but now it is fairly easy - mask nested ones the
- * same as SWI_AST's.
+ * npx interrupts as traps. Nested interrupts would probably have
+ * to be converted to ASTs.
*/
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
@@ -166,13 +165,9 @@ IDTVEC(fpu)
#ifdef CPL_AND_CML
movl _cml,%eax
pushl %eax /* save original cml */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
#else
movl _cpl,%eax
pushl %eax /* save original cpl */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* CPL_AND_CML */
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
@@ -181,8 +176,6 @@ IDTVEC(fpu)
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
incl _cnt+V_TRAP
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* SMP */
call _npx_intr
@@ -218,15 +211,9 @@ calltrap:
ALIGN_LOCK
ECPL_LOCK
#ifdef CPL_AND_CML
- movl _cml,%eax
- movl %eax,%ebx /* keep orig. cml here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
+ movl _cml,%ebx /* keep orig. cml here during trap() */
#else
- movl _cpl,%eax
- movl %eax,%ebx /* keep orig. cpl here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
+ movl _cpl,%ebx /* keep orig. cpl here during trap() */
#endif
ECPL_UNLOCK
call _trap
@@ -270,17 +257,6 @@ IDTVEC(syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
@@ -311,17 +287,6 @@ IDTVEC(int0x80_syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 82c9737..553b9a7 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.83 1999/07/03 06:33:24 alc Exp $
+ * $Id: swtch.s,v 1.84 1999/07/09 04:15:42 jlemon Exp $
*/
#include "npx.h"
@@ -310,10 +310,6 @@ _idle:
*
* XXX: we had damn well better be sure we had it before doing this!
*/
- CPL_LOCK /* XXX */
- MPLOCKED andl $~SWI_AST_MASK, _ipending /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
movl $FREE_LOCK, %eax
movl %eax, _mp_lock
@@ -357,18 +353,12 @@ idle_loop:
3:
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
call _get_mplock
- CPL_LOCK /* XXX */
- movl $SWI_AST_MASK, _cpl /* XXX Disallow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
cmpl $0,_whichqs /* normal queue */
CROSSJUMP(jne, nortqr, je)
cmpl $0,_whichidqs /* 'idle' queue */
CROSSJUMP(jne, idqr, je)
- CPL_LOCK /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
call _rel_mplock
jmp idle_loop
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index 06f060d..d3e4ad4 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
- * $Id: cpu.h,v 1.38 1999/02/02 09:08:23 bde Exp $
+ * $Id: cpu.h,v 1.39 1999/04/23 20:22:44 dt Exp $
*/
#ifndef _MACHINE_CPU_H_
@@ -69,11 +69,11 @@
* problem at 100 Hz but it is serious at 16000 Hz for pcaudio. softclock()
* can take more than 62.5 usec so clock interrupts are lost.) It doesn't
* check for pending interrupts being unmasked. clkintr() and Xintr0()
- * assume that the ipl is high when hardclock() returns. Our SWI_AST
+ * assume that the ipl is high when hardclock() returns. Our SWI_CLOCK
* handling is efficient enough that little is gained by calling
* softclock() directly.
*/
-#define CLKF_BASEPRI(framep) (((framep)->cf_ppl & ~SWI_AST_MASK) == 0)
+#define CLKF_BASEPRI(framep) ((framep)->cf_ppl == 0)
#else
#define CLKF_BASEPRI(framep) (0)
#endif
@@ -102,7 +102,7 @@
*/
#define signotify(p) aston()
-#define aston() setsoftast()
+#define aston() do { astpending = 1; } while (0)
#define astoff()
/*
@@ -125,6 +125,7 @@
}
#ifdef KERNEL
+extern int astpending;
extern char btext[];
extern char etext[];
extern u_char intr_nesting_level;
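With the flag split out, aston() degenerates to a store, and CLKF_BASEPRI() no
longer has to mask an AST bit out of the saved priority level. A sketch of how
a caller reaches the new path; signotify() is the cpu.h macro above, while
post_signal_sketch() is hypothetical, standing in for the signal code:

	extern int astpending;		/* defined in ipl.s by this commit */

	#define aston()		do { astpending = 1; } while (0)
	#define signotify(p)	aston()

	struct proc;			/* opaque in this sketch */

	void post_signal_sketch(struct proc *p)
	{
		/* ...queue the signal on p... */
		signotify(p);	/* serviced by doreti_ast on return to
				 * user mode; no SWI is posted any more */
	}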
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index 453f9d5..cbb8b5e 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: icu_vector.s,v 1.11 1999/04/28 01:04:13 luoqi Exp $
+ * $Id: icu_vector.s,v 1.12 1999/05/28 14:08:59 bde Exp $
*/
/*
@@ -211,7 +211,7 @@ _ihandlers: /* addresses of interrupt handlers */
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _swi_null, _swi_null
.long _swi_null, _swi_null, _swi_null, _swi_null
- .long _swi_null, _swi_null, _softclock, swi_ast
+ .long _swi_null, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
@@ -219,6 +219,6 @@ imasks: /* masks for interrupt handlers */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
.long SWI_VM_MASK, 0, 0, 0
.long 0, 0, 0, 0
- .long 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long 0, 0, SWI_CLOCK_MASK, 0
.text
diff --git a/sys/amd64/isa/icu_ipl.S b/sys/amd64/isa/icu_ipl.S
index 4d91725..0f3a3ca 100644
--- a/sys/amd64/isa/icu_ipl.S
+++ b/sys/amd64/isa/icu_ipl.S
@@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: icu_ipl.s,v 1.3 1997/09/02 19:40:13 fsmp Exp $
+ * $Id: icu_ipl.s,v 1.4 1998/08/11 15:08:12 bde Exp $
*/
.data
@@ -103,8 +103,6 @@ splz_unpend:
ALIGN_TEXT
splz_swi:
- cmpl $SWI_AST,%ecx
- je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
diff --git a/sys/amd64/isa/icu_ipl.s b/sys/amd64/isa/icu_ipl.s
index 4d91725..0f3a3ca 100644
--- a/sys/amd64/isa/icu_ipl.s
+++ b/sys/amd64/isa/icu_ipl.s
@@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: icu_ipl.s,v 1.3 1997/09/02 19:40:13 fsmp Exp $
+ * $Id: icu_ipl.s,v 1.4 1998/08/11 15:08:12 bde Exp $
*/
.data
@@ -103,8 +103,6 @@ splz_unpend:
ALIGN_TEXT
splz_swi:
- cmpl $SWI_AST,%ecx
- je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index 453f9d5..cbb8b5e 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: icu_vector.s,v 1.11 1999/04/28 01:04:13 luoqi Exp $
+ * $Id: icu_vector.s,v 1.12 1999/05/28 14:08:59 bde Exp $
*/
/*
@@ -211,7 +211,7 @@ _ihandlers: /* addresses of interrupt handlers */
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _swi_null, _swi_null
.long _swi_null, _swi_null, _swi_null, _swi_null
- .long _swi_null, _swi_null, _softclock, swi_ast
+ .long _swi_null, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
@@ -219,6 +219,6 @@ imasks: /* masks for interrupt handlers */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
.long SWI_VM_MASK, 0, 0, 0
.long 0, 0, 0, 0
- .long 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long 0, 0, SWI_CLOCK_MASK, 0
.text
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index 453f9d5..cbb8b5e 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: icu_vector.s,v 1.11 1999/04/28 01:04:13 luoqi Exp $
+ * $Id: icu_vector.s,v 1.12 1999/05/28 14:08:59 bde Exp $
*/
/*
@@ -211,7 +211,7 @@ _ihandlers: /* addresses of interrupt handlers */
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _swi_null, _swi_null
.long _swi_null, _swi_null, _swi_null, _swi_null
- .long _swi_null, _swi_null, _softclock, swi_ast
+ .long _swi_null, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
@@ -219,6 +219,6 @@ imasks: /* masks for interrupt handlers */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
.long SWI_VM_MASK, 0, 0, 0
.long 0, 0, 0, 0
- .long 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long 0, 0, SWI_CLOCK_MASK, 0
.text
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index 7acd37c..b99b93a 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.40 1999/06/16 03:53:52 tegge Exp $
+ * $Id: apic_vector.s,v 1.41 1999/07/03 06:33:47 alc Exp $
*/
@@ -705,8 +705,7 @@ _Xcpuast:
movl _cpl, %eax
#endif
pushl %eax
- lock
- orl $SWI_AST_PENDING, _ipending
+ movl $1, _astpending /* XXX */
AVCPL_UNLOCK
lock
incb _intr_nesting_level
@@ -976,13 +975,13 @@ _ihandlers:
* apic_ipl.s: splz_unpend
*/
.long _swi_null, swi_net, _swi_null, _swi_null
- .long _swi_vm, _swi_null, _softclock, swi_ast
+ .long _swi_vm, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
- .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0
/* active flag for lazy masking */
iactive:
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
index 6479e55..dce98e8 100644
--- a/sys/i386/i386/exception.s
+++ b/sys/i386/i386/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.61 1999/06/01 18:19:36 jlemon Exp $
+ * $Id: exception.s,v 1.62 1999/06/27 21:31:43 alc Exp $
*/
#include "npx.h"
@@ -142,9 +142,8 @@ IDTVEC(fpu)
/*
* Handle like an interrupt (except for accounting) so that we can
* call npx_intr to clear the error. It would be better to handle
- * npx interrupts as traps. This used to be difficult for nested
- * interrupts, but now it is fairly easy - mask nested ones the
- * same as SWI_AST's.
+ * npx interrupts as traps. Nested interrupts would probably have
+ * to be converted to ASTs.
*/
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
@@ -166,13 +165,9 @@ IDTVEC(fpu)
#ifdef CPL_AND_CML
movl _cml,%eax
pushl %eax /* save original cml */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
#else
movl _cpl,%eax
pushl %eax /* save original cpl */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* CPL_AND_CML */
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
@@ -181,8 +176,6 @@ IDTVEC(fpu)
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
incl _cnt+V_TRAP
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* SMP */
call _npx_intr
@@ -218,15 +211,9 @@ calltrap:
ALIGN_LOCK
ECPL_LOCK
#ifdef CPL_AND_CML
- movl _cml,%eax
- movl %eax,%ebx /* keep orig. cml here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
+ movl _cml,%ebx /* keep orig. cml here during trap() */
#else
- movl _cpl,%eax
- movl %eax,%ebx /* keep orig. cpl here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
+ movl _cpl,%ebx /* keep orig. cpl here during trap() */
#endif
ECPL_UNLOCK
call _trap
@@ -270,17 +257,6 @@ IDTVEC(syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
@@ -311,17 +287,6 @@ IDTVEC(int0x80_syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index 82c9737..553b9a7 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.83 1999/07/03 06:33:24 alc Exp $
+ * $Id: swtch.s,v 1.84 1999/07/09 04:15:42 jlemon Exp $
*/
#include "npx.h"
@@ -310,10 +310,6 @@ _idle:
*
* XXX: we had damn well better be sure we had it before doing this!
*/
- CPL_LOCK /* XXX */
- MPLOCKED andl $~SWI_AST_MASK, _ipending /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
movl $FREE_LOCK, %eax
movl %eax, _mp_lock
@@ -357,18 +353,12 @@ idle_loop:
3:
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
call _get_mplock
- CPL_LOCK /* XXX */
- movl $SWI_AST_MASK, _cpl /* XXX Disallow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
cmpl $0,_whichqs /* normal queue */
CROSSJUMP(jne, nortqr, je)
cmpl $0,_whichidqs /* 'idle' queue */
CROSSJUMP(jne, idqr, je)
- CPL_LOCK /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
call _rel_mplock
jmp idle_loop
diff --git a/sys/i386/include/asnames.h b/sys/i386/include/asnames.h
index aeca739..710ff9e 100644
--- a/sys/i386/include/asnames.h
+++ b/sys/i386/include/asnames.h
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: asnames.h,v 1.34 1999/05/09 19:01:47 peter Exp $
+ * $Id: asnames.h,v 1.35 1999/05/12 21:38:57 luoqi Exp $
*/
#ifndef _MACHINE_ASNAMES_H_
@@ -172,6 +172,7 @@
#define _arith_invalid arith_invalid
#define _arith_overflow arith_overflow
#define _arith_underflow arith_underflow
+#define _astpending astpending
#define _bcopy bcopy
#define _bcopy_vector bcopy_vector
#define _bigJump bigJump
diff --git a/sys/i386/include/cpu.h b/sys/i386/include/cpu.h
index 06f060d..d3e4ad4 100644
--- a/sys/i386/include/cpu.h
+++ b/sys/i386/include/cpu.h
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
- * $Id: cpu.h,v 1.38 1999/02/02 09:08:23 bde Exp $
+ * $Id: cpu.h,v 1.39 1999/04/23 20:22:44 dt Exp $
*/
#ifndef _MACHINE_CPU_H_
@@ -69,11 +69,11 @@
* problem at 100 Hz but it is serious at 16000 Hz for pcaudio. softclock()
* can take more than 62.5 usec so clock interrupts are lost.) It doesn't
* check for pending interrupts being unmasked. clkintr() and Xintr0()
- * assume that the ipl is high when hardclock() returns. Our SWI_AST
+ * assume that the ipl is high when hardclock() returns. Our SWI_CLOCK
* handling is efficient enough that little is gained by calling
* softclock() directly.
*/
-#define CLKF_BASEPRI(framep) (((framep)->cf_ppl & ~SWI_AST_MASK) == 0)
+#define CLKF_BASEPRI(framep) ((framep)->cf_ppl == 0)
#else
#define CLKF_BASEPRI(framep) (0)
#endif
@@ -102,7 +102,7 @@
*/
#define signotify(p) aston()
-#define aston() setsoftast()
+#define aston() do { astpending = 1; } while (0)
#define astoff()
/*
@@ -125,6 +125,7 @@
}
#ifdef KERNEL
+extern int astpending;
extern char btext[];
extern char etext[];
extern u_char intr_nesting_level;
diff --git a/sys/i386/include/ipl.h b/sys/i386/include/ipl.h
index aef990e..6207e93 100644
--- a/sys/i386/include/ipl.h
+++ b/sys/i386/include/ipl.h
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: ipl.h,v 1.14 1998/08/11 15:08:12 bde Exp $
+ * $Id: ipl.h,v 1.15 1998/08/11 19:21:17 bde Exp $
*/
#ifndef _MACHINE_IPL_H_
@@ -53,7 +53,6 @@
#define SWI_CAMBIO (NHWI + 3)
#define SWI_VM (NHWI + 4)
#define SWI_CLOCK 30
-#define SWI_AST 31
#define NSWI (32 - NHWI)
/*
@@ -65,15 +64,14 @@
#define SWI_CAMBIO_PENDING (1 << SWI_CAMBIO)
#define SWI_VM_PENDING (1 << SWI_VM)
#define SWI_CLOCK_PENDING (1 << SWI_CLOCK)
-#define SWI_AST_PENDING (1 << SWI_AST)
/*
* Corresponding interrupt-disable masks for cpl. The ordering is now by
* inclusion (where each mask is considered as a set of bits). Everything
- * except SWI_AST_MASK includes SWI_CLOCK_MASK so that softclock() doesn't
+ * includes SWI_CLOCK_MASK so that softclock() doesn't
* run while other swi handlers are running and timeout routines can call
- * swi handlers. Everything includes SWI_AST_MASK so that AST's are masked
- * until just before return to user mode. SWI_TTY_MASK includes SWI_NET_MASK
+ * swi handlers.
+ * SWI_TTY_MASK includes SWI_NET_MASK
* in case tty interrupts are processed at splsofttty() for a tty that is in
* SLIP or PPP line discipline (this is weaker than merging net_imask with
* tty_imask in isa.c - splimp() must mask hard and soft tty interrupts, but
@@ -84,8 +82,7 @@
#define SWI_CAMBIO_MASK (SWI_CAMBIO_PENDING | SWI_CLOCK_MASK)
#define SWI_NET_MASK (SWI_NET_PENDING | SWI_CLOCK_MASK)
#define SWI_VM_MASK (SWI_VM_PENDING | SWI_CLOCK_MASK)
-#define SWI_CLOCK_MASK (SWI_CLOCK_PENDING | SWI_AST_MASK)
-#define SWI_AST_MASK SWI_AST_PENDING
+#define SWI_CLOCK_MASK SWI_CLOCK_PENDING
#define SWI_MASK (~HWI_MASK)
#ifndef LOCORE
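The mask lattice keeps its ordering-by-inclusion property: every SWI mask
still contains SWI_CLOCK_MASK, but nothing carries an AST term any more, and
SWI_CLOCK_MASK itself shrinks to the plain pending bit. A compile-time
restatement of the hunk above; NHWI and the pending-bit layout are assumed
here to keep the sketch self-contained:

	#define NHWI			24	/* assumed: APIC layout */
	#define SWI_NET			(NHWI + 1)
	#define SWI_CLOCK		30
	#define SWI_NET_PENDING		(1U << SWI_NET)
	#define SWI_CLOCK_PENDING	(1U << SWI_CLOCK)

	#define SWI_CLOCK_MASK		SWI_CLOCK_PENDING  /* no AST term */
	#define SWI_NET_MASK		(SWI_NET_PENDING | SWI_CLOCK_MASK)

	/* Inclusion check: softclock() stays blocked under other SWIs. */
	_Static_assert((SWI_NET_MASK & SWI_CLOCK_MASK) == SWI_CLOCK_MASK,
	    "SWI masks ordered by inclusion");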
diff --git a/sys/i386/isa/apic_ipl.s b/sys/i386/isa/apic_ipl.s
index 9ed8de0..4ba7df1 100644
--- a/sys/i386/isa/apic_ipl.s
+++ b/sys/i386/isa/apic_ipl.s
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: apic_ipl.s,v 1.23 1999/04/10 19:22:17 tegge Exp $
+ * $Id: apic_ipl.s,v 1.24 1999/07/03 06:33:47 alc Exp $
*/
@@ -138,8 +138,6 @@ splz_unpend:
ALIGN_TEXT
splz_swi:
- cmpl $SWI_AST,%ecx
- je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index 7acd37c..b99b93a 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.40 1999/06/16 03:53:52 tegge Exp $
+ * $Id: apic_vector.s,v 1.41 1999/07/03 06:33:47 alc Exp $
*/
@@ -705,8 +705,7 @@ _Xcpuast:
movl _cpl, %eax
#endif
pushl %eax
- lock
- orl $SWI_AST_PENDING, _ipending
+ movl $1, _astpending /* XXX */
AVCPL_UNLOCK
lock
incb _intr_nesting_level
@@ -976,13 +975,13 @@ _ihandlers:
* apic_ipl.s: splz_unpend
*/
.long _swi_null, swi_net, _swi_null, _swi_null
- .long _swi_vm, _swi_null, _softclock, swi_ast
+ .long _swi_vm, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
- .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0
/* active flag for lazy masking */
iactive:
diff --git a/sys/i386/isa/atpic_vector.s b/sys/i386/isa/atpic_vector.s
index 453f9d5..cbb8b5e 100644
--- a/sys/i386/isa/atpic_vector.s
+++ b/sys/i386/isa/atpic_vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: icu_vector.s,v 1.11 1999/04/28 01:04:13 luoqi Exp $
+ * $Id: icu_vector.s,v 1.12 1999/05/28 14:08:59 bde Exp $
*/
/*
@@ -211,7 +211,7 @@ _ihandlers: /* addresses of interrupt handlers */
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _swi_null, _swi_null
.long _swi_null, _swi_null, _swi_null, _swi_null
- .long _swi_null, _swi_null, _softclock, swi_ast
+ .long _swi_null, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
@@ -219,6 +219,6 @@ imasks: /* masks for interrupt handlers */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
.long SWI_VM_MASK, 0, 0, 0
.long 0, 0, 0, 0
- .long 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long 0, 0, SWI_CLOCK_MASK, 0
.text
diff --git a/sys/i386/isa/icu_ipl.s b/sys/i386/isa/icu_ipl.s
index 4d91725..0f3a3ca 100644
--- a/sys/i386/isa/icu_ipl.s
+++ b/sys/i386/isa/icu_ipl.s
@@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: icu_ipl.s,v 1.3 1997/09/02 19:40:13 fsmp Exp $
+ * $Id: icu_ipl.s,v 1.4 1998/08/11 15:08:12 bde Exp $
*/
.data
@@ -103,8 +103,6 @@ splz_unpend:
ALIGN_TEXT
splz_swi:
- cmpl $SWI_AST,%ecx
- je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
diff --git a/sys/i386/isa/icu_vector.s b/sys/i386/isa/icu_vector.s
index 453f9d5..cbb8b5e 100644
--- a/sys/i386/isa/icu_vector.s
+++ b/sys/i386/isa/icu_vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: icu_vector.s,v 1.11 1999/04/28 01:04:13 luoqi Exp $
+ * $Id: icu_vector.s,v 1.12 1999/05/28 14:08:59 bde Exp $
*/
/*
@@ -211,7 +211,7 @@ _ihandlers: /* addresses of interrupt handlers */
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _swi_null, _swi_null
.long _swi_null, _swi_null, _swi_null, _swi_null
- .long _swi_null, _swi_null, _softclock, swi_ast
+ .long _swi_null, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
@@ -219,6 +219,6 @@ imasks: /* masks for interrupt handlers */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
.long SWI_VM_MASK, 0, 0, 0
.long 0, 0, 0, 0
- .long 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long 0, 0, SWI_CLOCK_MASK, 0
.text
diff --git a/sys/i386/isa/ipl.s b/sys/i386/isa/ipl.s
index 51e8329..ad7eb11 100644
--- a/sys/i386/isa/ipl.s
+++ b/sys/i386/isa/ipl.s
@@ -36,7 +36,7 @@
*
* @(#)ipl.s
*
- * $Id: ipl.s,v 1.28 1999/06/01 18:20:15 jlemon Exp $
+ * $Id: ipl.s,v 1.29 1999/07/03 06:33:48 alc Exp $
*/
@@ -67,6 +67,8 @@ _softnet_imask: .long SWI_NET_MASK
.globl _softtty_imask
_softtty_imask: .long SWI_TTY_MASK
+ .globl _astpending
+_astpending: .long 0
/* pending interrupts blocked by splxxx() */
.globl _ipending
@@ -143,7 +145,6 @@ doreti_next2:
#endif
andl _ipending,%ecx /* set bit = unmasked pending INT */
jne doreti_unpend
-doreti_exit:
#ifdef SMP
TEST_CIL
#endif
@@ -154,32 +155,19 @@ doreti_exit:
#endif
FAST_ICPL_UNLOCK /* preserves %eax */
MPLOCKED decb _intr_nesting_level
- MEXITCOUNT
-#ifdef CPL_AND_CML
- /* XXX CPL_AND_CML needs work */
-#error not ready for vm86
-#endif
+
+ /* Check for ASTs that can be handled now. */
+ cmpb $0,_astpending
+ je doreti_exit
+ testb $SEL_RPL_MASK,TF_CS(%esp)
+ jne doreti_ast
+ testl $PSL_VM,TF_EFLAGS(%esp)
+ je doreti_exit
cmpl $1,_in_vm86call
- je 1f /* want cpl == SWI_AST_PENDING */
- /*
- * XXX
- * Sometimes when attempting to return to vm86 mode, cpl is not
- * being reset to 0, so here we force it to 0 before returning to
- * vm86 mode. doreti_stop is a convenient place to set a breakpoint.
- * When the cpl problem is solved, this code can disappear.
- */
- ICPL_LOCK
- cmpl $0,_cpl /* cpl == 0, skip it */
- je 1f
- testl $PSL_VM,TF_EFLAGS(%esp) /* going to VM86 mode? */
- jne doreti_stop
- testb $SEL_RPL_MASK,TF_CS(%esp) /* to user mode? */
- je 1f
-doreti_stop:
- movl $0,_cpl
- nop
-1:
- FAST_ICPL_UNLOCK /* preserves %eax */
+ jne doreti_ast
+
+doreti_exit:
+ MEXITCOUNT
#ifdef SMP
#ifdef INTR_SIMPLELOCK
@@ -310,8 +298,8 @@ doreti_swi:
#endif
pushl %eax
/*
- * The SWI_AST handler has to run at cpl = SWI_AST_MASK and the
- * SWI_CLOCK handler at cpl = SWI_CLOCK_MASK, so we have to restore
+ * At least the SWI_CLOCK handler has to run at a possibly strictly
+ * lower cpl, so we have to restore
* all the h/w bits in cpl now and have to worry about stack growth.
* The worst case is currently (30 Jan 1994) 2 SWI handlers nested
* in dying interrupt frames and about 12 HWIs nested in active
@@ -343,15 +331,10 @@ doreti_swi:
jmp doreti_next
ALIGN_TEXT
-swi_ast:
- addl $8,%esp /* discard raddr & cpl to get trap frame */
- cmpl $1,_in_vm86call
- je 1f /* stay in kernel mode */
- testb $SEL_RPL_MASK,TF_CS(%esp)
- je swi_ast_phantom
-swi_ast_user:
+doreti_ast:
+ movl $0,_astpending
+ sti
movl $T_ASTFLT,TF_TRAPNO(%esp)
- movb $0,_intr_nesting_level /* finish becoming a trap handler */
call _trap
subl %eax,%eax /* recover cpl|cml */
#ifdef CPL_AND_CML
@@ -361,33 +344,6 @@ swi_ast_user:
jmp doreti_next
ALIGN_TEXT
-swi_ast_phantom:
- /*
- * check for ast from vm86 mode. Placed down here so the jumps do
- * not get taken for mainline code.
- */
- testl $PSL_VM,TF_EFLAGS(%esp)
- jne swi_ast_user
-1:
- /*
- * These happen when there is an interrupt in a trap handler before
- * ASTs can be masked or in an lcall handler before they can be
- * masked or after they are unmasked. They could be avoided for
- * trap entries by using interrupt gates, and for lcall exits by
- * using by using cli, but they are unavoidable for lcall entries.
- */
- cli
- ICPL_LOCK
- MPLOCKED orl $SWI_AST_PENDING, _ipending
- /* cpl is unlocked in doreti_exit */
- subl %eax,%eax
-#ifdef CPL_AND_CML
- movl %eax, _cpl
-#endif
- jmp doreti_exit /* SWI_AST is highest so we must be done */
-
-
- ALIGN_TEXT
swi_net:
MCOUNT
bsfl _netisr,%eax
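The doreti rewrite above is the heart of the change: instead of a swi_ast
handler plus the swi_ast_phantom workaround, the return path tests astpending
directly and only services it when actually returning to user or vm86 mode. A
hypothetical C rendering of that tail; the trapframe fields and constants
follow the i386 headers, with assumed values marked as such:

	#define SEL_RPL_MASK	3		/* privilege bits of %cs */
	#define PSL_VM		0x00020000	/* EFLAGS: vm86 mode */
	#define T_ASTFLT	1	/* assumed; see machine/trap.h */

	struct trapframe_sketch {
		int	tf_trapno;
		int	tf_cs;
		int	tf_eflags;
	};

	extern volatile int astpending;
	extern int in_vm86call;
	void trap(struct trapframe_sketch *);
	void enable_intr(void);			/* sti */

	void doreti_tail_sketch(struct trapframe_sketch *tf)
	{
		for (;;) {
			/* ...doreti_unpend: run unmasked ipending bits... */
			if (astpending &&
			    ((tf->tf_cs & SEL_RPL_MASK) != 0 ||	/* to user */
			     ((tf->tf_eflags & PSL_VM) != 0 &&	/* or vm86, */
			      in_vm86call != 1))) {	/* not a vm86 call */
				astpending = 0;		/* doreti_ast */
				enable_intr();
				tf->tf_trapno = T_ASTFLT;
				trap(tf);
				continue;		/* doreti_next */
			}
			return;				/* doreti_exit */
		}
	}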
diff --git a/sys/i386/isa/ipl_funcs.c b/sys/i386/isa/ipl_funcs.c
index 602ce01..2b7a1c4 100644
--- a/sys/i386/isa/ipl_funcs.c
+++ b/sys/i386/isa/ipl_funcs.c
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: ipl_funcs.c,v 1.20 1999/05/09 23:40:29 peter Exp $
+ * $Id: ipl_funcs.c,v 1.21 1999/06/30 03:39:29 alc Exp $
*/
#include <sys/types.h>
@@ -55,7 +55,6 @@ void setdelayed(void)
}
#endif /* !SMP */
-DO_SETBITS(setsoftast, &ipending, SWI_AST_PENDING)
DO_SETBITS(setsoftcamnet,&ipending, SWI_CAMNET_PENDING)
DO_SETBITS(setsoftcambio,&ipending, SWI_CAMBIO_PENDING)
DO_SETBITS(setsoftclock, &ipending, SWI_CLOCK_PENDING)
@@ -106,8 +105,8 @@ GENSPL(splvm, cpl |= net_imask | bio_imask | cam_imask)
void
spl0(void)
{
- cpl = SWI_AST_MASK;
- if (ipending & ~SWI_AST_MASK)
+ cpl = 0;
+ if (ipending)
splz();
}
@@ -281,6 +280,12 @@ spl0(void)
for (;;) {
IFCPL_LOCK();
POSTCODE_HI(0xc);
+ /*
+ * XXX SWI_AST_MASK in ipending has moved to 1 in astpending,
+ * so the following code is dead, but just removing it may
+ * not be right.
+ */
+#if 0
if (cil & SWI_AST_MASK) { /* not now */
IFCPL_UNLOCK(); /* allow cil to change */
SPIN_RESET;
@@ -288,14 +293,15 @@ spl0(void)
SPIN_SPL
continue; /* try again */
}
+#endif
break;
}
#else /* INTR_SPL */
IFCPL_LOCK();
#endif /* INTR_SPL */
- cpl = SWI_AST_MASK;
- unpend = ipending & ~SWI_AST_MASK;
+ cpl = 0;
+ unpend = ipending;
IFCPL_UNLOCK();
if (unpend && !inside_intr)
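For the UP spl0() hunk earlier in this file, the simplification is mechanical:
with no AST bit resident in cpl, there is nothing to carve out of ipending
before deciding whether to call splz(). Restated as a before/after sketch,
with the declarations assumed:

	extern volatile unsigned cpl, ipending;
	void splz(void);
	#define SWI_AST_MASK	(1U << 31)	/* the retired bit */

	void spl0_before(void)
	{
		cpl = SWI_AST_MASK;	/* ASTs stayed masked until doreti */
		if (ipending & ~SWI_AST_MASK)
			splz();
	}

	void spl0_after(void)
	{
		cpl = 0;
		if (ipending)
			splz();
	}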