summaryrefslogtreecommitdiffstats
path: root/sys/amd64
diff options
context:
space:
mode:
author    bde <bde@FreeBSD.org>  1999-07-10 15:28:01 +0000
committer bde <bde@FreeBSD.org>  1999-07-10 15:28:01 +0000
commit   76b661148982d686a345ce7e8af6b837139288a6 (patch)
tree     984225551b54fbef3e436d6ac171c797ff70ad9f /sys/amd64
parent   49a30aa645e3f361acf747fdefab3e56b508deec (diff)
download FreeBSD-src-76b661148982d686a345ce7e8af6b837139288a6.zip
         FreeBSD-src-76b661148982d686a345ce7e8af6b837139288a6.tar.gz
Go back to the old (icu.s rev.1.7 1993) way of keeping the AST-pending
bit separate from ipending, since this is simpler and/or necessary for
SMP and may even be better for UP.

Reviewed by: alc, luoqi, tegge
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/apic_vector.S  |  9
-rw-r--r--  sys/amd64/amd64/cpu_switch.S   | 12
-rw-r--r--  sys/amd64/amd64/exception.S    | 45
-rw-r--r--  sys/amd64/amd64/exception.s    | 45
-rw-r--r--  sys/amd64/amd64/swtch.s        | 12
-rw-r--r--  sys/amd64/include/cpu.h        |  9
-rw-r--r--  sys/amd64/isa/atpic_vector.S   |  6
-rw-r--r--  sys/amd64/isa/icu_ipl.S        |  4
-rw-r--r--  sys/amd64/isa/icu_ipl.s        |  4
-rw-r--r--  sys/amd64/isa/icu_vector.S     |  6
-rw-r--r--  sys/amd64/isa/icu_vector.s     |  6
11 files changed, 32 insertions, 126 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 7acd37c..b99b93a 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: apic_vector.s,v 1.40 1999/06/16 03:53:52 tegge Exp $
+ * $Id: apic_vector.s,v 1.41 1999/07/03 06:33:47 alc Exp $
*/
@@ -705,8 +705,7 @@ _Xcpuast:
movl _cpl, %eax
#endif
pushl %eax
- lock
- orl $SWI_AST_PENDING, _ipending
+ movl $1, _astpending /* XXX */
AVCPL_UNLOCK
lock
incb _intr_nesting_level
@@ -976,13 +975,13 @@ _ihandlers:
* apic_ipl.s: splz_unpend
*/
.long _swi_null, swi_net, _swi_null, _swi_null
- .long _swi_vm, _swi_null, _softclock, swi_ast
+ .long _swi_vm, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
- .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0
/* active flag for lazy masking */
iactive:
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 82c9737..553b9a7 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.83 1999/07/03 06:33:24 alc Exp $
+ * $Id: swtch.s,v 1.84 1999/07/09 04:15:42 jlemon Exp $
*/
#include "npx.h"
@@ -310,10 +310,6 @@ _idle:
*
* XXX: we had damn well better be sure we had it before doing this!
*/
- CPL_LOCK /* XXX */
- MPLOCKED andl $~SWI_AST_MASK, _ipending /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
movl $FREE_LOCK, %eax
movl %eax, _mp_lock
@@ -357,18 +353,12 @@ idle_loop:
3:
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
call _get_mplock
- CPL_LOCK /* XXX */
- movl $SWI_AST_MASK, _cpl /* XXX Disallow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
cmpl $0,_whichqs /* normal queue */
CROSSJUMP(jne, nortqr, je)
cmpl $0,_whichidqs /* 'idle' queue */
CROSSJUMP(jne, idqr, je)
- CPL_LOCK /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
call _rel_mplock
jmp idle_loop
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 6479e55..dce98e8 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.61 1999/06/01 18:19:36 jlemon Exp $
+ * $Id: exception.s,v 1.62 1999/06/27 21:31:43 alc Exp $
*/
#include "npx.h"
@@ -142,9 +142,8 @@ IDTVEC(fpu)
/*
* Handle like an interrupt (except for accounting) so that we can
* call npx_intr to clear the error. It would be better to handle
- * npx interrupts as traps. This used to be difficult for nested
- * interrupts, but now it is fairly easy - mask nested ones the
- * same as SWI_AST's.
+ * npx interrupts as traps. Nested interrupts would probably have
+ * to be converted to ASTs.
*/
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
@@ -166,13 +165,9 @@ IDTVEC(fpu)
#ifdef CPL_AND_CML
movl _cml,%eax
pushl %eax /* save original cml */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
#else
movl _cpl,%eax
pushl %eax /* save original cpl */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* CPL_AND_CML */
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
@@ -181,8 +176,6 @@ IDTVEC(fpu)
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
incl _cnt+V_TRAP
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* SMP */
call _npx_intr
@@ -218,15 +211,9 @@ calltrap:
ALIGN_LOCK
ECPL_LOCK
#ifdef CPL_AND_CML
- movl _cml,%eax
- movl %eax,%ebx /* keep orig. cml here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
+ movl _cml,%ebx /* keep orig. cml here during trap() */
#else
- movl _cpl,%eax
- movl %eax,%ebx /* keep orig. cpl here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
+ movl _cpl,%ebx /* keep orig. cpl here during trap() */
#endif
ECPL_UNLOCK
call _trap
@@ -270,17 +257,6 @@ IDTVEC(syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
@@ -311,17 +287,6 @@ IDTVEC(int0x80_syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index 6479e55..dce98e8 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.61 1999/06/01 18:19:36 jlemon Exp $
+ * $Id: exception.s,v 1.62 1999/06/27 21:31:43 alc Exp $
*/
#include "npx.h"
@@ -142,9 +142,8 @@ IDTVEC(fpu)
/*
* Handle like an interrupt (except for accounting) so that we can
* call npx_intr to clear the error. It would be better to handle
- * npx interrupts as traps. This used to be difficult for nested
- * interrupts, but now it is fairly easy - mask nested ones the
- * same as SWI_AST's.
+ * npx interrupts as traps. Nested interrupts would probably have
+ * to be converted to ASTs.
*/
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
@@ -166,13 +165,9 @@ IDTVEC(fpu)
#ifdef CPL_AND_CML
movl _cml,%eax
pushl %eax /* save original cml */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
#else
movl _cpl,%eax
pushl %eax /* save original cpl */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* CPL_AND_CML */
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
@@ -181,8 +176,6 @@ IDTVEC(fpu)
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
incl _cnt+V_TRAP
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
#endif /* SMP */
call _npx_intr
@@ -218,15 +211,9 @@ calltrap:
ALIGN_LOCK
ECPL_LOCK
#ifdef CPL_AND_CML
- movl _cml,%eax
- movl %eax,%ebx /* keep orig. cml here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cml
+ movl _cml,%ebx /* keep orig. cml here during trap() */
#else
- movl _cpl,%eax
- movl %eax,%ebx /* keep orig. cpl here during trap() */
- orl $SWI_AST_MASK,%eax
- movl %eax,_cpl
+ movl _cpl,%ebx /* keep orig. cpl here during trap() */
#endif
ECPL_UNLOCK
call _trap
@@ -270,17 +257,6 @@ IDTVEC(syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
@@ -311,17 +287,6 @@ IDTVEC(int0x80_syscall)
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
-#if 0
- ECPL_LOCK /* restore the locking if ... */
-#endif
-#ifdef CPL_AND_CML
- movl $SWI_AST_MASK,_cml
-#else
- movl $SWI_AST_MASK,_cpl /* this critical sections expands */
-#endif
-#if 0
- ECPL_UNLOCK
-#endif
call _syscall
/*
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 82c9737..553b9a7 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.83 1999/07/03 06:33:24 alc Exp $
+ * $Id: swtch.s,v 1.84 1999/07/09 04:15:42 jlemon Exp $
*/
#include "npx.h"
@@ -310,10 +310,6 @@ _idle:
*
* XXX: we had damn well better be sure we had it before doing this!
*/
- CPL_LOCK /* XXX */
- MPLOCKED andl $~SWI_AST_MASK, _ipending /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
movl $FREE_LOCK, %eax
movl %eax, _mp_lock
@@ -357,18 +353,12 @@ idle_loop:
3:
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
call _get_mplock
- CPL_LOCK /* XXX */
- movl $SWI_AST_MASK, _cpl /* XXX Disallow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
cmpl $0,_whichqs /* normal queue */
CROSSJUMP(jne, nortqr, je)
cmpl $0,_whichidqs /* 'idle' queue */
CROSSJUMP(jne, idqr, je)
- CPL_LOCK /* XXX */
- movl $0, _cpl /* XXX Allow ASTs on other CPU */
- CPL_UNLOCK /* XXX */
call _rel_mplock
jmp idle_loop
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index 06f060d..d3e4ad4 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
- * $Id: cpu.h,v 1.38 1999/02/02 09:08:23 bde Exp $
+ * $Id: cpu.h,v 1.39 1999/04/23 20:22:44 dt Exp $
*/
#ifndef _MACHINE_CPU_H_
@@ -69,11 +69,11 @@
* problem at 100 Hz but it is serious at 16000 Hz for pcaudio. softclock()
* can take more than 62.5 usec so clock interrupts are lost.) It doesn't
* check for pending interrupts being unmasked. clkintr() and Xintr0()
- * assume that the ipl is high when hardclock() returns. Our SWI_AST
+ * assume that the ipl is high when hardclock() returns. Our SWI_CLOCK
* handling is efficient enough that little is gained by calling
* softclock() directly.
*/
-#define CLKF_BASEPRI(framep) (((framep)->cf_ppl & ~SWI_AST_MASK) == 0)
+#define CLKF_BASEPRI(framep) ((framep)->cf_ppl == 0)
#else
#define CLKF_BASEPRI(framep) (0)
#endif
@@ -102,7 +102,7 @@
*/
#define signotify(p) aston()
-#define aston() setsoftast()
+#define aston() do { astpending = 1; } while (0)
#define astoff()
/*
@@ -125,6 +125,7 @@
}
#ifdef KERNEL
+extern int astpending;
extern char btext[];
extern char etext[];
extern u_char intr_nesting_level;
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index 453f9d5..cbb8b5e 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: icu_vector.s,v 1.11 1999/04/28 01:04:13 luoqi Exp $
+ * $Id: icu_vector.s,v 1.12 1999/05/28 14:08:59 bde Exp $
*/
/*
@@ -211,7 +211,7 @@ _ihandlers: /* addresses of interrupt handlers */
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _swi_null, _swi_null
.long _swi_null, _swi_null, _swi_null, _swi_null
- .long _swi_null, _swi_null, _softclock, swi_ast
+ .long _swi_null, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
@@ -219,6 +219,6 @@ imasks: /* masks for interrupt handlers */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
.long SWI_VM_MASK, 0, 0, 0
.long 0, 0, 0, 0
- .long 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long 0, 0, SWI_CLOCK_MASK, 0
.text
diff --git a/sys/amd64/isa/icu_ipl.S b/sys/amd64/isa/icu_ipl.S
index 4d91725..0f3a3ca 100644
--- a/sys/amd64/isa/icu_ipl.S
+++ b/sys/amd64/isa/icu_ipl.S
@@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: icu_ipl.s,v 1.3 1997/09/02 19:40:13 fsmp Exp $
+ * $Id: icu_ipl.s,v 1.4 1998/08/11 15:08:12 bde Exp $
*/
.data
@@ -103,8 +103,6 @@ splz_unpend:
ALIGN_TEXT
splz_swi:
- cmpl $SWI_AST,%ecx
- je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
diff --git a/sys/amd64/isa/icu_ipl.s b/sys/amd64/isa/icu_ipl.s
index 4d91725..0f3a3ca 100644
--- a/sys/amd64/isa/icu_ipl.s
+++ b/sys/amd64/isa/icu_ipl.s
@@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: icu_ipl.s,v 1.3 1997/09/02 19:40:13 fsmp Exp $
+ * $Id: icu_ipl.s,v 1.4 1998/08/11 15:08:12 bde Exp $
*/
.data
@@ -103,8 +103,6 @@ splz_unpend:
ALIGN_TEXT
splz_swi:
- cmpl $SWI_AST,%ecx
- je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index 453f9d5..cbb8b5e 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: icu_vector.s,v 1.11 1999/04/28 01:04:13 luoqi Exp $
+ * $Id: icu_vector.s,v 1.12 1999/05/28 14:08:59 bde Exp $
*/
/*
@@ -211,7 +211,7 @@ _ihandlers: /* addresses of interrupt handlers */
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _swi_null, _swi_null
.long _swi_null, _swi_null, _swi_null, _swi_null
- .long _swi_null, _swi_null, _softclock, swi_ast
+ .long _swi_null, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
@@ -219,6 +219,6 @@ imasks: /* masks for interrupt handlers */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
.long SWI_VM_MASK, 0, 0, 0
.long 0, 0, 0, 0
- .long 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long 0, 0, SWI_CLOCK_MASK, 0
.text
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index 453f9d5..cbb8b5e 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
- * $Id: icu_vector.s,v 1.11 1999/04/28 01:04:13 luoqi Exp $
+ * $Id: icu_vector.s,v 1.12 1999/05/28 14:08:59 bde Exp $
*/
/*
@@ -211,7 +211,7 @@ _ihandlers: /* addresses of interrupt handlers */
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _swi_null, _swi_null
.long _swi_null, _swi_null, _swi_null, _swi_null
- .long _swi_null, _swi_null, _softclock, swi_ast
+ .long _swi_null, _swi_null, _softclock, _swi_null
imasks: /* masks for interrupt handlers */
.space NHWI*4 /* padding; HWI masks are elsewhere */
@@ -219,6 +219,6 @@ imasks: /* masks for interrupt handlers */
.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
.long SWI_VM_MASK, 0, 0, 0
.long 0, 0, 0, 0
- .long 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+ .long 0, 0, SWI_CLOCK_MASK, 0
.text
OpenPOWER on IntegriCloud