author     fsmp <fsmp@FreeBSD.org>    1997-07-31 05:43:05 +0000
committer  fsmp <fsmp@FreeBSD.org>    1997-07-31 05:43:05 +0000
commit     4fa08df3f63ff52d105b8e2c4608527f7986a2ad (patch)
tree       2b929b1d42afe31c643d8d7d98be73b9234d2ad0 /sys
parent     ae192ba332646764300b4256af3dcbeb84218e1a (diff)
Converted the TEST_LOPRIO code to default.
Created mplock functions that save/restore NO registers. Minor cleanup.
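As background for the mp_lock changes in this patch, the following is a minimal C sketch (not the committed code) of the recursive giant-lock word that MPgetlock()/MPrellock() manipulate in mplock.s: the holder's pre-shifted CPU id lives in the upper bits and a recursion count in the lower bits. The FREE_LOCK and COUNT_FIELD values and the function names used here are assumptions for illustration only.

    /*
     * Illustrative sketch only -- NOT the committed code.  It models the
     * recursive mp_lock acquire/release implemented in assembly below.
     * FREE_LOCK, COUNT_FIELD and the helper names are assumed values.
     */
    #include <stdint.h>

    #define FREE_LOCK    0xffffffffu        /* assumed "nobody holds it" marker */
    #define COUNT_FIELD  0x00ffffffu        /* assumed recursion-count mask */

    /* one atomic compare-and-swap, standing in for `lock cmpxchg` */
    static int cas(volatile uint32_t *p, uint32_t expect, uint32_t newval)
    {
            return __sync_bool_compare_and_swap(p, expect, newval);
    }

    /* recursive acquire: spin until this CPU owns the lock */
    void mpgetlock_sketch(volatile uint32_t *lock, uint32_t cpu_lockid)
    {
            for (;;) {
                    uint32_t old = *lock;

                    /* already ours?  just bump the recursion count */
                    if (cas(lock, (old & COUNT_FIELD) | cpu_lockid, old + 1))
                            return;

                    /* free?  claim it with a count of one */
                    if (cas(lock, FREE_LOCK, cpu_lockid + 1))
                            return;

                    /* held by another CPU: spin until it looks free, retry */
                    while (*lock != FREE_LOCK)
                            ;
            }
    }

    /* recursive release: the last release marks the lock free */
    void mprellock_sketch(volatile uint32_t *lock)
    {
            for (;;) {
                    uint32_t old = *lock;
                    uint32_t newval = old - 1;

                    if ((newval & COUNT_FIELD) == 0)
                            newval = FREE_LOCK;
                    if (cas(lock, old, newval))
                            return;
            }
    }

The new get_isrlock()/try_isrlock()/rel_isrlock() entry points added below wrap these same MPgetlock()/MPtrylock()/MPrellock() primitives but deliberately preserve no registers, leaving register save/restore to the calling ISR stubs.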
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/cpu_switch.S    12
-rw-r--r--  sys/amd64/amd64/exception.S     26
-rw-r--r--  sys/amd64/amd64/exception.s     26
-rw-r--r--  sys/amd64/amd64/mp_machdep.c     4
-rw-r--r--  sys/amd64/amd64/mptable.c        4
-rw-r--r--  sys/amd64/amd64/swtch.s         12
-rw-r--r--  sys/amd64/include/mptable.h      4
-rw-r--r--  sys/i386/i386/exception.s       26
-rw-r--r--  sys/i386/i386/mp_machdep.c       4
-rw-r--r--  sys/i386/i386/mpapic.c          40
-rw-r--r--  sys/i386/i386/mplock.s         178
-rw-r--r--  sys/i386/i386/mptable.c          4
-rw-r--r--  sys/i386/i386/swtch.s           12
-rw-r--r--  sys/i386/include/mptable.h       4
-rw-r--r--  sys/kern/subr_smp.c              4
15 files changed, 185 insertions(+), 175 deletions(-)
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index eec0f38..0a3bf40 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.55 1997/07/15 02:51:20 fsmp Exp $
+ * $Id: swtch.s,v 1.4 1997/07/30 22:51:11 smp Exp smp $
*/
#include "npx.h"
@@ -42,7 +42,6 @@
#include <sys/rtprio.h>
#include <machine/asmacros.h>
-#include <machine/smptests.h> /** TEST_LOPRIO */
#ifdef SMP
#include <machine/pmap.h>
@@ -330,6 +329,7 @@ ENTRY(cpu_switch)
#ifdef SMP
movl _mp_lock, %eax
+ /* XXX FIXME: we should be saving the local APIC TPR */
#ifdef DIAGNOSTIC
cmpl $FREE_LOCK, %eax /* is it free? */
je badsw4 /* yes, bad medicine! */
@@ -516,16 +516,10 @@ swtch_com:
movl %ecx, _curproc /* into next process */
#ifdef SMP
-#ifdef TEST_LOPRIO /* hold LOPRIO for INTs */
-#ifdef CHEAP_TPR
- movl $0, lapic_tpr
-#else
- andl $~APIC_TPR_PRIO, lapic_tpr
-#endif /* CHEAP_TPR */
-#endif /* TEST_LOPRIO */
movl _cpu_lockid, %eax
orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
movl %eax, _mp_lock /* load the mp_lock */
+ /* XXX FIXME: we should be restoring the local APIC TPR */
#endif /* SMP */
#ifdef USER_LDT
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index ab55025..18715d2 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.2 1997/07/23 20:24:21 smp Exp smp $
+ * $Id: exception.s,v 1.3 1997/07/30 22:51:11 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@@ -43,8 +43,22 @@
#ifdef SMP
#include <machine/apic.h> /* for apic_vector.s */
+
+/* generic giant-lock calls */
#define GET_MPLOCK call _get_mplock
#define REL_MPLOCK call _rel_mplock
+
+/* ISR specific giant-lock calls */
+#define GET_ISRLOCK(N) call _get_isrlock
+#define TRY_ISRLOCK(N) \
+ pushl $_mp_lock ; \
+ call _MPtrylock ; \
+ add $4, %esp
+#define REL_ISRLOCK(N) \
+ pushl $_mp_lock ; \
+ call _MPrellock ; \
+ add $4, %esp
+
#define MP_INSTR_LOCK lock
/* protects the IO APIC and apic_imen as a critical region */
@@ -62,6 +76,8 @@
#define GET_MPLOCK /* NOP get Kernel Mutex */
#define REL_MPLOCK /* NOP release mutex */
+#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
+#define REL_ISRLOCK(N) /* NOP release mutex */
#define MP_INSTR_LOCK /* NOP instruction lock */
#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
@@ -164,7 +180,7 @@ IDTVEC(fpu)
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish building intr frame */
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@@ -190,7 +206,7 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
- GET_MPLOCK
+ GET_ISRLOCK(-1)
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
@@ -240,7 +256,7 @@ IDTVEC(syscall)
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
@@ -267,7 +283,7 @@ IDTVEC(int0x80_syscall)
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index ab55025..18715d2 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.2 1997/07/23 20:24:21 smp Exp smp $
+ * $Id: exception.s,v 1.3 1997/07/30 22:51:11 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@@ -43,8 +43,22 @@
#ifdef SMP
#include <machine/apic.h> /* for apic_vector.s */
+
+/* generic giant-lock calls */
#define GET_MPLOCK call _get_mplock
#define REL_MPLOCK call _rel_mplock
+
+/* ISR specific giant-lock calls */
+#define GET_ISRLOCK(N) call _get_isrlock
+#define TRY_ISRLOCK(N) \
+ pushl $_mp_lock ; \
+ call _MPtrylock ; \
+ add $4, %esp
+#define REL_ISRLOCK(N) \
+ pushl $_mp_lock ; \
+ call _MPrellock ; \
+ add $4, %esp
+
#define MP_INSTR_LOCK lock
/* protects the IO APIC and apic_imen as a critical region */
@@ -62,6 +76,8 @@
#define GET_MPLOCK /* NOP get Kernel Mutex */
#define REL_MPLOCK /* NOP release mutex */
+#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
+#define REL_ISRLOCK(N) /* NOP release mutex */
#define MP_INSTR_LOCK /* NOP instruction lock */
#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
@@ -164,7 +180,7 @@ IDTVEC(fpu)
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish building intr frame */
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@@ -190,7 +206,7 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
- GET_MPLOCK
+ GET_ISRLOCK(-1)
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
@@ -240,7 +256,7 @@ IDTVEC(syscall)
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
@@ -267,7 +283,7 @@ IDTVEC(int0x80_syscall)
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index ff0390a..cf5078d 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.22 1997/07/28 03:39:06 smp Exp smp $
+ * $Id: mp_machdep.c,v 1.23 1997/07/30 22:51:11 smp Exp smp $
*/
#include "opt_smp.h"
@@ -1420,7 +1420,7 @@ init_locks(void)
mp_lock = 0x00000001;
/* locks the IO APIC and apic_imen accesses */
- s_lock_init(&imen_lock);
+ s_lock_init((struct simplelock*)&imen_lock);
}
diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c
index ff0390a..cf5078d 100644
--- a/sys/amd64/amd64/mptable.c
+++ b/sys/amd64/amd64/mptable.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.22 1997/07/28 03:39:06 smp Exp smp $
+ * $Id: mp_machdep.c,v 1.23 1997/07/30 22:51:11 smp Exp smp $
*/
#include "opt_smp.h"
@@ -1420,7 +1420,7 @@ init_locks(void)
mp_lock = 0x00000001;
/* locks the IO APIC and apic_imen accesses */
- s_lock_init(&imen_lock);
+ s_lock_init((struct simplelock*)&imen_lock);
}
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index eec0f38..0a3bf40 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.55 1997/07/15 02:51:20 fsmp Exp $
+ * $Id: swtch.s,v 1.4 1997/07/30 22:51:11 smp Exp smp $
*/
#include "npx.h"
@@ -42,7 +42,6 @@
#include <sys/rtprio.h>
#include <machine/asmacros.h>
-#include <machine/smptests.h> /** TEST_LOPRIO */
#ifdef SMP
#include <machine/pmap.h>
@@ -330,6 +329,7 @@ ENTRY(cpu_switch)
#ifdef SMP
movl _mp_lock, %eax
+ /* XXX FIXME: we should be saving the local APIC TPR */
#ifdef DIAGNOSTIC
cmpl $FREE_LOCK, %eax /* is it free? */
je badsw4 /* yes, bad medicine! */
@@ -516,16 +516,10 @@ swtch_com:
movl %ecx, _curproc /* into next process */
#ifdef SMP
-#ifdef TEST_LOPRIO /* hold LOPRIO for INTs */
-#ifdef CHEAP_TPR
- movl $0, lapic_tpr
-#else
- andl $~APIC_TPR_PRIO, lapic_tpr
-#endif /* CHEAP_TPR */
-#endif /* TEST_LOPRIO */
movl _cpu_lockid, %eax
orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
movl %eax, _mp_lock /* load the mp_lock */
+ /* XXX FIXME: we should be restoring the local APIC TPR */
#endif /* SMP */
#ifdef USER_LDT
diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h
index ff0390a..cf5078d 100644
--- a/sys/amd64/include/mptable.h
+++ b/sys/amd64/include/mptable.h
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.22 1997/07/28 03:39:06 smp Exp smp $
+ * $Id: mp_machdep.c,v 1.23 1997/07/30 22:51:11 smp Exp smp $
*/
#include "opt_smp.h"
@@ -1420,7 +1420,7 @@ init_locks(void)
mp_lock = 0x00000001;
/* locks the IO APIC and apic_imen accesses */
- s_lock_init(&imen_lock);
+ s_lock_init((struct simplelock*)&imen_lock);
}
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
index ab55025..18715d2 100644
--- a/sys/i386/i386/exception.s
+++ b/sys/i386/i386/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.2 1997/07/23 20:24:21 smp Exp smp $
+ * $Id: exception.s,v 1.3 1997/07/30 22:51:11 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@@ -43,8 +43,22 @@
#ifdef SMP
#include <machine/apic.h> /* for apic_vector.s */
+
+/* generic giant-lock calls */
#define GET_MPLOCK call _get_mplock
#define REL_MPLOCK call _rel_mplock
+
+/* ISR specific giant-lock calls */
+#define GET_ISRLOCK(N) call _get_isrlock
+#define TRY_ISRLOCK(N) \
+ pushl $_mp_lock ; \
+ call _MPtrylock ; \
+ add $4, %esp
+#define REL_ISRLOCK(N) \
+ pushl $_mp_lock ; \
+ call _MPrellock ; \
+ add $4, %esp
+
#define MP_INSTR_LOCK lock
/* protects the IO APIC and apic_imen as a critical region */
@@ -62,6 +76,8 @@
#define GET_MPLOCK /* NOP get Kernel Mutex */
#define REL_MPLOCK /* NOP release mutex */
+#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
+#define REL_ISRLOCK(N) /* NOP release mutex */
#define MP_INSTR_LOCK /* NOP instruction lock */
#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
@@ -164,7 +180,7 @@ IDTVEC(fpu)
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish building intr frame */
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@@ -190,7 +206,7 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
- GET_MPLOCK
+ GET_ISRLOCK(-1)
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
@@ -240,7 +256,7 @@ IDTVEC(syscall)
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
@@ -267,7 +283,7 @@ IDTVEC(int0x80_syscall)
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
- GET_MPLOCK
+ GET_ISRLOCK(-1)
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index ff0390a..cf5078d 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.22 1997/07/28 03:39:06 smp Exp smp $
+ * $Id: mp_machdep.c,v 1.23 1997/07/30 22:51:11 smp Exp smp $
*/
#include "opt_smp.h"
@@ -1420,7 +1420,7 @@ init_locks(void)
mp_lock = 0x00000001;
/* locks the IO APIC and apic_imen accesses */
- s_lock_init(&imen_lock);
+ s_lock_init((struct simplelock*)&imen_lock);
}
diff --git a/sys/i386/i386/mpapic.c b/sys/i386/i386/mpapic.c
index 34060ba..699b735 100644
--- a/sys/i386/i386/mpapic.c
+++ b/sys/i386/i386/mpapic.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mpapic.c,v 1.25 1997/07/28 03:39:06 smp Exp smp $
+ * $Id: mpapic.c,v 1.26 1997/07/30 22:51:11 smp Exp smp $
*/
#include "opt_smp.h"
@@ -30,7 +30,7 @@
#include <sys/types.h>
#include <sys/systm.h>
-#include <machine/smptests.h> /** TEST_LOPRIO, PEND_INTS, TEST_TEST1 */
+#include <machine/smptests.h> /** PEND_INTS, TEST_TEST1 */
#include <machine/smp.h>
#include <machine/mpapic.h>
#include <machine/segments.h>
@@ -76,10 +76,7 @@ apic_initialize(void)
/* set the Task Priority Register as needed */
temp = lapic.tpr;
temp &= ~APIC_TPR_PRIO; /* clear priority field */
-
-#if defined(TEST_LOPRIO)
temp |= LOPRIO_LEVEL; /* allow INT arbitration */
-#endif /* TEST_LOPRIO */
lapic.tpr = temp;
@@ -132,8 +129,6 @@ apic_dump(char* str)
static int trigger __P((int apic, int pin, u_int32_t * flags));
static void polarity __P((int apic, int pin, u_int32_t * flags, int level));
-
-#if defined(TEST_LOPRIO)
#define DEFAULT_FLAGS \
((u_int32_t) \
(IOART_INTMSET | \
@@ -147,21 +142,6 @@ static void polarity __P((int apic, int pin, u_int32_t * flags, int level));
IOART_INTAHI | \
IOART_DESTPHY | \
IOART_DELLOPRI))
-#else
-#define DEFAULT_FLAGS \
- ((u_int32_t) \
- (IOART_INTMSET | \
- IOART_DESTPHY | \
- IOART_DELFIXED))
-
-#define DEFAULT_ISA_FLAGS \
- ((u_int32_t) \
- (IOART_INTMSET | \
- IOART_TRGREDG | \
- IOART_INTAHI | \
- IOART_DESTPHY | \
- IOART_DELFIXED))
-#endif /* TEST_LOPRIO */
/*
* Setup the IO APIC.
@@ -179,18 +159,14 @@ io_apic_setup(int apic)
u_int32_t vector; /* the window register is 32 bits */
int pin, level;
-#if defined(TEST_LOPRIO)
target = IOART_DEST;
-#else
- target = boot_cpu_id << 24;
-#endif /* TEST_LOPRIO */
#if defined(PEND_INTS)
apic_pin_trigger[apic] = 0; /* default to edge-triggered */
#endif /* PEND_INTS */
if (apic == 0) {
- maxpin = REDIRCNT_IOAPIC(apic); /* pins-1 in APIC */
+ maxpin = REDIRCNT_IOAPIC(apic); /* pins in APIC */
for (pin = 0; pin < maxpin; ++pin) {
int bus, bustype;
@@ -244,7 +220,6 @@ io_apic_setup(int apic)
#undef DEFAULT_FLAGS
-#if defined(TEST_LOPRIO)
#define DEFAULT_EXTINT_FLAGS \
((u_int32_t) \
(IOART_INTMSET | \
@@ -252,15 +227,6 @@ io_apic_setup(int apic)
IOART_INTAHI | \
IOART_DESTPHY | \
IOART_DELLOPRI))
-#else
-#define DEFAULT_EXTINT_FLAGS \
- ((u_int32_t) \
- (IOART_INTMSET | \
- IOART_TRGREDG | \
- IOART_INTAHI | \
- IOART_DESTPHY | \
- IOART_DELFIXED))
-#endif /* TEST_LOPRIO */
/*
* Setup the source of External INTerrupts.
diff --git a/sys/i386/i386/mplock.s b/sys/i386/i386/mplock.s
index 8e43778..17425af 100644
--- a/sys/i386/i386/mplock.s
+++ b/sys/i386/i386/mplock.s
@@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
- * $Id: mplock.s,v 1.11 1997/07/25 22:19:16 smp Exp smp $
+ * $Id: mplock.s,v 1.12 1997/07/30 22:51:11 smp Exp smp $
*
* Functions for locking between CPUs in a SMP system.
*
@@ -27,52 +27,8 @@
#include <i386/isa/intr_machdep.h>
-/*
- * claim LOW PRIO, ie. accept ALL INTerrupts
- */
-#ifdef TEST_LOPRIO
-
-/* location of saved TPR on stack */
-#define TPR_TARGET 20(%esp)
-
-/* we assumme that the 'reserved bits' can be written with zeros */
-#ifdef CHEAP_TPR
-
-/* after 1st acquire of lock we attempt to grab all hardware INTs */
-#define GRAB_HWI \
- movl $ALLHWI_LEVEL, TPR_TARGET /* task prio to 'all HWI' */
-
-#define GRAB_HWI_2 \
- movl $ALLHWI_LEVEL, lapic_tpr /* task prio to 'all HWI' */
-
-/* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
-#define ARB_HWI \
- movl $LOPRIO_LEVEL, lapic_tpr /* task prio to 'arbitrate' */
-
-#else /** CHEAP_TPR */
-
-#define GRAB_HWI \
- andl $~APIC_TPR_PRIO, TPR_TARGET /* task prio to 'all HWI' */
-
-#define GRAB_HWI_2 \
- andl $~APIC_TPR_PRIO, lapic_tpr /* task prio to 'all HWI' */
-
-#define ARB_HWI \
- movl lapic_tpr, %eax ; /* TPR */ \
- andl $~APIC_TPR_PRIO, %eax ; /* clear TPR field */ \
- orl $LOPRIO_LEVEL, %eax ; /* prio to arbitrate */ \
- movl %eax, lapic_tpr ; /* set it */ \
- movl (%edx), %eax /* reload %eax with lock */
-
-#endif /** CHEAP_TPR */
-
-#else /** TEST_LOPRIO */
-
-#define GRAB_HWI /* nop */
-#define GRAB_HWI_2 /* nop */
-#define ARB_HWI /* nop */
-
-#endif /** TEST_LOPRIO */
+#define MAYBE_PUSHL_EAX pushl %eax
+#define MAYBE_POPL_EAX popl %eax
.text
@@ -83,7 +39,8 @@
*/
NON_GPROF_ENTRY(MPgetlock)
-1: movl 4(%esp), %edx /* Get the address of the lock */
+ movl 4(%esp), %edx /* Get the address of the lock */
+1:
movl (%edx), %eax /* Try to see if we have it already */
andl $COUNT_FIELD, %eax /* - get count */
movl _cpu_lockid, %ecx /* - get pre-shifted logical cpu id */
@@ -94,17 +51,19 @@ NON_GPROF_ENTRY(MPgetlock)
cmpxchg %ecx, (%edx) /* - try it atomically */
jne 2f /* - miss */
ret
-2: movl $FREE_LOCK, %eax /* Assume it's free */
+2:
+ movl $FREE_LOCK, %eax /* Assume it's free */
movl _cpu_lockid, %ecx /* - get pre-shifted logical cpu id */
incl %ecx /* - new count is one */
lock
cmpxchg %ecx, (%edx) /* - try it atomically */
- jne 3f /* ...do not collect $200 */
- GRAB_HWI /* 1st acquire, grab hw INTs */
- ret
-3: cmpl $FREE_LOCK, (%edx) /* Wait for it to become free */
+ je 4f /* ...do not collect $200 */
+3:
+ cmpl $FREE_LOCK, (%edx) /* Wait for it to become free */
jne 3b
jmp 2b /* XXX 1b ? */
+4:
+ ret
/***********************************************************************
* int MPtrylock(unsigned int *lock)
@@ -114,7 +73,7 @@ NON_GPROF_ENTRY(MPgetlock)
*/
NON_GPROF_ENTRY(MPtrylock)
-1: movl 4(%esp), %edx /* Get the address of the lock */
+ movl 4(%esp), %edx /* Get the address of the lock */
movl (%edx), %eax /* Try to see if we have it already */
andl $COUNT_FIELD, %eax /* - get count */
movl _cpu_lockid, %ecx /* - get pre-shifted logical cpu id */
@@ -123,19 +82,20 @@ NON_GPROF_ENTRY(MPtrylock)
incl %ecx /* - new count is one more */
lock
cmpxchg %ecx, (%edx) /* - try it atomically */
- jne 2f /* - miss */
+ jne 1f /* - miss */
movl $1, %eax
ret
-2: movl $FREE_LOCK, %eax /* Assume it's free */
+1:
+ movl $FREE_LOCK, %eax /* Assume it's free */
movl _cpu_lockid, %ecx /* - get pre-shifted logical cpu id */
incl %ecx /* - new count is one */
lock
cmpxchg %ecx, (%edx) /* - try it atomically */
- jne 3f /* ...do not collect $200 */
- GRAB_HWI_2 /* 1st acquire, grab hw INTs */
+ jne 2f /* ...do not collect $200 */
movl $1, %eax
ret
-3: movl $0, %eax
+2:
+ movl $0, %eax
ret
/***********************************************************************
@@ -145,57 +105,50 @@ NON_GPROF_ENTRY(MPtrylock)
*/
NON_GPROF_ENTRY(MPrellock)
-1: movl 4(%esp), %edx /* Get the address of the lock */
+ movl 4(%esp), %edx /* Get the address of the lock */
+1:
movl (%edx), %eax /* - get the value */
- movl %eax,%ecx
+ movl %eax, %ecx
decl %ecx /* - new count is one less */
testl $COUNT_FIELD, %ecx /* - Unless it's zero... */
jnz 2f
- ARB_HWI /* last release, arbitrate hw INTs */
movl $FREE_LOCK, %ecx /* - In which case we release it */
-2: lock
+2:
+ lock
cmpxchg %ecx, (%edx) /* - try it atomically */
jne 1b /* ...do not collect $200 */
ret
+
/***********************************************************************
* void get_mplock()
* -----------------
* All registers preserved
*
- * Stack (after call to _MPgetlock):
- *
- * &mp_lock 4(%esp)
- * edx 8(%esp)
- * ecx 12(%esp)
- * EFLAGS 16(%esp)
- * local APIC TPR 20(%esp)
- * eax 24(%esp)
*/
NON_GPROF_ENTRY(get_mplock)
- pushl %eax
+ MAYBE_PUSHL_EAX
/* block all HW INTs via Task Priority Register */
#ifdef CHEAP_TPR
pushl lapic_tpr /* save current TPR */
pushfl /* save current EFLAGS */
- btl $9, (%esp) /* test EI bit */
- jc 1f /* INTs currently enabled */
+ testl $(1<<9), (%esp) /* test EI bit */
+ jnz 1f /* INTs currently enabled */
movl $TPR_BLOCK_HWI, lapic_tpr
#else
movl lapic_tpr, %eax /* get current TPR */
pushl %eax /* save current TPR */
pushfl /* save current EFLAGS */
- btl $9, (%esp) /* test EI bit */
- jc 1f /* INTs currently enabled */
+ testl $(1<<9), (%esp) /* test EI bit */
+ jnz 1f /* INTs currently enabled */
andl $~APIC_TPR_PRIO, %eax /* clear task priority field */
orl $TPR_BLOCK_HWI, %eax /* only allow IPIs */
movl %eax, lapic_tpr
#endif /** CHEAP_TPR */
- sti /* allow (IPI and only IPI) INTs */
+ sti /* allow IPI (and only IPI) INTs */
1:
-
pushl %ecx
pushl %edx
pushl $_mp_lock
@@ -206,9 +159,42 @@ NON_GPROF_ENTRY(get_mplock)
popfl /* restore original EFLAGS */
popl lapic_tpr /* restore TPR */
- popl %eax /* restore scratch */
+ MAYBE_POPL_EAX
+ ret
+
+/***********************************************************************
+ * void get_isrlock()
+ * -----------------
+ * no registers preserved, assummed the calling ISR does!
+ *
+ */
+
+NON_GPROF_ENTRY(get_isrlock)
+
+ /* block all HW INTs via Task Priority Register */
+#ifdef CHEAP_TPR
+ pushl lapic_tpr /* save current TPR */
+ pushfl /* save current EFLAGS */
+ movl $TPR_BLOCK_HWI, lapic_tpr
+#else
+ movl lapic_tpr, %eax /* get current TPR */
+ pushl %eax /* save current TPR */
+ pushfl /* save current EFLAGS */
+ andl $~APIC_TPR_PRIO, %eax /* clear task priority field */
+ orl $TPR_BLOCK_HWI, %eax /* only allow IPIs */
+ movl %eax, lapic_tpr
+#endif /** CHEAP_TPR */
+ sti /* allow IPI (and only IPI) INTs */
+1:
+ pushl $_mp_lock
+ call _MPgetlock
+ add $4, %esp
+
+ popfl /* restore original EFLAGS */
+ popl lapic_tpr /* restore TPR */
ret
+
/***********************************************************************
* void try_mplock()
* -----------------
@@ -226,14 +212,27 @@ NON_GPROF_ENTRY(try_mplock)
ret
/***********************************************************************
+ * void try_isrlock()
+ * -----------------
+ * no registers preserved, assummed the calling ISR does!
+ * reg %eax == 1 if success
+ */
+
+NON_GPROF_ENTRY(try_isrlock)
+ pushl $_mp_lock
+ call _MPtrylock
+ add $4, %esp
+ ret
+
+
+/***********************************************************************
* void rel_mplock()
* -----------------
* All registers preserved
*/
NON_GPROF_ENTRY(rel_mplock)
- pushl %eax
-
+ MAYBE_PUSHL_EAX
pushl %ecx
pushl %edx
pushl $_mp_lock
@@ -241,11 +240,26 @@ NON_GPROF_ENTRY(rel_mplock)
add $4, %esp
popl %edx
popl %ecx
+ MAYBE_POPL_EAX
+ ret
+
+/***********************************************************************
+ * void rel_isrlock()
+ * -----------------
+ * no registers preserved, assummed the calling ISR does!
+ */
- popl %eax
+NON_GPROF_ENTRY(rel_isrlock)
+ pushl $_mp_lock
+ call _MPrellock
+ add $4, %esp
ret
+/***********************************************************************
+ *
+ */
+
.data
.globl _mp_lock
.align 4 /* mp_lock aligned on int boundary */
diff --git a/sys/i386/i386/mptable.c b/sys/i386/i386/mptable.c
index ff0390a..cf5078d 100644
--- a/sys/i386/i386/mptable.c
+++ b/sys/i386/i386/mptable.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.22 1997/07/28 03:39:06 smp Exp smp $
+ * $Id: mp_machdep.c,v 1.23 1997/07/30 22:51:11 smp Exp smp $
*/
#include "opt_smp.h"
@@ -1420,7 +1420,7 @@ init_locks(void)
mp_lock = 0x00000001;
/* locks the IO APIC and apic_imen accesses */
- s_lock_init(&imen_lock);
+ s_lock_init((struct simplelock*)&imen_lock);
}
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index eec0f38..0a3bf40 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.55 1997/07/15 02:51:20 fsmp Exp $
+ * $Id: swtch.s,v 1.4 1997/07/30 22:51:11 smp Exp smp $
*/
#include "npx.h"
@@ -42,7 +42,6 @@
#include <sys/rtprio.h>
#include <machine/asmacros.h>
-#include <machine/smptests.h> /** TEST_LOPRIO */
#ifdef SMP
#include <machine/pmap.h>
@@ -330,6 +329,7 @@ ENTRY(cpu_switch)
#ifdef SMP
movl _mp_lock, %eax
+ /* XXX FIXME: we should be saving the local APIC TPR */
#ifdef DIAGNOSTIC
cmpl $FREE_LOCK, %eax /* is it free? */
je badsw4 /* yes, bad medicine! */
@@ -516,16 +516,10 @@ swtch_com:
movl %ecx, _curproc /* into next process */
#ifdef SMP
-#ifdef TEST_LOPRIO /* hold LOPRIO for INTs */
-#ifdef CHEAP_TPR
- movl $0, lapic_tpr
-#else
- andl $~APIC_TPR_PRIO, lapic_tpr
-#endif /* CHEAP_TPR */
-#endif /* TEST_LOPRIO */
movl _cpu_lockid, %eax
orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
movl %eax, _mp_lock /* load the mp_lock */
+ /* XXX FIXME: we should be restoring the local APIC TPR */
#endif /* SMP */
#ifdef USER_LDT
diff --git a/sys/i386/include/mptable.h b/sys/i386/include/mptable.h
index ff0390a..cf5078d 100644
--- a/sys/i386/include/mptable.h
+++ b/sys/i386/include/mptable.h
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.22 1997/07/28 03:39:06 smp Exp smp $
+ * $Id: mp_machdep.c,v 1.23 1997/07/30 22:51:11 smp Exp smp $
*/
#include "opt_smp.h"
@@ -1420,7 +1420,7 @@ init_locks(void)
mp_lock = 0x00000001;
/* locks the IO APIC and apic_imen accesses */
- s_lock_init(&imen_lock);
+ s_lock_init((struct simplelock*)&imen_lock);
}
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index ff0390a..cf5078d 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mp_machdep.c,v 1.22 1997/07/28 03:39:06 smp Exp smp $
+ * $Id: mp_machdep.c,v 1.23 1997/07/30 22:51:11 smp Exp smp $
*/
#include "opt_smp.h"
@@ -1420,7 +1420,7 @@ init_locks(void)
mp_lock = 0x00000001;
/* locks the IO APIC and apic_imen accesses */
- s_lock_init(&imen_lock);
+ s_lock_init((struct simplelock*)&imen_lock);
}