author     dyson <dyson@FreeBSD.org>  1998-05-12 18:37:10 +0000
committer  dyson <dyson@FreeBSD.org>  1998-05-12 18:37:10 +0000
commit     af13c26695e4da4355b94ed954530e173ad82a86 (patch)
tree       b4f886e5e464b2452aeb9b4daa22217d66a58ba7
parent     154b3ce4e894ba5ac645f208cf9313e248f149a4 (diff)
download   FreeBSD-src-af13c26695e4da4355b94ed954530e173ad82a86.zip
           FreeBSD-src-af13c26695e4da4355b94ed954530e173ad82a86.tar.gz
Some temporary fixes to SMP to make it more scheduling- and signal-friendly.

This is the result of discussions on the mailing lists. Kudos to those who
found the issue and created work-arounds. I have chosen Tor's fix for now,
until we can all work the issue more completely.

Submitted by: Tor Egge
-rw-r--r--  sys/amd64/amd64/cpu_switch.S  16
-rw-r--r--  sys/amd64/amd64/swtch.s       16
-rw-r--r--  sys/i386/i386/swtch.s         16
3 files changed, 39 insertions, 9 deletions
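For readers skimming the diff below: the change makes the SMP idle loop cooperate with AST (asynchronous system trap) delivery. While an idle CPU holds the giant MP lock and scans the run queues, the shared priority level _cpl is raised to SWI_AST_MASK under CPL_LOCK so the other CPU will not take ASTs; when the idle CPU gives the lock back, any stale SWI_AST_MASK bit is cleared from _ipending and _cpl is dropped to 0, allowing ASTs again. The user-space C sketch below only illustrates that lock/mask choreography under those assumptions; it is not the kernel code, and the names idle_cpu(), other_cpu() and runqueue_empty() are invented for the example.

/*
 * Hypothetical user-space sketch of the pattern in this patch, not the
 * kernel code: one thread plays the idle CPU, the other plays the CPU
 * that wants to deliver an AST.  cpl_lock stands in for CPL_LOCK,
 * mp_lock for _mp_lock, cpl for _cpl and ipending for _ipending.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SWI_AST_MASK 0x1u

static pthread_mutex_t cpl_lock = PTHREAD_MUTEX_INITIALIZER; /* CPL_LOCK */
static pthread_mutex_t mp_lock  = PTHREAD_MUTEX_INITIALIZER; /* _mp_lock */
static unsigned cpl;        /* _cpl      */
static unsigned ipending;   /* _ipending */
static atomic_int stop;

static int runqueue_empty(void) { return 1; }  /* pretend there is never work */

/* "Idle CPU": mirrors the patched idle_loop. */
static void *idle_cpu(void *arg)
{
    (void)arg;
    while (!atomic_load(&stop)) {
        pthread_mutex_lock(&mp_lock);           /* call _get_mplock */

        pthread_mutex_lock(&cpl_lock);          /* CPL_LOCK */
        cpl = SWI_AST_MASK;                     /* disallow ASTs on other CPU */
        pthread_mutex_unlock(&cpl_lock);        /* CPL_UNLOCK */

        if (runqueue_empty()) {
            pthread_mutex_lock(&cpl_lock);      /* CPL_LOCK */
            ipending &= ~SWI_AST_MASK;          /* drop any stale pending AST */
            cpl = 0;                            /* allow ASTs on other CPU again */
            pthread_mutex_unlock(&cpl_lock);    /* CPL_UNLOCK */
            pthread_mutex_unlock(&mp_lock);     /* call _rel_mplock */
        }
        /* a real idle loop would switch to the chosen process here */
    }
    return NULL;
}

/* "Other CPU": may only take an AST while cpl does not mask it. */
static void *other_cpu(void *arg)
{
    int delivered = 0;
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        pthread_mutex_lock(&cpl_lock);
        if ((cpl & SWI_AST_MASK) == 0)
            delivered++;                        /* AST would be taken here */
        else
            ipending |= SWI_AST_MASK;           /* otherwise it stays pending */
        pthread_mutex_unlock(&cpl_lock);
    }
    printf("ASTs deliverable in %d of 100000 attempts\n", delivered);
    atomic_store(&stop, 1);
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, idle_cpu, NULL);
    pthread_create(&t2, NULL, other_cpu, NULL);
    pthread_join(t2, NULL);
    pthread_join(t1, NULL);
    return 0;
}

Built with something like cc -pthread sketch.c, the second thread only finds the AST deliverable in the windows where the idle thread has dropped cpl back to 0, which is exactly the scheduling/signal friendliness the patch restores.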
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 7a108dd..81afd30 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.70 1998/03/28 11:49:31 dufault Exp $
+ * $Id: swtch.s,v 1.71 1998/04/06 15:44:31 peter Exp $
*/
#include "npx.h"
@@ -49,6 +49,8 @@
#include <machine/pmap.h>
#include <machine/apic.h>
#include <machine/smptests.h> /** GRAB_LOPRIO */
+#include <machine/ipl.h>
+#include <machine/lock.h>
#endif /* SMP */
#include "assym.s"
@@ -308,6 +310,10 @@ _idle:
*
* XXX: we had damn well better be sure we had it before doing this!
*/
+ CPL_LOCK /* XXX */
+ andl $~SWI_AST_MASK, _ipending /* XXX */
+ movl $0, _cpl /* XXX Allow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
movl $FREE_LOCK, %eax
movl %eax, _mp_lock
@@ -357,16 +363,20 @@ idle_loop:
jmp idle_loop
3:
-#ifdef SMP
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
-#endif
call _get_mplock
+ CPL_LOCK /* XXX */
+ movl $SWI_AST_MASK, _cpl /* XXX Disallow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
cmpl $0,_whichqs /* normal queue */
CROSSJUMP(jne, nortqr, je)
cmpl $0,_whichidqs /* 'idle' queue */
CROSSJUMP(jne, idqr, je)
+ CPL_LOCK /* XXX */
+ movl $0, _cpl /* XXX Allow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
call _rel_mplock
jmp idle_loop
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 7a108dd..81afd30 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.70 1998/03/28 11:49:31 dufault Exp $
+ * $Id: swtch.s,v 1.71 1998/04/06 15:44:31 peter Exp $
*/
#include "npx.h"
@@ -49,6 +49,8 @@
#include <machine/pmap.h>
#include <machine/apic.h>
#include <machine/smptests.h> /** GRAB_LOPRIO */
+#include <machine/ipl.h>
+#include <machine/lock.h>
#endif /* SMP */
#include "assym.s"
@@ -308,6 +310,10 @@ _idle:
*
* XXX: we had damn well better be sure we had it before doing this!
*/
+ CPL_LOCK /* XXX */
+ andl $~SWI_AST_MASK, _ipending /* XXX */
+ movl $0, _cpl /* XXX Allow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
movl $FREE_LOCK, %eax
movl %eax, _mp_lock
@@ -357,16 +363,20 @@ idle_loop:
jmp idle_loop
3:
-#ifdef SMP
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
-#endif
call _get_mplock
+ CPL_LOCK /* XXX */
+ movl $SWI_AST_MASK, _cpl /* XXX Disallow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
cmpl $0,_whichqs /* normal queue */
CROSSJUMP(jne, nortqr, je)
cmpl $0,_whichidqs /* 'idle' queue */
CROSSJUMP(jne, idqr, je)
+ CPL_LOCK /* XXX */
+ movl $0, _cpl /* XXX Allow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
call _rel_mplock
jmp idle_loop
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index 7a108dd..81afd30 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.70 1998/03/28 11:49:31 dufault Exp $
+ * $Id: swtch.s,v 1.71 1998/04/06 15:44:31 peter Exp $
*/
#include "npx.h"
@@ -49,6 +49,8 @@
#include <machine/pmap.h>
#include <machine/apic.h>
#include <machine/smptests.h> /** GRAB_LOPRIO */
+#include <machine/ipl.h>
+#include <machine/lock.h>
#endif /* SMP */
#include "assym.s"
@@ -308,6 +310,10 @@ _idle:
*
* XXX: we had damn well better be sure we had it before doing this!
*/
+ CPL_LOCK /* XXX */
+ andl $~SWI_AST_MASK, _ipending /* XXX */
+ movl $0, _cpl /* XXX Allow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
movl $FREE_LOCK, %eax
movl %eax, _mp_lock
@@ -357,16 +363,20 @@ idle_loop:
jmp idle_loop
3:
-#ifdef SMP
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
-#endif
call _get_mplock
+ CPL_LOCK /* XXX */
+ movl $SWI_AST_MASK, _cpl /* XXX Disallow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
cmpl $0,_whichqs /* normal queue */
CROSSJUMP(jne, nortqr, je)
cmpl $0,_whichidqs /* 'idle' queue */
CROSSJUMP(jne, idqr, je)
+ CPL_LOCK /* XXX */
+ movl $0, _cpl /* XXX Allow ASTs on other CPU */
+ CPL_UNLOCK /* XXX */
call _rel_mplock
jmp idle_loop