-rw-r--r--   sys/amd64/amd64/cpu_switch.S | 12 ++----------
-rw-r--r--   sys/amd64/amd64/swtch.s      | 12 ++----------
-rw-r--r--   sys/i386/i386/swtch.s        | 12 ++----------
3 files changed, 6 insertions, 30 deletions
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index eacef02..55932a5 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.52 1997/06/07 04:36:10 bde Exp $
+ * $Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
  */
 
 #include "npx.h"
@@ -274,22 +274,17 @@ _idle:
  * XXX callers of cpu_switch() do a bogus splclock(). Locking should
  * be left to cpu_switch().
  */
-        movl    $SWI_AST_MASK,_cpl
-        testl   $~SWI_AST_MASK,_ipending
-        je      idle_loop
-        call    _splz
+        call    _spl0
 
         ALIGN_TEXT
 idle_loop:
         cli
-        movb    $1,_intr_nesting_level          /* charge Intr if we leave */
         cmpl    $0,_whichrtqs                   /* real-time queue */
         CROSSJUMP(jne, sw1a, je)
         cmpl    $0,_whichqs                     /* normal queue */
         CROSSJUMP(jne, nortqr, je)
         cmpl    $0,_whichidqs                   /* 'idle' queue */
         CROSSJUMP(jne, idqr, je)
-        movb    $0,_intr_nesting_level          /* charge Idle for this loop */
         call    _vm_page_zero_idle
         testl   %eax, %eax
         jnz     idle_loop
@@ -353,8 +348,6 @@ ENTRY(cpu_switch)
 1:
 #endif  /* NNPX > 0 */
 
-        movb    $1,_intr_nesting_level          /* charge Intr, not Sys/Idle */
-
         movl    $0,_curproc                     /* out of process */
 
         /* save is done, now choose a new process or idle */
@@ -520,7 +513,6 @@ swtch_com:
         movl    %edx,_curpcb
         movl    %ecx,_curproc                   /* into next process */
 
-        movb    $0,_intr_nesting_level
 #ifdef SMP
 #if defined(TEST_LOPRIO)
         /* Set us to prefer to get irq's from the apic since we have the lock */
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index eacef02..55932a5 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.52 1997/06/07 04:36:10 bde Exp $
+ * $Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
  */
 
 #include "npx.h"
@@ -274,22 +274,17 @@ _idle:
  * XXX callers of cpu_switch() do a bogus splclock(). Locking should
  * be left to cpu_switch().
  */
-        movl    $SWI_AST_MASK,_cpl
-        testl   $~SWI_AST_MASK,_ipending
-        je      idle_loop
-        call    _splz
+        call    _spl0
 
         ALIGN_TEXT
 idle_loop:
         cli
-        movb    $1,_intr_nesting_level          /* charge Intr if we leave */
         cmpl    $0,_whichrtqs                   /* real-time queue */
         CROSSJUMP(jne, sw1a, je)
         cmpl    $0,_whichqs                     /* normal queue */
         CROSSJUMP(jne, nortqr, je)
         cmpl    $0,_whichidqs                   /* 'idle' queue */
         CROSSJUMP(jne, idqr, je)
-        movb    $0,_intr_nesting_level          /* charge Idle for this loop */
         call    _vm_page_zero_idle
         testl   %eax, %eax
         jnz     idle_loop
@@ -353,8 +348,6 @@ ENTRY(cpu_switch)
 1:
 #endif  /* NNPX > 0 */
 
-        movb    $1,_intr_nesting_level          /* charge Intr, not Sys/Idle */
-
         movl    $0,_curproc                     /* out of process */
 
         /* save is done, now choose a new process or idle */
@@ -520,7 +513,6 @@ swtch_com:
         movl    %edx,_curpcb
         movl    %ecx,_curproc                   /* into next process */
 
-        movb    $0,_intr_nesting_level
#ifdef SMP
 #if defined(TEST_LOPRIO)
         /* Set us to prefer to get irq's from the apic since we have the lock */
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index eacef02..55932a5 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.52 1997/06/07 04:36:10 bde Exp $
+ * $Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
  */
 
 #include "npx.h"
@@ -274,22 +274,17 @@ _idle:
  * XXX callers of cpu_switch() do a bogus splclock(). Locking should
  * be left to cpu_switch().
  */
-        movl    $SWI_AST_MASK,_cpl
-        testl   $~SWI_AST_MASK,_ipending
-        je      idle_loop
-        call    _splz
+        call    _spl0
 
         ALIGN_TEXT
 idle_loop:
         cli
-        movb    $1,_intr_nesting_level          /* charge Intr if we leave */
         cmpl    $0,_whichrtqs                   /* real-time queue */
         CROSSJUMP(jne, sw1a, je)
         cmpl    $0,_whichqs                     /* normal queue */
         CROSSJUMP(jne, nortqr, je)
         cmpl    $0,_whichidqs                   /* 'idle' queue */
         CROSSJUMP(jne, idqr, je)
-        movb    $0,_intr_nesting_level          /* charge Idle for this loop */
         call    _vm_page_zero_idle
         testl   %eax, %eax
         jnz     idle_loop
@@ -353,8 +348,6 @@ ENTRY(cpu_switch)
 1:
 #endif  /* NNPX > 0 */
 
-        movb    $1,_intr_nesting_level          /* charge Intr, not Sys/Idle */
-
         movl    $0,_curproc                     /* out of process */
 
         /* save is done, now choose a new process or idle */
@@ -520,7 +513,6 @@ swtch_com:
         movl    %edx,_curpcb
         movl    %ecx,_curproc                   /* into next process */
 
-        movb    $0,_intr_nesting_level
 #ifdef SMP
 #if defined(TEST_LOPRIO)
         /* Set us to prefer to get irq's from the apic since we have the lock */
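
Note (reader's gloss, not part of the commit): the first hunk replaces an open-coded lower-and-flush sequence at the idle entry with a single call to _spl0, and the remaining hunks drop the _intr_nesting_level stores that, per their own comments, only charged time to Intr versus Sys/Idle for statistics. Below is a rough, self-contained C sketch of what the four deleted instructions did; the SWI_AST_MASK value, the printf-based splz(), and the function names are illustrative stand-ins, not the kernel's actual definitions.

    /*
     * Hedged sketch, not FreeBSD source: the globals, the SWI_AST_MASK
     * value, and the printf-based splz() are stand-ins for illustration.
     */
    #include <stdio.h>

    #define SWI_AST_MASK 0x80000000u    /* placeholder bit, assumption */

    static unsigned int cpl;            /* current interrupt priority mask */
    static unsigned int ipending;       /* bits for interrupts held pending */

    static void splz(void)
    {
            /* stand-in: the real routine dispatches the pending handlers */
            printf("servicing pending mask 0x%x\n", ipending);
    }

    /* Roughly what the four deleted instructions at the idle entry did. */
    static void old_idle_entry(void)
    {
            cpl = SWI_AST_MASK;             /* movl  $SWI_AST_MASK,_cpl       */
            if (ipending & ~SWI_AST_MASK)   /* testl $~SWI_AST_MASK,_ipending */
                    splz();                 /* taken path: call _splz         */
    }

    int main(void)
    {
            ipending = 0x4;                 /* pretend one interrupt is pending */
            old_idle_entry();
            return (0);
    }

Under that reading, spl0() performs the same lower-the-mask-and-service-pending-interrupts step in one place, which would be why the inline version could be deleted; this is an interpretation of the diff, not a statement from the commit itself.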