author     bde <bde@FreeBSD.org>                         1997-07-01 01:34:30 +0000
committer  bde <bde@FreeBSD.org>                         1997-07-01 01:34:30 +0000
commit     1e93934bf07fa1f51974db834a85098f54791d89 (patch)
tree       7c81e8da568145349066692c2c1e8fbf0085315f /sys
parent     fa60bb8b2bc6f413a44592a14ea0e6bda4b89d22 (diff)
download   FreeBSD-src-1e93934bf07fa1f51974db834a85098f54791d89.zip
           FreeBSD-src-1e93934bf07fa1f51974db834a85098f54791d89.tar.gz
Un-inline a call to spl0().  It is not time-critical, and was inlined
only because there was no non-inline spl0() to call.
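[Editorial note: the inline sequence being removed, and the non-inline
spl0() that replaces it, both do the same job: drop the priority mask to
its lowest level and then dispatch any interrupts that were held pending.
A minimal C sketch of that logic follows; the names cpl, ipending,
SWI_AST_MASK, and splz() come from the removed assembly, but their
definitions here are placeholders, not the kernel's actual code.]

    /*
     * Illustrative sketch only -- not the kernel's actual spl0().
     * Everything below except the four names taken from the diff
     * (cpl, ipending, SWI_AST_MASK, splz) is an assumption.
     */
    #define SWI_AST_MASK    0x80000000u     /* placeholder value */

    static unsigned cpl;                    /* current priority-level mask */
    static unsigned ipending;               /* bitmask of pending interrupts */

    static void
    splz(void)
    {
            /* stub: dispatch pending interrupts now unmasked by cpl */
    }

    void
    spl0(void)
    {
            cpl = SWI_AST_MASK;             /* unmask all but the AST swi */
            if ((ipending & ~SWI_AST_MASK) != 0)
                    splz();                 /* service what was held pending */
    }

[Inlined, those four instructions were stamped into each caller; as a
plain function, the idle loop pays only one call instruction, which is
acceptable because this path is not time-critical.]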
Don't frob intr_nesting_level in idle() or cpu_switch().  Interrupts
are mostly disabled at those points, so the frobbing had little effect.
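[Editorial note: the "charge Intr"/"charge Idle" comments on the deleted
movb instructions refer to statistics-clock accounting: when a statclock
tick lands, intr_nesting_level helps decide which bucket the tick is
billed to.  A hedged sketch of that decision is below; the tick counters
and the helper are invented for illustration, and only intr_nesting_level
itself appears in the source.]

    /*
     * Illustrative sketch of the accounting the deleted "charge ..."
     * comments refer to.  The counters and this helper are invented;
     * the real statclock() logic is more involved.
     */
    static unsigned char intr_nesting_level;
    static long ticks_intr, ticks_sys, ticks_idle;

    static void
    charge_statclock_tick(int curproc_valid)
    {
            if (intr_nesting_level != 0)
                    ticks_intr++;   /* bill the tick to interrupt time */
            else if (curproc_valid)
                    ticks_sys++;    /* bill it to the running process */
            else
                    ticks_idle++;   /* no current process: bill idle */
    }

[Because the idle loop runs with interrupts disabled (note the cli), a
statclock tick can rarely, if ever, sample the frobbed values, which is
why deleting the movb instructions has little observable effect.]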
Diffstat (limited to 'sys')

-rw-r--r--   sys/amd64/amd64/cpu_switch.S   12
-rw-r--r--   sys/amd64/amd64/swtch.s        12
-rw-r--r--   sys/i386/i386/swtch.s          12

3 files changed, 6 insertions, 30 deletions
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index eacef02..55932a5 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	$Id: swtch.s,v 1.52 1997/06/07 04:36:10 bde Exp $
+ *	$Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
  */
 
 #include "npx.h"
@@ -274,22 +274,17 @@ _idle:
  * XXX callers of cpu_switch() do a bogus splclock().  Locking should
  * be left to cpu_switch().
  */
-	movl	$SWI_AST_MASK,_cpl
-	testl	$~SWI_AST_MASK,_ipending
-	je	idle_loop
-	call	_splz
+	call	_spl0
 
 	ALIGN_TEXT
 idle_loop:
 	cli
-	movb	$1,_intr_nesting_level		/* charge Intr if we leave */
 	cmpl	$0,_whichrtqs			/* real-time queue */
 	CROSSJUMP(jne, sw1a, je)
 	cmpl	$0,_whichqs			/* normal queue */
 	CROSSJUMP(jne, nortqr, je)
 	cmpl	$0,_whichidqs			/* 'idle' queue */
 	CROSSJUMP(jne, idqr, je)
-	movb	$0,_intr_nesting_level		/* charge Idle for this loop */
 	call	_vm_page_zero_idle
 	testl	%eax, %eax
 	jnz	idle_loop
@@ -353,8 +348,6 @@ ENTRY(cpu_switch)
 1:
 #endif	/* NNPX > 0 */
 
-	movb	$1,_intr_nesting_level		/* charge Intr, not Sys/Idle */
-
 	movl	$0,_curproc			/* out of process */
 
 	/* save is done, now choose a new process or idle */
@@ -520,7 +513,6 @@ swtch_com:
 	movl	%edx,_curpcb
 	movl	%ecx,_curproc			/* into next process */
 
-	movb	$0,_intr_nesting_level
 #ifdef SMP
 #if defined(TEST_LOPRIO)
 	/* Set us to prefer to get irq's from the apic since we have the lock */
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index eacef02..55932a5 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	$Id: swtch.s,v 1.52 1997/06/07 04:36:10 bde Exp $
+ *	$Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
  */
 
 #include "npx.h"
@@ -274,22 +274,17 @@ _idle:
  * XXX callers of cpu_switch() do a bogus splclock().  Locking should
  * be left to cpu_switch().
  */
-	movl	$SWI_AST_MASK,_cpl
-	testl	$~SWI_AST_MASK,_ipending
-	je	idle_loop
-	call	_splz
+	call	_spl0
 
 	ALIGN_TEXT
 idle_loop:
 	cli
-	movb	$1,_intr_nesting_level		/* charge Intr if we leave */
 	cmpl	$0,_whichrtqs			/* real-time queue */
 	CROSSJUMP(jne, sw1a, je)
 	cmpl	$0,_whichqs			/* normal queue */
 	CROSSJUMP(jne, nortqr, je)
 	cmpl	$0,_whichidqs			/* 'idle' queue */
 	CROSSJUMP(jne, idqr, je)
-	movb	$0,_intr_nesting_level		/* charge Idle for this loop */
 	call	_vm_page_zero_idle
 	testl	%eax, %eax
 	jnz	idle_loop
@@ -353,8 +348,6 @@ ENTRY(cpu_switch)
 1:
 #endif	/* NNPX > 0 */
 
-	movb	$1,_intr_nesting_level		/* charge Intr, not Sys/Idle */
-
 	movl	$0,_curproc			/* out of process */
 
 	/* save is done, now choose a new process or idle */
@@ -520,7 +513,6 @@ swtch_com:
 	movl	%edx,_curpcb
 	movl	%ecx,_curproc			/* into next process */
 
-	movb	$0,_intr_nesting_level
 #ifdef SMP
 #if defined(TEST_LOPRIO)
 	/* Set us to prefer to get irq's from the apic since we have the lock */
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index eacef02..55932a5 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	$Id: swtch.s,v 1.52 1997/06/07 04:36:10 bde Exp $
+ *	$Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
  */
 
 #include "npx.h"
@@ -274,22 +274,17 @@ _idle:
  * XXX callers of cpu_switch() do a bogus splclock().  Locking should
  * be left to cpu_switch().
  */
-	movl	$SWI_AST_MASK,_cpl
-	testl	$~SWI_AST_MASK,_ipending
-	je	idle_loop
-	call	_splz
+	call	_spl0
 
 	ALIGN_TEXT
idle_loop:
 	cli
-	movb	$1,_intr_nesting_level		/* charge Intr if we leave */
 	cmpl	$0,_whichrtqs			/* real-time queue */
 	CROSSJUMP(jne, sw1a, je)
 	cmpl	$0,_whichqs			/* normal queue */
 	CROSSJUMP(jne, nortqr, je)
 	cmpl	$0,_whichidqs			/* 'idle' queue */
 	CROSSJUMP(jne, idqr, je)
-	movb	$0,_intr_nesting_level		/* charge Idle for this loop */
 	call	_vm_page_zero_idle
 	testl	%eax, %eax
 	jnz	idle_loop
@@ -353,8 +348,6 @@ ENTRY(cpu_switch)
 1:
 #endif	/* NNPX > 0 */
 
-	movb	$1,_intr_nesting_level		/* charge Intr, not Sys/Idle */
-
 	movl	$0,_curproc			/* out of process */
 
 	/* save is done, now choose a new process or idle */
@@ -520,7 +513,6 @@ swtch_com:
 	movl	%edx,_curpcb
 	movl	%ecx,_curproc			/* into next process */
 
-	movb	$0,_intr_nesting_level
 #ifdef SMP
 #if defined(TEST_LOPRIO)
 	/* Set us to prefer to get irq's from the apic since we have the lock */