author		jake <jake@FreeBSD.org>	2002-03-29 16:35:26 +0000
committer	jake <jake@FreeBSD.org>	2002-03-29 16:35:26 +0000
commit		8f9ce8398dc5c2f244495c3d0f279c47f0c2d58d
tree		e9eef5126c7bc7935260d949155e4f3eb728510f /sys
parent		1787e9ff8d12fd561c9345471ac9c345fa9251dd
Remove abuse of intr_disable/restore in MI code by moving the loop in ast()
back into the calling MD code. The MD code must ensure no races between
checking the astpending flag and returning to usermode.

Submitted by:	peter (ia64 bits)
Tested on:	alpha (peter, jeff), i386, ia64 (peter), sparc64
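In rough C terms, the loop each MD return path now implements looks like the
sketch below. This is a minimal user-space illustration, not kernel code:
intr_disable_all() and intr_enable_all() are hypothetical stand-ins for the
per-architecture interrupt masking (swpipl on alpha, psr.i on ia64, %pil on
sparc64).

    #include <stdio.h>

    #define KEF_ASTPENDING  0x01
    #define KEF_NEEDRESCHED 0x02

    static volatile int ke_flags = KEF_ASTPENDING;  /* set by "interrupts" */

    static void intr_disable_all(void) { /* raise IPL; stub here */ }
    static void intr_enable_all(void)  { /* restore IPL; stub here */ }

    /* One pass of AST handling; clears the flags it services. */
    static void ast(void)
    {
        ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED);
        printf("ast() handled pending events\n");
    }

    int main(void)
    {
        /*
         * The MD return path: test the flags with interrupts disabled so
         * nothing can post an AST between the test and the return to
         * usermode. If the flags are clear, leave interrupts masked; the
         * eventual return to usermode re-enables them atomically.
         */
        for (;;) {
            intr_disable_all();
            if ((ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) == 0)
                break;              /* return to usermode, still masked */
            intr_enable_all();      /* ast() runs with interrupts on */
            ast();
        }
        printf("returning to usermode\n");
        return 0;
    }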
Diffstat (limited to 'sys')
-rw-r--r--	sys/alpha/alpha/exception.s	| 25
-rw-r--r--	sys/alpha/alpha/genassym.c	| 5
-rw-r--r--	sys/i386/isa/ipl.s	| 1
-rw-r--r--	sys/ia64/ia64/exception.S	| 34
-rw-r--r--	sys/ia64/ia64/genassym.c	| 6
-rw-r--r--	sys/kern/subr_trap.c	| 14
-rw-r--r--	sys/sparc64/sparc64/exception.S	| 27
7 files changed, 90 insertions, 22 deletions
diff --git a/sys/alpha/alpha/exception.s b/sys/alpha/alpha/exception.s
index 4e4fc9e..302216f 100644
--- a/sys/alpha/alpha/exception.s
+++ b/sys/alpha/alpha/exception.s
@@ -130,16 +130,26 @@
CALL(syscall)
/* Handle any AST's. */
+2: ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
+ call_pal PAL_OSF1_swpipl
+ ldq s0, PC_CURTHREAD(pcpup) /* checking for pending asts */
+ ldq s1, TD_KSE(s0) /* atomically with returning */
+ ldl s1, KE_FLAGS(s1)
+ ldiq s2, KEF_ASTPENDING | KEF_NEEDRESCHED
+ and s1, s2
+ beq s1, 3f
+ ldiq a0, ALPHA_PSL_IPL_0 /* reenable interrupts */
+ call_pal PAL_OSF1_swpipl
mov sp, a0 /* only arg is frame */
CALL(ast)
+ jmp zero, 2b
/* see if we need a full exception_return */
- ldq t1, (FRAME_FLAGS*8)(sp)
+3: ldq t1, (FRAME_FLAGS*8)(sp)
and t1, FRAME_FLAGS_SYSCALL
beq t1, exception_return
/* set the hae register if this process has specified a value */
- ldq s0, PC_CURTHREAD(pcpup)
ldq t1, TD_MD_FLAGS(s0)
and t1, MDP_HAEUSED
beq t1, 3f
@@ -266,8 +276,19 @@ Ler1: LDGP(pv)
beq t0, Lkernelret /* no: kernel return */
/* Handle any AST's or resched's. */
+1: ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
+ call_pal PAL_OSF1_swpipl
+ ldq s2, TD_KSE(s0) /* checking for pending asts */
+ ldl s2, KE_FLAGS(s2) /* atomically with returning */
+ ldiq s3, KEF_ASTPENDING | KEF_NEEDRESCHED
+ and s2, s3
+ beq s2, 2f
+ ldiq a0, ALPHA_PSL_IPL_0 /* reenable interrupts */
+ call_pal PAL_OSF1_swpipl
mov sp, a0 /* only arg is frame */
CALL(ast)
+ jmp zero, 1b
+2:
#ifdef SMP
br Lrestoreregs
#endif
diff --git a/sys/alpha/alpha/genassym.c b/sys/alpha/alpha/genassym.c
index b42ba24..62ff3a4 100644
--- a/sys/alpha/alpha/genassym.c
+++ b/sys/alpha/alpha/genassym.c
@@ -81,6 +81,11 @@ ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_KSE, offsetof(struct thread, td_kse));
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
+ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
+
+ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
+ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
+
ASSYM(TD_MD_FLAGS, offsetof(struct thread, td_md.md_flags));
ASSYM(TD_MD_PCBPADDR, offsetof(struct thread, td_md.md_pcbpaddr));
ASSYM(TD_MD_HAE, offsetof(struct thread, td_md.md_hae));
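The genassym.c additions (here and in the ia64 copy below) export the struct
offsets and flag values the new assembly code needs. As a rough sketch of the
general technique, in the old generator-program style rather than FreeBSD's
actual ASSYM() object-file extraction, such a file amounts to the following
(stand-in struct layouts, not the real ones):

    #include <stdio.h>
    #include <stddef.h>

    struct kse    { int ke_flags; };            /* stand-in layout */
    struct thread { struct kse *td_kse; };      /* stand-in layout */

    int main(void)
    {
        /* Emit offsets as cpp defines for the assembler to include. */
        printf("#define TD_KSE %zu\n", offsetof(struct thread, td_kse));
        printf("#define KE_FLAGS %zu\n", offsetof(struct kse, ke_flags));
        return 0;
    }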
diff --git a/sys/i386/isa/ipl.s b/sys/i386/isa/ipl.s
index c1b1d86..78527f6 100644
--- a/sys/i386/isa/ipl.s
+++ b/sys/i386/isa/ipl.s
@@ -80,6 +80,7 @@ doreti_ast:
pushl %esp /* pass a pointer to the trapframe */
call ast
add $4,%esp
+ jmp doreti_ast
/*
* doreti_exit: pop registers, iret.
diff --git a/sys/ia64/ia64/exception.S b/sys/ia64/ia64/exception.S
index 780dc3c..fabff37 100644
--- a/sys/ia64/ia64/exception.S
+++ b/sys/ia64/ia64/exception.S
@@ -822,10 +822,38 @@ ENTRY(exception_restore, 0)
extr.u r16=rIPSR,32,2 // extract ipsr.cpl
;;
cmp.eq p1,p2=r0,r16 // test for return to kernel mode
+(p1) br.cond.dpnt 2f // skip ast checking for returns to kernel
+3:
+ add r3=PC_CURTHREAD,r13 // &curthread
;;
-(p2) add out0=16,sp // trapframe argument to ast()
-(p2) br.call.dptk.many rp=ast // note: p1, p2 preserved
-
+ ld8 r3=[r3] // curthread
+ add r2=(KEF_ASTPENDING|KEF_NEEDRESCHED),r0
+ ;;
+ add r3=TD_KSE,r3 // &curthread->td_kse
+ mov r15=psr // save interrupt enable status
+ ;;
+ ld8 r3=[r3] // curkse
+ ;;
+ add r3=KE_FLAGS,r3 // &curkse->ke_flags
+ rsm psr.i // disable interrupts
+ ;;
+ ld4 r14=[r3] // fetch curkse->ke_flags
+ ;;
+ and r14=r2,r14 // flags & (KEF_ASTPENDING|KEF_NEEDRESCHED)
+ ;;
+ cmp4.eq p6,p7=r0,r14 // == 0 ?
+(p6) br.cond.dptk 2f
+ ;;
+ mov psr.l=r15 // restore interrupts
+ ;;
+ srlz.d
+ ;;
+ add out0=16,sp // trapframe argument to ast()
+ br.call.sptk.many rp=ast // note: p1, p2 preserved
+ ;;
+ br 3b
+ ;;
+2:
rsm psr.ic|psr.dt|psr.i // disable interrupt collection and vm
add r3=16,sp;
;;
diff --git a/sys/ia64/ia64/genassym.c b/sys/ia64/ia64/genassym.c
index 99e74c6..8dbc394 100644
--- a/sys/ia64/ia64/genassym.c
+++ b/sys/ia64/ia64/genassym.c
@@ -76,9 +76,15 @@ ASSYM(MTX_UNOWNED, MTX_UNOWNED);
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
+ASSYM(TD_KSE, offsetof(struct thread, td_kse));
ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack));
ASSYM(TD_MD_FLAGS, offsetof(struct thread, td_md.md_flags));
+ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
+
+ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
+ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
+
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
ASSYM(FRAME_SYSCALL, FRAME_SYSCALL);
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 6fdb9c3..81b4a75 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -123,7 +123,6 @@ ast(framep)
struct proc *p = td->td_proc;
struct kse *ke = td->td_kse;
u_int prticks, sticks;
- register_t s;
int sflag;
int flags;
#if defined(DEV_NPX) && !defined(SMP)
@@ -137,16 +136,13 @@ ast(framep)
#endif
mtx_assert(&Giant, MA_NOTOWNED);
prticks = 0; /* XXX: Quiet warning. */
- s = intr_disable();
- while ((ke->ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) != 0) {
- intr_restore(s);
td->td_frame = framep;
/*
* This updates the p_sflag's for the checks below in one
* "atomic" operation with turning off the astpending flag.
* If another AST is triggered while we are handling the
* AST's saved in sflag, the astpending flag will be set and
- * we will loop again.
+ * ast() will be called again.
*/
mtx_lock_spin(&sched_lock);
sticks = ke->ke_sticks;
@@ -190,13 +186,5 @@ ast(framep)
#ifdef DIAGNOSTIC
cred_free_thread(td);
#endif
- s = intr_disable();
- }
mtx_assert(&Giant, MA_NOTOWNED);
- /*
- * We need to keep interrupts disabled so that if any further AST's
- * come in, the interrupt they come in on will be delayed until we
- * finish returning to userland. We assume that the return to userland
- * will perform the equivalent of intr_restore().
- */
}
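With the loop removed, ast() makes a single pass and relies on the MD caller
to re-check the flags. A minimal sketch of that contract, using a C11 atomic
where the real code updates ke_flags under sched_lock (ast_once is a
hypothetical name, not the kernel's):

    #include <stdatomic.h>

    #define KEF_ASTPENDING  0x01
    #define KEF_NEEDRESCHED 0x02

    static atomic_int ke_flags;

    static void ast_once(void)
    {
        /*
         * Snapshot and clear the flags in one step: anything posted
         * after this point re-sets KEF_ASTPENDING, and the MD loop's
         * interrupt-masked re-check will call us again.
         */
        int flags = atomic_fetch_and(&ke_flags,
            ~(KEF_ASTPENDING | KEF_NEEDRESCHED));

        if (flags & KEF_NEEDRESCHED) {
            /* ... yield to a higher-priority thread ... */
        }
        /* ... signal delivery, profiling hooks, etc. ... */
    }

    int main(void)
    {
        atomic_store(&ke_flags, KEF_ASTPENDING);
        ast_once();     /* one pass; the MD loop would re-check */
        return 0;
    }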
diff --git a/sys/sparc64/sparc64/exception.S b/sys/sparc64/sparc64/exception.S
index 3e3b79e..8bc5ff7 100644
--- a/sys/sparc64/sparc64/exception.S
+++ b/sys/sparc64/sparc64/exception.S
@@ -2249,19 +2249,38 @@ ENTRY(tl0_ret)
9:
#endif
- wrpr %g0, PIL_TICK, %pil
+ /*
+ * Check for pending asts atomically with returning. We must raise
+ * the pil before checking, and if no asts are found the pil must
+ * remain raised until the retry is executed, or we risk missing asts
+ * caused by interrupts occurring after the test. If the pil is lowered,
+ * as it is when we call ast, the check must be re-executed.
+ */
+1: wrpr %g0, PIL_TICK, %pil
ldx [PCPU(CURTHREAD)], %l0
ldx [%l0 + TD_KSE], %l1
lduw [%l1 + KE_FLAGS], %l2
and %l2, KEF_ASTPENDING | KEF_NEEDRESCHED, %l2
- brz,pt %l2, 1f
+ brz,a,pt %l2, 2f
nop
+ wrpr %g0, 0, %pil
call ast
add %sp, CCFSZ + SPOFF, %o0
+ ba,a %xcc, 1b
+ nop
-1: ldx [PCB_REG + PCB_NSAVED], %l1
+ /*
+ * Check for windows that were spilled to the pcb and need to be
+ * copied out. This must be the last thing that is done before the
+ * return to usermode. If there are still user windows in the cpu
+ * and we call a nested function after this, which causes them to be
+ * spilled to the pcb, they will not be copied out and the stack will
+ * be inconsistent.
+ */
+2: ldx [PCB_REG + PCB_NSAVED], %l1
+ mov T_SPILL, %o0
brnz,a,pn %l1, .Ltl0_trap_reenter
- mov T_SPILL, %o0
+ wrpr %g0, 0, %pil
ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1