32 files changed, 171 insertions, 227 deletions
diff --git a/sys/alpha/alpha/exception.s b/sys/alpha/alpha/exception.s
index afeccb8..f2febd0 100644
--- a/sys/alpha/alpha/exception.s
+++ b/sys/alpha/alpha/exception.s
@@ -135,14 +135,10 @@ XentSys1:	LDGP(pv)
 	and	t1, FRAME_FLAGS_SYSCALL
 	beq	t1, exception_return
 
-	ldl	t2, GD_ASTPENDING(globalp)	/* AST pending? */
-	beq	t2, 2f				/* no: return */
-
-	/* We've got an AST.  Handle it. */
+	/* Handle any AST's. */
 	mov	sp, a0				/* only arg is frame */
 	CALL(ast)
 
-2:
 	/* set the hae register if this process has specified a value */
 	ldq	t0, GD_CURPROC(globalp)
 	beq	t0, 3f
@@ -264,12 +260,7 @@ Ler1:	LDGP(pv)
 	and	s1, ALPHA_PSL_USERMODE, t0	/* are we returning to user? */
 	beq	t0, Lrestoreregs		/* no: just return */
 
-	ldl	t2, GD_ASTPENDING(globalp)	/* AST pending? */
-	beq	t2, Lrestoreregs		/* no: return */
-
-	/* We've got an AST.  Handle it. */
-	ldiq	a0, ALPHA_PSL_IPL_0		/* drop IPL to zero */
-	call_pal PAL_OSF1_swpipl
+	/* Handle any AST's or resched's. */
 	mov	sp, a0				/* only arg is frame */
 	CALL(ast)
diff --git a/sys/alpha/alpha/genassym.c b/sys/alpha/alpha/genassym.c
index 10bac57..1417c90 100644
--- a/sys/alpha/alpha/genassym.c
+++ b/sys/alpha/alpha/genassym.c
@@ -73,7 +73,6 @@ ASSYM(GD_CURPCB, offsetof(struct globaldata, gd_curpcb));
 ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime));
 ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid));
 ASSYM(GD_IDLEPCBPHYS, offsetof(struct globaldata, gd_idlepcbphys));
-ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending));
 
 ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
 ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse));
diff --git a/sys/alpha/alpha/trap.c b/sys/alpha/alpha/trap.c
index eada017..523c79c 100644
--- a/sys/alpha/alpha/trap.c
+++ b/sys/alpha/alpha/trap.c
@@ -70,8 +70,6 @@
 #include <ddb/ddb.h>
 #endif
 
-u_int32_t want_resched;
-
 unsigned long	Sfloat_to_reg __P((unsigned int));
 unsigned int	reg_to_Sfloat __P((unsigned long));
 unsigned long	Tfloat_reg_cvt __P((unsigned long));
@@ -101,7 +99,7 @@ userret(p, frame, oticks)
 	struct trapframe *frame;
 	u_quad_t oticks;
 {
-	int sig, s;
+	int sig;
 
 	/* take pending signals */
 	while ((sig = CURSIG(p)) != 0) {
@@ -111,7 +109,7 @@ userret(p, frame, oticks)
 	}
 	mtx_lock_spin(&sched_lock);
 	p->p_priority = p->p_usrpri;
-	if (want_resched) {
+	if (resched_wanted()) {
 		/*
 		 * Since we are curproc, a clock interrupt could
 		 * change our priority without changing run queues
@@ -120,14 +118,12 @@ userret(p, frame, oticks)
 		 * before we switch()'ed, we might not be on the queue
 		 * indicated by our priority.
 		 */
-		s = splstatclock();
 		DROP_GIANT_NOSWITCH();
 		setrunqueue(p);
 		p->p_stats->p_ru.ru_nivcsw++;
 		mi_switch();
 		mtx_unlock_spin(&sched_lock);
 		PICKUP_GIANT();
-		splx(s);
 		while ((sig = CURSIG(p)) != 0) {
 			if (!mtx_owned(&Giant))
 				mtx_lock(&Giant);
@@ -759,22 +755,27 @@ void
 ast(framep)
 	struct trapframe *framep;
 {
-	register struct proc *p;
+	struct proc *p = CURPROC;
 	u_quad_t sticks;
 
-	p = curproc;
+	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
+
+	/*
+	 * We check for a pending AST here rather than in the assembly as
+	 * acquiring and releasing mutexes in assembly is not fun.
+	 */
 	mtx_lock_spin(&sched_lock);
+	if (!(astpending() || resched_wanted())) {
+		mtx_unlock_spin(&sched_lock);
+		return;
+	}
+
 	sticks = p->p_sticks;
-	mtx_unlock_spin(&sched_lock);
 	p->p_md.md_tf = framep;
-	if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0)
-		panic("ast and not user");
-
+	astoff();
 	cnt.v_soft++;
-
-	PCPU_SET(astpending, 0);
-	mtx_lock_spin(&sched_lock);
+	mtx_intr_enable(&sched_lock);
 	if (p->p_sflag & PS_OWEUPC) {
 		p->p_sflag &= ~PS_OWEUPC;
 		mtx_unlock_spin(&sched_lock);
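The ast() rework above is the heart of this change: the assembly stubs now call ast() unconditionally, and ast() itself decides under sched_lock whether there is any work to do. Reduced to its skeleton, the new entry gate looks like this (a readability sketch, not a complete copy of the function; CURPROC, astpending(), resched_wanted(), and astoff() are the macros added to sys/sys/proc.h at the end of this diff):

	void
	ast(framep)
		struct trapframe *framep;
	{
		struct proc *p = CURPROC;

		KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));

		mtx_lock_spin(&sched_lock);
		/* Bail out cheaply if neither flag is set; the common case. */
		if (!(astpending() || resched_wanted())) {
			mtx_unlock_spin(&sched_lock);
			return;
		}
		astoff();			/* clear PS_ASTPENDING while locked */
		mtx_intr_enable(&sched_lock);	/* reenable interrupts, keep the lock */
		/* ... profiling ticks, signal delivery, and resched follow ... */
	}
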
diff --git a/sys/alpha/include/cpu.h b/sys/alpha/include/cpu.h
index 8ecaaf3..3e0696f 100644
--- a/sys/alpha/include/cpu.h
+++ b/sys/alpha/include/cpu.h
@@ -62,45 +62,29 @@ struct clockframe {
 	struct trapframe	cf_tf;
 };
 
-#define	CLKF_USERMODE(framep)						\
-	(((framep)->cf_tf.tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0)
-#define	CLKF_PC(framep)		((framep)->cf_tf.tf_regs[FRAME_PC])
-#define	CLKF_INTR(framep)	(curproc->p_intr_nesting_level >= 2)
-
-/*
- * Preempt the current process if in interrupt from user mode,
- * or after the current trap/syscall if in system mode.
- */
-#define	need_resched()	do { want_resched = 1; aston(); } while (0)
+#define	TRAPF_USERMODE(framep)						\
+	(((framep)->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0)
+#define	TRAPF_PC(framep)	((framep)->tf_regs[FRAME_PC])
 
-#define	resched_wanted()	want_resched
+#define	CLKF_USERMODE(framep)	TRAPF_USERMODE(&(framep)->cf_tf)
+#define	CLKF_PC(framep)		TRAPF_PC(&(framep)->cf_tf)
+#define	CLKF_INTR(framep)	(curproc->p_intr_nesting_level >= 2)
 
 /*
- * Give a profiling tick to the current process when the user profiling
- * buffer pages are invalid.  On the hp300, request an ast to send us
- * through trap, marking the proc as needing a profiling tick.
+ * Arrange to handle pending profiling ticks before returning to user mode.
+ *
+ * XXX this is now poorly named and implemented.  It used to handle only a
+ * single tick and the PS_OWEUPC flag served as a counter.  Now there is a
+ * counter in the proc table and the flag isn't really necessary.
  */
 #define	need_proftick(p)	do {					\
-	mtx_lock_spin(&sched_lock);					\
+	mtx_lock_spin(&sched_lock);					\
 	(p)->p_sflag |= PS_OWEUPC;					\
-	mtx_unlock_spin(&sched_lock);					\
 	aston();							\
+	mtx_unlock_spin(&sched_lock);					\
 } while (0)
 
 /*
- * Notify the current process (p) that it has a signal pending,
- * process as soon as possible.
- */
-#define	signotify(p)	aston()
-
-#define	aston()	PCPU_SET(astpending, 1)
-
-#ifdef _KERNEL
-extern u_int32_t want_resched;	/* resched() was called */
-#endif
-
-
-/*
  * CTL_MACHDEP definitions.
  */
 #define	CPU_CONSDEV		1	/* dev_t: console terminal device */
diff --git a/sys/alpha/include/globaldata.h b/sys/alpha/include/globaldata.h
index 84dbf34..8e6cfbe 100644
--- a/sys/alpha/include/globaldata.h
+++ b/sys/alpha/include/globaldata.h
@@ -57,7 +57,6 @@ struct globaldata {
 	u_int32_t	gd_next_asn;		/* next ASN to allocate */
 	u_int32_t	gd_current_asngen;	/* ASN rollover check */
-	u_int		gd_astpending;
 	SLIST_ENTRY(globaldata) gd_allcpu;
 	int		gd_witness_spin_check;
 #ifdef KTR_PERCPU
diff --git a/sys/alpha/include/pcpu.h b/sys/alpha/include/pcpu.h
index 84dbf34..8e6cfbe 100644
--- a/sys/alpha/include/pcpu.h
+++ b/sys/alpha/include/pcpu.h
@@ -57,7 +57,6 @@ struct globaldata {
 	u_int32_t	gd_next_asn;		/* next ASN to allocate */
 	u_int32_t	gd_current_asngen;	/* ASN rollover check */
-	u_int		gd_astpending;
 	SLIST_ENTRY(globaldata) gd_allcpu;
 	int		gd_witness_spin_check;
#ifdef KTR_PERCPU
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index fbaceff..68b6c77 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -304,10 +304,9 @@ _Xcpuast:
 
 	FAKE_MCOUNT(13*4(%esp))
 
-	orl	$AST_PENDING, PCPU(ASTPENDING)	/* XXX */
+	MTX_LOCK_SPIN(sched_lock, 0)
 	movl	PCPU(CURPROC),%ebx
-	incl	P_INTR_NESTING_LEVEL(%ebx)
-	sti
+	orl	$PS_ASTPENDING, P_SFLAG(%ebx)
 
 	movl	PCPU(CPUID), %eax
 	lock
@@ -315,13 +314,13 @@ _Xcpuast:
 	lock
 	btrl	%eax, CNAME(resched_cpus)
 	jnc	2f
-	orl	$AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
+	orl	$PS_NEEDRESCHED, P_SFLAG(%ebx)
 	lock
 	incl	CNAME(want_resched_cnt)
 2:
+	MTX_UNLOCK_SPIN(sched_lock)
 	lock
 	incl	CNAME(cpuast_cnt)
-	decl	P_INTR_NESTING_LEVEL(%ebx)
 	MEXITCOUNT
 	jmp	_doreti
 1:
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 6d255df..20fe50a 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -180,9 +180,6 @@ sw1a:
 sw1b:
 	movl	%eax,%ecx
 
-	xorl	%eax,%eax
-	andl	$~AST_RESCHED,PCPU(ASTPENDING)
-
 #ifdef INVARIANTS
 	cmpb	$SRUN,P_STAT(%ecx)
 	jne	badsw2
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index d3bce3b..51662ba 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -230,7 +230,7 @@ calltrap:
  * temporarily altered for the pushfl - an interrupt might come in
  * and clobber the saved cs/eip.
  *
- * We do not obtain the MP lock, but the call to syscall2 might.  If it
+ * We do not obtain the MP lock, but the call to syscall might.  If it
  * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
@@ -250,11 +250,8 @@ IDTVEC(syscall)
 	movl	%eax,TF_EFLAGS(%esp)
 	movl	$7,TF_ERR(%esp)		/* sizeof "lcall 7,0" */
 	FAKE_MCOUNT(13*4(%esp))
-	call	_syscall2
+	call	_syscall
 	MEXITCOUNT
-	cli				/* atomic astpending access */
-	cmpl	$0,PCPU(ASTPENDING)	/* AST pending? */
-	je	doreti_syscall_ret	/* no, get out of here */
 	jmp	_doreti
 
 /*
@@ -264,7 +261,7 @@ IDTVEC(syscall)
  * rather than an IGT (interrupt gate).  Thus interrupts are enabled on
  * entry just as they are for a normal syscall.
 *
- * We do not obtain the MP lock, but the call to syscall2 might.  If it
+ * We do not obtain the MP lock, but the call to syscall might.  If it
 * does it will release the lock prior to returning.
 */
 	SUPERALIGN_TEXT
@@ -281,11 +278,8 @@ IDTVEC(int0x80_syscall)
 	mov	%ax,%fs
 	movl	$2,TF_ERR(%esp)		/* sizeof "int 0x80" */
 	FAKE_MCOUNT(13*4(%esp))
-	call	_syscall2
+	call	_syscall
 	MEXITCOUNT
-	cli				/* atomic astpending access */
-	cmpl	$0,PCPU(ASTPENDING)	/* AST pending? */
-	je	doreti_syscall_ret	/* no, get out of here */
 	jmp	_doreti
 
 ENTRY(fork_trampoline)
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index d3bce3b..51662ba 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -230,7 +230,7 @@ calltrap:
  * temporarily altered for the pushfl - an interrupt might come in
  * and clobber the saved cs/eip.
  *
- * We do not obtain the MP lock, but the call to syscall2 might.  If it
+ * We do not obtain the MP lock, but the call to syscall might.  If it
  * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
@@ -250,11 +250,8 @@ IDTVEC(syscall)
 	movl	%eax,TF_EFLAGS(%esp)
 	movl	$7,TF_ERR(%esp)		/* sizeof "lcall 7,0" */
 	FAKE_MCOUNT(13*4(%esp))
-	call	_syscall2
+	call	_syscall
 	MEXITCOUNT
-	cli				/* atomic astpending access */
-	cmpl	$0,PCPU(ASTPENDING)	/* AST pending? */
-	je	doreti_syscall_ret	/* no, get out of here */
 	jmp	_doreti
 
 /*
@@ -264,7 +261,7 @@ IDTVEC(syscall)
  * rather than an IGT (interrupt gate).  Thus interrupts are enabled on
  * entry just as they are for a normal syscall.
 *
- * We do not obtain the MP lock, but the call to syscall2 might.  If it
+ * We do not obtain the MP lock, but the call to syscall might.  If it
 * does it will release the lock prior to returning.
 */
 	SUPERALIGN_TEXT
@@ -281,11 +278,8 @@ IDTVEC(int0x80_syscall)
 	mov	%ax,%fs
 	movl	$2,TF_ERR(%esp)		/* sizeof "int 0x80" */
 	FAKE_MCOUNT(13*4(%esp))
-	call	_syscall2
+	call	_syscall
 	MEXITCOUNT
-	cli				/* atomic astpending access */
-	cmpl	$0,PCPU(ASTPENDING)	/* AST pending? */
-	je	doreti_syscall_ret	/* no, get out of here */
 	jmp	_doreti
 
 ENTRY(fork_trampoline)
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index d2e1db3..bfb97ad 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -81,9 +81,13 @@ ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
 ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
 
 ASSYM(P_ADDR, offsetof(struct proc, p_addr));
 ASSYM(P_INTR_NESTING_LEVEL, offsetof(struct proc, p_intr_nesting_level));
+ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));
 ASSYM(P_STAT, offsetof(struct proc, p_stat));
 ASSYM(P_WCHAN, offsetof(struct proc, p_wchan));
 
+ASSYM(PS_ASTPENDING, PS_ASTPENDING);
+ASSYM(PS_NEEDRESCHED, PS_NEEDRESCHED);
+
 #ifdef SMP
 ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu));
 ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu));
@@ -180,9 +184,6 @@ ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime));
 ASSYM(GD_SWITCHTICKS, offsetof(struct globaldata, gd_switchticks));
 ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd));
 ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt));
-ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending));
-ASSYM(AST_PENDING, AST_PENDING);
-ASSYM(AST_RESCHED, AST_RESCHED);
 
 #ifdef USER_LDT
 ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt));
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 6d255df..20fe50a 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -180,9 +180,6 @@ sw1a:
 sw1b:
 	movl	%eax,%ecx
 
-	xorl	%eax,%eax
-	andl	$~AST_RESCHED,PCPU(ASTPENDING)
-
 #ifdef INVARIANTS
 	cmpb	$SRUN,P_STAT(%ecx)
 	jne	badsw2
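The _Xcpuast changes in apic_vector.S above are easier to follow as C. Roughly, the IPI handler now does the equivalent of the following (a loose translation only: atomic_btr() is a stand-in for the lock btrl instruction, not a real kernel primitive, and MTX_LOCK_SPIN/MTX_UNLOCK_SPIN are the assembly forms of the spin-mutex operations):

	/* Mark the current process, not the CPU, as having a pending AST. */
	mtx_lock_spin(&sched_lock);
	curproc->p_sflag |= PS_ASTPENDING;
	if (atomic_btr(&resched_cpus, PCPU_GET(cpuid))) {
		/* Another CPU asked us to reschedule. */
		curproc->p_sflag |= PS_NEEDRESCHED;
		atomic_add_int(&want_resched_cnt, 1);
	}
	mtx_unlock_spin(&sched_lock);
	atomic_add_int(&cpuast_cnt, 1);
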
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 533d791..d34e4b1 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -105,7 +105,7 @@ int (*pmath_emulate) __P((struct trapframe *));
 
 extern void trap __P((struct trapframe frame));
 extern int trapwrite __P((unsigned addr));
-extern void syscall2 __P((struct trapframe frame));
+extern void syscall __P((struct trapframe frame));
 extern void ast __P((struct trapframe frame));
 
 static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
@@ -212,7 +212,7 @@ userret(p, frame, oticks)
 		if (!mtx_owned(&Giant))
 			mtx_lock(&Giant);
 		mtx_lock_spin(&sched_lock);
-		addupc_task(p, frame->tf_eip,
+		addupc_task(p, TRAPF_PC(frame),
 		    (u_int)(p->p_sticks - oticks) * psratio);
 	}
 	curpriority = p->p_priority;
@@ -1075,7 +1075,7 @@ int trapwrite(addr)
 }
 
 /*
- * syscall2 -	MP aware system call request C handler
+ * syscall -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
@@ -1086,7 +1086,7 @@ int trapwrite(addr)
 * the current stack is allowed without having to hold MP lock.
 */
 void
-syscall2(frame)
+syscall(frame)
 	struct trapframe frame;
 {
 	caddr_t params;
@@ -1278,10 +1278,22 @@ ast(frame)
 	struct proc *p = CURPROC;
 	u_quad_t sticks;
 
+	KASSERT(TRAPF_USERMODE(&frame), ("ast in kernel mode"));
+
+	/*
+	 * We check for a pending AST here rather than in the assembly as
+	 * acquiring and releasing mutexes in assembly is not fun.
+	 */
 	mtx_lock_spin(&sched_lock);
+	if (!(astpending() || resched_wanted())) {
+		mtx_unlock_spin(&sched_lock);
+		return;
+	}
+
 	sticks = p->p_sticks;
-
+	astoff();
+	mtx_intr_enable(&sched_lock);
 	atomic_add_int(&cnt.v_soft, 1);
 	if (p->p_sflag & PS_OWEUPC) {
 		p->p_sflag &= ~PS_OWEUPC;
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index 0b99ec6..e644b50 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -59,32 +59,17 @@
 #define	cpu_getstack(p)		((p)->p_md.md_regs->tf_esp)
 #define	cpu_setstack(p, ap)	((p)->p_md.md_regs->tf_esp = (ap))
 
+#define	TRAPF_USERMODE(framep) \
+	((ISPL((framep)->tf_cs) == SEL_UPL) || ((framep)->tf_eflags & PSL_VM))
+#define	TRAPF_PC(framep)	((framep)->tf_eip)
+
 #define	CLKF_USERMODE(framep) \
-	((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM))
+	((ISPL((framep)->cf_cs) == SEL_UPL) || ((framep)->cf_eflags & PSL_VM))
 #define	CLKF_INTR(framep)	(curproc->p_intr_nesting_level >= 2)
 #define	CLKF_PC(framep)		((framep)->cf_eip)
 
 /*
- * astpending bits
- */
-#define	AST_PENDING	0x00000001
-#define	AST_RESCHED	0x00000002
-
-/*
- * Preempt the current process if in interrupt from user mode,
- * or after the current trap/syscall if in system mode.
- *
- * XXX: if astpending is later changed to an |= here due to more flags being
- * added, we will have an atomicy problem.  The type of atomicy we need is
- * a non-locked orl.
- */
-#define	need_resched()	do {					\
-	PCPU_SET(astpending, AST_RESCHED|AST_PENDING);		\
-} while (0)
-#define	resched_wanted()	(PCPU_GET(astpending) & AST_RESCHED)
-
-/*
  * Arrange to handle pending profiling ticks before returning to user mode.
  *
  * XXX this is now poorly named and implemented.  It used to handle only a
@@ -92,28 +77,13 @@
  * counter in the proc table and the flag isn't really necessary.
  */
 #define	need_proftick(p)	do {				\
-	mtx_lock_spin(&sched_lock);				\
+	mtx_lock_spin(&sched_lock);				\
 	(p)->p_sflag |= PS_OWEUPC;				\
-	mtx_unlock_spin(&sched_lock);				\
 	aston();						\
+	mtx_unlock_spin(&sched_lock);				\
 } while (0)
 
 /*
- * Notify the current process (p) that it has a signal pending,
- * process as soon as possible.
- *
- * XXX: aston() really needs to be an atomic (not locked, but an orl),
- * in case need_resched() is set by an interrupt.  But with astpending a
- * per-cpu variable this is not trivial to do efficiently.  For now we blow
- * it off (asynchronous need_resched() conflicts are not critical).
- */
-#define	signotify(p)	aston()
-#define	aston()	do {						\
-	PCPU_SET(astpending, PCPU_GET(astpending) | AST_PENDING); \
-} while (0)
-#define	astoff()
-
-/*
  * CTL_MACHDEP definitions.
  */
 #define	CPU_CONSDEV		1	/* dev_t: console terminal device */
diff --git a/sys/amd64/include/pcpu.h b/sys/amd64/include/pcpu.h
index 83d5103..3b7bca5 100644
--- a/sys/amd64/include/pcpu.h
+++ b/sys/amd64/include/pcpu.h
@@ -63,7 +63,6 @@ struct globaldata {
 	int		gd_currentldt;	/* only used for USER_LDT */
 	u_int		gd_cpuid;
 	u_int		gd_other_cpus;
-	u_int		gd_astpending;
 	SLIST_ENTRY(globaldata) gd_allcpu;
 	int		gd_witness_spin_check;
 #ifdef KTR_PERCPU
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index fbaceff..68b6c77 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -304,10 +304,9 @@ _Xcpuast:
 
 	FAKE_MCOUNT(13*4(%esp))
 
-	orl	$AST_PENDING, PCPU(ASTPENDING)	/* XXX */
+	MTX_LOCK_SPIN(sched_lock, 0)
 	movl	PCPU(CURPROC),%ebx
-	incl	P_INTR_NESTING_LEVEL(%ebx)
-	sti
+	orl	$PS_ASTPENDING, P_SFLAG(%ebx)
 
 	movl	PCPU(CPUID), %eax
 	lock
@@ -315,13 +314,13 @@ _Xcpuast:
 	lock
 	btrl	%eax, CNAME(resched_cpus)
 	jnc	2f
-	orl	$AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
+	orl	$PS_NEEDRESCHED, P_SFLAG(%ebx)
 	lock
 	incl	CNAME(want_resched_cnt)
 2:
+	MTX_UNLOCK_SPIN(sched_lock)
 	lock
 	incl	CNAME(cpuast_cnt)
-	decl	P_INTR_NESTING_LEVEL(%ebx)
 	MEXITCOUNT
 	jmp	_doreti
 1:
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
index d3bce3b..51662ba 100644
--- a/sys/i386/i386/exception.s
+++ b/sys/i386/i386/exception.s
@@ -230,7 +230,7 @@ calltrap:
  * temporarily altered for the pushfl - an interrupt might come in
  * and clobber the saved cs/eip.
  *
- * We do not obtain the MP lock, but the call to syscall2 might.  If it
+ * We do not obtain the MP lock, but the call to syscall might.  If it
  * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
@@ -250,11 +250,8 @@ IDTVEC(syscall)
 	movl	%eax,TF_EFLAGS(%esp)
 	movl	$7,TF_ERR(%esp)		/* sizeof "lcall 7,0" */
 	FAKE_MCOUNT(13*4(%esp))
-	call	_syscall2
+	call	_syscall
 	MEXITCOUNT
-	cli				/* atomic astpending access */
-	cmpl	$0,PCPU(ASTPENDING)	/* AST pending? */
-	je	doreti_syscall_ret	/* no, get out of here */
 	jmp	_doreti
 
 /*
@@ -264,7 +261,7 @@ IDTVEC(syscall)
  * rather than an IGT (interrupt gate).  Thus interrupts are enabled on
  * entry just as they are for a normal syscall.
 *
- * We do not obtain the MP lock, but the call to syscall2 might.  If it
+ * We do not obtain the MP lock, but the call to syscall might.  If it
 * does it will release the lock prior to returning.
 */
 	SUPERALIGN_TEXT
@@ -281,11 +278,8 @@ IDTVEC(int0x80_syscall)
 	mov	%ax,%fs
 	movl	$2,TF_ERR(%esp)		/* sizeof "int 0x80" */
 	FAKE_MCOUNT(13*4(%esp))
-	call	_syscall2
+	call	_syscall
 	MEXITCOUNT
-	cli				/* atomic astpending access */
-	cmpl	$0,PCPU(ASTPENDING)	/* AST pending? */
-	je	doreti_syscall_ret	/* no, get out of here */
 	jmp	_doreti
 
 ENTRY(fork_trampoline)
diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c
index d2e1db3..bfb97ad 100644
--- a/sys/i386/i386/genassym.c
+++ b/sys/i386/i386/genassym.c
@@ -81,9 +81,13 @@ ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
 ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
 
 ASSYM(P_ADDR, offsetof(struct proc, p_addr));
 ASSYM(P_INTR_NESTING_LEVEL, offsetof(struct proc, p_intr_nesting_level));
+ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));
 ASSYM(P_STAT, offsetof(struct proc, p_stat));
 ASSYM(P_WCHAN, offsetof(struct proc, p_wchan));
 
+ASSYM(PS_ASTPENDING, PS_ASTPENDING);
+ASSYM(PS_NEEDRESCHED, PS_NEEDRESCHED);
+
 #ifdef SMP
 ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu));
 ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu));
@@ -180,9 +184,6 @@ ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime));
 ASSYM(GD_SWITCHTICKS, offsetof(struct globaldata, gd_switchticks));
 ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd));
 ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt));
-ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending));
-ASSYM(AST_PENDING, AST_PENDING);
-ASSYM(AST_RESCHED, AST_RESCHED);
 
 #ifdef USER_LDT
 ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt));
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index 6d255df..20fe50a 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -180,9 +180,6 @@ sw1a:
 sw1b:
 	movl	%eax,%ecx
 
-	xorl	%eax,%eax
-	andl	$~AST_RESCHED,PCPU(ASTPENDING)
-
 #ifdef INVARIANTS
 	cmpb	$SRUN,P_STAT(%ecx)
 	jne	badsw2
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index 533d791..d34e4b1 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -105,7 +105,7 @@ int (*pmath_emulate) __P((struct trapframe *));
 
 extern void trap __P((struct trapframe frame));
 extern int trapwrite __P((unsigned addr));
-extern void syscall2 __P((struct trapframe frame));
+extern void syscall __P((struct trapframe frame));
 extern void ast __P((struct trapframe frame));
 
 static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
@@ -212,7 +212,7 @@ userret(p, frame, oticks)
 		if (!mtx_owned(&Giant))
 			mtx_lock(&Giant);
 		mtx_lock_spin(&sched_lock);
-		addupc_task(p, frame->tf_eip,
+		addupc_task(p, TRAPF_PC(frame),
 		    (u_int)(p->p_sticks - oticks) * psratio);
 	}
 	curpriority = p->p_priority;
@@ -1075,7 +1075,7 @@ int trapwrite(addr)
 }
 
 /*
- * syscall2 -	MP aware system call request C handler
+ * syscall -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
@@ -1086,7 +1086,7 @@ int trapwrite(addr)
 * the current stack is allowed without having to hold MP lock.
 */
 void
-syscall2(frame)
+syscall(frame)
 	struct trapframe frame;
 {
 	caddr_t params;
@@ -1278,10 +1278,22 @@ ast(frame)
 	struct proc *p = CURPROC;
 	u_quad_t sticks;
 
+	KASSERT(TRAPF_USERMODE(&frame), ("ast in kernel mode"));
+
+	/*
+	 * We check for a pending AST here rather than in the assembly as
+	 * acquiring and releasing mutexes in assembly is not fun.
+	 */
 	mtx_lock_spin(&sched_lock);
+	if (!(astpending() || resched_wanted())) {
+		mtx_unlock_spin(&sched_lock);
+		return;
+	}
+
 	sticks = p->p_sticks;
-
+	astoff();
+	mtx_intr_enable(&sched_lock);
 	atomic_add_int(&cnt.v_soft, 1);
 	if (p->p_sflag & PS_OWEUPC) {
 		p->p_sflag &= ~PS_OWEUPC;
diff --git a/sys/i386/include/asnames.h b/sys/i386/include/asnames.h
index a7e3e24..d5ea905 100644
--- a/sys/i386/include/asnames.h
+++ b/sys/i386/include/asnames.h
@@ -304,7 +304,7 @@
 #define	_swi_net			swi_net
 #define	_swi_null			swi_null
 #define	_swi_vm				swi_vm
-#define	_syscall2			syscall2
+#define	_syscall			syscall
 #define	_szsigcode			szsigcode
 #define	_ticks				ticks
 #define	_time				time
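With the spl calls gone, the userret() path shared by the trap.c copies in this diff reschedules purely under sched_lock, keyed off the new resched_wanted() macro. In outline (a sketch assembled from the userret() hunks above, not new code):

	mtx_lock_spin(&sched_lock);
	p->p_priority = p->p_usrpri;
	if (resched_wanted()) {
		/*
		 * We are curproc, so we are not on a run queue;
		 * place ourselves on one and switch away.
		 */
		DROP_GIANT_NOSWITCH();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
	}
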
diff --git a/sys/i386/include/cpu.h b/sys/i386/include/cpu.h
index 0b99ec6..e644b50 100644
--- a/sys/i386/include/cpu.h
+++ b/sys/i386/include/cpu.h
@@ -59,32 +59,17 @@
 #define	cpu_getstack(p)		((p)->p_md.md_regs->tf_esp)
 #define	cpu_setstack(p, ap)	((p)->p_md.md_regs->tf_esp = (ap))
 
+#define	TRAPF_USERMODE(framep) \
+	((ISPL((framep)->tf_cs) == SEL_UPL) || ((framep)->tf_eflags & PSL_VM))
+#define	TRAPF_PC(framep)	((framep)->tf_eip)
+
 #define	CLKF_USERMODE(framep) \
-	((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM))
+	((ISPL((framep)->cf_cs) == SEL_UPL) || ((framep)->cf_eflags & PSL_VM))
 #define	CLKF_INTR(framep)	(curproc->p_intr_nesting_level >= 2)
 #define	CLKF_PC(framep)		((framep)->cf_eip)
 
 /*
- * astpending bits
- */
-#define	AST_PENDING	0x00000001
-#define	AST_RESCHED	0x00000002
-
-/*
- * Preempt the current process if in interrupt from user mode,
- * or after the current trap/syscall if in system mode.
- *
- * XXX: if astpending is later changed to an |= here due to more flags being
- * added, we will have an atomicy problem.  The type of atomicy we need is
- * a non-locked orl.
- */
-#define	need_resched()	do {					\
-	PCPU_SET(astpending, AST_RESCHED|AST_PENDING);		\
-} while (0)
-#define	resched_wanted()	(PCPU_GET(astpending) & AST_RESCHED)
-
-/*
  * Arrange to handle pending profiling ticks before returning to user mode.
  *
  * XXX this is now poorly named and implemented.  It used to handle only a
@@ -92,28 +77,13 @@
  * counter in the proc table and the flag isn't really necessary.
  */
 #define	need_proftick(p)	do {				\
-	mtx_lock_spin(&sched_lock);				\
+	mtx_lock_spin(&sched_lock);				\
 	(p)->p_sflag |= PS_OWEUPC;				\
-	mtx_unlock_spin(&sched_lock);				\
 	aston();						\
+	mtx_unlock_spin(&sched_lock);				\
 } while (0)
 
 /*
- * Notify the current process (p) that it has a signal pending,
- * process as soon as possible.
- *
- * XXX: aston() really needs to be an atomic (not locked, but an orl),
- * in case need_resched() is set by an interrupt.  But with astpending a
- * per-cpu variable this is not trivial to do efficiently.  For now we blow
- * it off (asynchronous need_resched() conflicts are not critical).
- */
-#define	signotify(p)	aston()
-#define	aston()	do {						\
-	PCPU_SET(astpending, PCPU_GET(astpending) | AST_PENDING); \
-} while (0)
-#define	astoff()
-
-/*
  * CTL_MACHDEP definitions.
  */
 #define	CPU_CONSDEV		1	/* dev_t: console terminal device */
diff --git a/sys/i386/include/globaldata.h b/sys/i386/include/globaldata.h
index 83d5103..3b7bca5 100644
--- a/sys/i386/include/globaldata.h
+++ b/sys/i386/include/globaldata.h
@@ -63,7 +63,6 @@ struct globaldata {
 	int		gd_currentldt;	/* only used for USER_LDT */
 	u_int		gd_cpuid;
 	u_int		gd_other_cpus;
-	u_int		gd_astpending;
 	SLIST_ENTRY(globaldata) gd_allcpu;
 	int		gd_witness_spin_check;
 #ifdef KTR_PERCPU
diff --git a/sys/i386/include/pcpu.h b/sys/i386/include/pcpu.h
index 83d5103..3b7bca5 100644
--- a/sys/i386/include/pcpu.h
+++ b/sys/i386/include/pcpu.h
@@ -63,7 +63,6 @@ struct globaldata {
 	int		gd_currentldt;	/* only used for USER_LDT */
 	u_int		gd_cpuid;
 	u_int		gd_other_cpus;
-	u_int		gd_astpending;
 	SLIST_ENTRY(globaldata) gd_allcpu;
 	int		gd_witness_spin_check;
 #ifdef KTR_PERCPU
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index fbaceff..68b6c77 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -304,10 +304,9 @@ _Xcpuast:
 
 	FAKE_MCOUNT(13*4(%esp))
 
-	orl	$AST_PENDING, PCPU(ASTPENDING)	/* XXX */
+	MTX_LOCK_SPIN(sched_lock, 0)
 	movl	PCPU(CURPROC),%ebx
-	incl	P_INTR_NESTING_LEVEL(%ebx)
-	sti
+	orl	$PS_ASTPENDING, P_SFLAG(%ebx)
 
 	movl	PCPU(CPUID), %eax
 	lock
@@ -315,13 +314,13 @@ _Xcpuast:
 	lock
 	btrl	%eax, CNAME(resched_cpus)
 	jnc	2f
-	orl	$AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
+	orl	$PS_NEEDRESCHED, P_SFLAG(%ebx)
 	lock
 	incl	CNAME(want_resched_cnt)
 2:
+	MTX_UNLOCK_SPIN(sched_lock)
 	lock
 	incl	CNAME(cpuast_cnt)
-	decl	P_INTR_NESTING_LEVEL(%ebx)
 	MEXITCOUNT
 	jmp	_doreti
 1:
diff --git a/sys/i386/isa/ipl.s b/sys/i386/isa/ipl.s
index 7c41589..25af1f7 100644
--- a/sys/i386/isa/ipl.s
+++ b/sys/i386/isa/ipl.s
@@ -55,17 +55,20 @@
 	SUPERALIGN_TEXT
 	.type	_doreti,@function
 _doreti:
+	FAKE_MCOUNT(_bintr)		/* init "from" _bintr -> _doreti */
 doreti_next:
 	/* Check for ASTs that can be handled now. */
-	testl	$AST_PENDING,PCPU(ASTPENDING)
-	je	doreti_exit		/* no AST, exit */
 	testb	$SEL_RPL_MASK,TF_CS(%esp) /* are we in user mode? */
 	jne	doreti_ast		/* yes, do it now. */
 	testl	$PSL_VM,TF_EFLAGS(%esp)	/* kernel mode */
 	je	doreti_exit		/* and not VM86 mode, defer */
 	cmpl	$1,_in_vm86call		/* are we in a VM86 call? */
-	jne	doreti_ast		/* yes, we can do it */
+	je	doreti_exit		/* no, defer */
+
+doreti_ast:
+	movl	$T_ASTFLT,TF_TRAPNO(%esp)
+	call	_ast
 
 /*
  * doreti_exit:	release MP lock, pop registers, iret.
@@ -80,7 +83,6 @@ doreti_exit:
 	.globl	doreti_popl_fs
-	.globl	doreti_syscall_ret
-doreti_syscall_ret:
 doreti_popl_fs:
 	popl	%fs
 	.globl	doreti_popl_es
@@ -120,14 +122,6 @@ doreti_popl_fs_fault:
 	movl	$T_PROTFLT,TF_TRAPNO(%esp)
 	jmp	alltraps_with_regs_pushed
 
-	ALIGN_TEXT
-doreti_ast:
-	andl	$~AST_PENDING,PCPU(ASTPENDING)
-	sti
-	movl	$T_ASTFLT,TF_TRAPNO(%esp)
-	call	_ast
-	jmp	doreti_next
-
 #ifdef APIC_IO
 #include "i386/isa/apic_ipl.s"
 #else
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index deee375..a9b5bf2 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1251,8 +1251,8 @@ psignal(p, sig)
 	 * It will either never be noticed, or noticed very soon.
 	 */
 	if (p == curproc) {
-		mtx_unlock_spin(&sched_lock);
 		signotify(p);
+		mtx_unlock_spin(&sched_lock);
 	}
 #ifdef SMP
 	else if (p->p_stat == SRUN) {
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 533d791..d34e4b1 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -105,7 +105,7 @@ int (*pmath_emulate) __P((struct trapframe *));
 
 extern void trap __P((struct trapframe frame));
 extern int trapwrite __P((unsigned addr));
-extern void syscall2 __P((struct trapframe frame));
+extern void syscall __P((struct trapframe frame));
 extern void ast __P((struct trapframe frame));
 
 static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
@@ -212,7 +212,7 @@ userret(p, frame, oticks)
 		if (!mtx_owned(&Giant))
 			mtx_lock(&Giant);
 		mtx_lock_spin(&sched_lock);
-		addupc_task(p, frame->tf_eip,
+		addupc_task(p, TRAPF_PC(frame),
 		    (u_int)(p->p_sticks - oticks) * psratio);
 	}
 	curpriority = p->p_priority;
@@ -1075,7 +1075,7 @@ int trapwrite(addr)
 }
 
 /*
- * syscall2 -	MP aware system call request C handler
+ * syscall -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
@@ -1086,7 +1086,7 @@ int trapwrite(addr)
 * the current stack is allowed without having to hold MP lock.
 */
 void
-syscall2(frame)
+syscall(frame)
 	struct trapframe frame;
 {
 	caddr_t params;
@@ -1278,10 +1278,22 @@ ast(frame)
 	struct proc *p = CURPROC;
 	u_quad_t sticks;
 
+	KASSERT(TRAPF_USERMODE(&frame), ("ast in kernel mode"));
+
+	/*
+	 * We check for a pending AST here rather than in the assembly as
+	 * acquiring and releasing mutexes in assembly is not fun.
+	 */
 	mtx_lock_spin(&sched_lock);
+	if (!(astpending() || resched_wanted())) {
+		mtx_unlock_spin(&sched_lock);
+		return;
+	}
+
 	sticks = p->p_sticks;
-
+	astoff();
+	mtx_intr_enable(&sched_lock);
 	atomic_add_int(&cnt.v_soft, 1);
 	if (p->p_sflag & PS_OWEUPC) {
 		p->p_sflag &= ~PS_OWEUPC;
diff --git a/sys/powerpc/include/globaldata.h b/sys/powerpc/include/globaldata.h
index 84dbf34..8e6cfbe 100644
--- a/sys/powerpc/include/globaldata.h
+++ b/sys/powerpc/include/globaldata.h
@@ -57,7 +57,6 @@ struct globaldata {
 	u_int32_t	gd_next_asn;		/* next ASN to allocate */
 	u_int32_t	gd_current_asngen;	/* ASN rollover check */
-	u_int		gd_astpending;
 	SLIST_ENTRY(globaldata) gd_allcpu;
 	int		gd_witness_spin_check;
 #ifdef KTR_PERCPU
diff --git a/sys/powerpc/include/pcpu.h b/sys/powerpc/include/pcpu.h
index 84dbf34..8e6cfbe 100644
--- a/sys/powerpc/include/pcpu.h
+++ b/sys/powerpc/include/pcpu.h
@@ -57,7 +57,6 @@ struct globaldata {
 	u_int32_t	gd_next_asn;		/* next ASN to allocate */
 	u_int32_t	gd_current_asngen;	/* ASN rollover check */
-	u_int		gd_astpending;
 	SLIST_ENTRY(globaldata) gd_allcpu;
 	int		gd_witness_spin_check;
 #ifdef KTR_PERCPU
diff --git a/sys/powerpc/powerpc/genassym.c b/sys/powerpc/powerpc/genassym.c
index 10bac57..1417c90 100644
--- a/sys/powerpc/powerpc/genassym.c
+++ b/sys/powerpc/powerpc/genassym.c
@@ -73,7 +73,6 @@ ASSYM(GD_CURPCB, offsetof(struct globaldata, gd_curpcb));
 ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime));
 ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid));
 ASSYM(GD_IDLEPCBPHYS, offsetof(struct globaldata, gd_idlepcbphys));
-ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending));
 
 ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
 ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse));
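The one-line swap in psignal() above is forced by the new locking protocol: signotify() now asserts that sched_lock is held (see the sys/proc.h hunk below), so the unlock has to move after the call. A minimal sketch of the resulting pattern; example_notify_self() is a hypothetical wrapper:

	static void
	example_notify_self(struct proc *p)
	{
		mtx_lock_spin(&sched_lock);
		if (p == curproc)
			signotify(p);	/* sets PS_ASTPENDING; requires sched_lock */
		mtx_unlock_spin(&sched_lock);
	}
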
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index e4c5559..bf0ac06 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -329,6 +329,8 @@ struct proc {
 #define	PS_CVWAITQ	0x00080	/* Process is on a cv_waitq (not slpq). */
 #define	PS_SWAPINREQ	0x00100	/* Swapin request due to wakeup. */
 #define	PS_SWAPPING	0x00200	/* Process is being swapped. */
+#define	PS_ASTPENDING	0x00400	/* Process has a pending ast. */
+#define	PS_NEEDRESCHED	0x00800	/* Process needs to yield. */
 
 #define	P_MAGIC		0xbeefface
 
@@ -378,6 +380,39 @@ sigonstack(size_t sp)
 	    : 0);
 }
 
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ */
+#define	need_resched() do {						\
+	mtx_assert(&sched_lock, MA_OWNED);				\
+	curproc->p_sflag |= PS_NEEDRESCHED;				\
+} while (0)
+
+#define	resched_wanted()	(curproc->p_sflag & PS_NEEDRESCHED)
+
+#define	clear_resched() do {						\
+	mtx_assert(&sched_lock, MA_OWNED);				\
+	curproc->p_sflag &= ~PS_NEEDRESCHED;				\
+} while (0)
+
+/*
+ * Notify the current process (p) that it has a signal pending,
+ * process as soon as possible.
+ */
+#define	aston()		signotify(CURPROC)
+#define	signotify(p) do {						\
+	mtx_assert(&sched_lock, MA_OWNED);				\
+	(p)->p_sflag |= PS_ASTPENDING;					\
+} while (0)
+
+#define	astpending()	(curproc->p_sflag & PS_ASTPENDING)
+
+#define	astoff() do {							\
+	mtx_assert(&sched_lock, MA_OWNED);				\
+	CURPROC->p_sflag &= ~PS_ASTPENDING;				\
+} while (0)
+
 /* Handy macro to determine if p1 can mangle p2. */
 #define	PRISON_CHECK(p1, p2) \
 	((p1)->p_prison == NULL || (p1)->p_prison == (p2)->p_prison)
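Taken together, the sys/proc.h additions replace the per-CPU astpending word with two p_sflag bits and a small macro family, all of which insist on sched_lock via mtx_assert(). A hypothetical caller that wants process p to take an AST and the current process to yield would now look like:

	static void
	example_kick(struct proc *p)
	{
		mtx_lock_spin(&sched_lock);
		signotify(p);		/* p gets PS_ASTPENDING; its ast() runs on
					   the next return to user mode */
		need_resched();		/* curproc gets PS_NEEDRESCHED */
		mtx_unlock_spin(&sched_lock);
	}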