diff options
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/apic_vector.S |  9
-rw-r--r--  sys/amd64/amd64/cpu_switch.S  |  3
-rw-r--r--  sys/amd64/amd64/exception.S   | 14
-rw-r--r--  sys/amd64/amd64/exception.s   | 14
-rw-r--r--  sys/amd64/amd64/genassym.c    |  7
-rw-r--r--  sys/amd64/amd64/swtch.s       |  3
-rw-r--r--  sys/amd64/amd64/trap.c        | 22
-rw-r--r--  sys/amd64/include/cpu.h       | 44
-rw-r--r--  sys/amd64/include/pcpu.h      |  1
9 files changed, 40 insertions, 77 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S index fbaceff..68b6c77 100644 --- a/sys/amd64/amd64/apic_vector.S +++ b/sys/amd64/amd64/apic_vector.S @@ -304,10 +304,9 @@ _Xcpuast: FAKE_MCOUNT(13*4(%esp)) - orl $AST_PENDING, PCPU(ASTPENDING) /* XXX */ + MTX_LOCK_SPIN(sched_lock, 0) movl PCPU(CURPROC),%ebx - incl P_INTR_NESTING_LEVEL(%ebx) - sti + orl $PS_ASTPENDING, P_SFLAG(%ebx) movl PCPU(CPUID), %eax lock @@ -315,13 +314,13 @@ _Xcpuast: lock btrl %eax, CNAME(resched_cpus) jnc 2f - orl $AST_PENDING+AST_RESCHED, PCPU(ASTPENDING) + orl $PS_NEEDRESCHED, P_SFLAG(%ebx) lock incl CNAME(want_resched_cnt) 2: + MTX_UNLOCK_SPIN(sched_lock) lock incl CNAME(cpuast_cnt) - decl P_INTR_NESTING_LEVEL(%ebx) MEXITCOUNT jmp _doreti 1: diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S index 6d255df..20fe50a 100644 --- a/sys/amd64/amd64/cpu_switch.S +++ b/sys/amd64/amd64/cpu_switch.S @@ -180,9 +180,6 @@ sw1a: sw1b: movl %eax,%ecx - xorl %eax,%eax - andl $~AST_RESCHED,PCPU(ASTPENDING) - #ifdef INVARIANTS cmpb $SRUN,P_STAT(%ecx) jne badsw2 diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S index d3bce3b..51662ba 100644 --- a/sys/amd64/amd64/exception.S +++ b/sys/amd64/amd64/exception.S @@ -230,7 +230,7 @@ calltrap: * temporarily altered for the pushfl - an interrupt might come in * and clobber the saved cs/eip. * - * We do not obtain the MP lock, but the call to syscall2 might. If it + * We do not obtain the MP lock, but the call to syscall might. If it * does it will release the lock prior to returning. */ SUPERALIGN_TEXT @@ -250,11 +250,8 @@ IDTVEC(syscall) movl %eax,TF_EFLAGS(%esp) movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */ FAKE_MCOUNT(13*4(%esp)) - call _syscall2 + call _syscall MEXITCOUNT - cli /* atomic astpending access */ - cmpl $0,PCPU(ASTPENDING) /* AST pending? */ - je doreti_syscall_ret /* no, get out of here */ jmp _doreti /* @@ -264,7 +261,7 @@ IDTVEC(syscall) * rather then an IGT (interrupt gate). 
Thus interrupts are enabled on * entry just as they are for a normal syscall. * - * We do not obtain the MP lock, but the call to syscall2 might. If it + * We do not obtain the MP lock, but the call to syscall might. If it * does it will release the lock prior to returning. */ SUPERALIGN_TEXT @@ -281,11 +278,8 @@ IDTVEC(int0x80_syscall) mov %ax,%fs movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */ FAKE_MCOUNT(13*4(%esp)) - call _syscall2 + call _syscall MEXITCOUNT - cli /* atomic astpending access */ - cmpl $0,PCPU(ASTPENDING) /* AST pending? */ - je doreti_syscall_ret /* no, get out of here */ jmp _doreti ENTRY(fork_trampoline) diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s index d3bce3b..51662ba 100644 --- a/sys/amd64/amd64/exception.s +++ b/sys/amd64/amd64/exception.s @@ -230,7 +230,7 @@ calltrap: * temporarily altered for the pushfl - an interrupt might come in * and clobber the saved cs/eip. * - * We do not obtain the MP lock, but the call to syscall2 might. If it + * We do not obtain the MP lock, but the call to syscall might. If it * does it will release the lock prior to returning. */ SUPERALIGN_TEXT @@ -250,11 +250,8 @@ IDTVEC(syscall) movl %eax,TF_EFLAGS(%esp) movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */ FAKE_MCOUNT(13*4(%esp)) - call _syscall2 + call _syscall MEXITCOUNT - cli /* atomic astpending access */ - cmpl $0,PCPU(ASTPENDING) /* AST pending? */ - je doreti_syscall_ret /* no, get out of here */ jmp _doreti /* @@ -264,7 +261,7 @@ IDTVEC(syscall) * rather then an IGT (interrupt gate). Thus interrupts are enabled on * entry just as they are for a normal syscall. * - * We do not obtain the MP lock, but the call to syscall2 might. If it + * We do not obtain the MP lock, but the call to syscall might. If it * does it will release the lock prior to returning. 
*/ SUPERALIGN_TEXT @@ -281,11 +278,8 @@ IDTVEC(int0x80_syscall) mov %ax,%fs movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */ FAKE_MCOUNT(13*4(%esp)) - call _syscall2 + call _syscall MEXITCOUNT - cli /* atomic astpending access */ - cmpl $0,PCPU(ASTPENDING) /* AST pending? */ - je doreti_syscall_ret /* no, get out of here */ jmp _doreti ENTRY(fork_trampoline) diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c index d2e1db3..bfb97ad 100644 --- a/sys/amd64/amd64/genassym.c +++ b/sys/amd64/amd64/genassym.c @@ -81,9 +81,13 @@ ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active)); ASSYM(P_ADDR, offsetof(struct proc, p_addr)); ASSYM(P_INTR_NESTING_LEVEL, offsetof(struct proc, p_intr_nesting_level)); +ASSYM(P_SFLAG, offsetof(struct proc, p_sflag)); ASSYM(P_STAT, offsetof(struct proc, p_stat)); ASSYM(P_WCHAN, offsetof(struct proc, p_wchan)); +ASSYM(PS_ASTPENDING, PS_ASTPENDING); +ASSYM(PS_NEEDRESCHED, PS_NEEDRESCHED); + #ifdef SMP ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu)); ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu)); @@ -180,9 +184,6 @@ ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime)); ASSYM(GD_SWITCHTICKS, offsetof(struct globaldata, gd_switchticks)); ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd)); ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt)); -ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending)); -ASSYM(AST_PENDING, AST_PENDING); -ASSYM(AST_RESCHED, AST_RESCHED); #ifdef USER_LDT ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt)); diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s index 6d255df..20fe50a 100644 --- a/sys/amd64/amd64/swtch.s +++ b/sys/amd64/amd64/swtch.s @@ -180,9 +180,6 @@ sw1a: sw1b: movl %eax,%ecx - xorl %eax,%eax - andl $~AST_RESCHED,PCPU(ASTPENDING) - #ifdef INVARIANTS cmpb $SRUN,P_STAT(%ecx) jne badsw2 diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c index 533d791..d34e4b1 100644 
--- a/sys/amd64/amd64/trap.c +++ b/sys/amd64/amd64/trap.c @@ -105,7 +105,7 @@ int (*pmath_emulate) __P((struct trapframe *)); extern void trap __P((struct trapframe frame)); extern int trapwrite __P((unsigned addr)); -extern void syscall2 __P((struct trapframe frame)); +extern void syscall __P((struct trapframe frame)); extern void ast __P((struct trapframe frame)); static int trap_pfault __P((struct trapframe *, int, vm_offset_t)); @@ -212,7 +212,7 @@ userret(p, frame, oticks) if (!mtx_owned(&Giant)) mtx_lock(&Giant); mtx_lock_spin(&sched_lock); - addupc_task(p, frame->tf_eip, + addupc_task(p, TRAPF_PC(frame), (u_int)(p->p_sticks - oticks) * psratio); } curpriority = p->p_priority; @@ -1075,7 +1075,7 @@ int trapwrite(addr) } /* - * syscall2 - MP aware system call request C handler + * syscall - MP aware system call request C handler * * A system call is essentially treated as a trap except that the * MP lock is not held on entry or return. We are responsible for @@ -1086,7 +1086,7 @@ int trapwrite(addr) * the current stack is allowed without having to hold MP lock. */ void -syscall2(frame) +syscall(frame) struct trapframe frame; { caddr_t params; @@ -1278,10 +1278,22 @@ ast(frame) struct proc *p = CURPROC; u_quad_t sticks; + KASSERT(TRAPF_USERMODE(&frame), ("ast in kernel mode")); + + /* + * We check for a pending AST here rather than in the assembly as + * acquiring and releasing mutexes in assembly is not fun. 
+ */ mtx_lock_spin(&sched_lock); + if (!(astpending() || resched_wanted())) { + mtx_unlock_spin(&sched_lock); + return; + } + sticks = p->p_sticks; - + astoff(); + mtx_intr_enable(&sched_lock); atomic_add_int(&cnt.v_soft, 1); if (p->p_sflag & PS_OWEUPC) { p->p_sflag &= ~PS_OWEUPC; diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h index 0b99ec6..e644b50 100644 --- a/sys/amd64/include/cpu.h +++ b/sys/amd64/include/cpu.h @@ -59,32 +59,17 @@ #define cpu_getstack(p) ((p)->p_md.md_regs->tf_esp) #define cpu_setstack(p, ap) ((p)->p_md.md_regs->tf_esp = (ap)) +#define TRAPF_USERMODE(framep) \ + ((ISPL((framep)->tf_cs) == SEL_UPL) || ((framep)->tf_eflags & PSL_VM)) +#define TRAPF_PC(framep) ((framep)->tf_eip) + #define CLKF_USERMODE(framep) \ - ((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM)) + ((ISPL((framep)->cf_cs) == SEL_UPL) || ((framep)->cf_eflags & PSL_VM)) #define CLKF_INTR(framep) (curproc->p_intr_nesting_level >= 2) #define CLKF_PC(framep) ((framep)->cf_eip) /* - * astpending bits - */ -#define AST_PENDING 0x00000001 -#define AST_RESCHED 0x00000002 - -/* - * Preempt the current process if in interrupt from user mode, - * or after the current trap/syscall if in system mode. - * - * XXX: if astpending is later changed to an |= here due to more flags being - * added, we will have an atomicy problem. The type of atomicy we need is - * a non-locked orl. - */ -#define need_resched() do { \ - PCPU_SET(astpending, AST_RESCHED|AST_PENDING); \ -} while (0) -#define resched_wanted() (PCPU_GET(astpending) & AST_RESCHED) - -/* * Arrange to handle pending profiling ticks before returning to user mode. * * XXX this is now poorly named and implemented. It used to handle only a @@ -92,28 +77,13 @@ * counter in the proc table and flag isn't really necessary. 
*/ #define need_proftick(p) do { \ - mtx_lock_spin(&sched_lock); \ + mtx_lock_spin(&sched_lock); \ (p)->p_sflag |= PS_OWEUPC; \ - mtx_unlock_spin(&sched_lock); \ aston(); \ + mtx_unlock_spin(&sched_lock); \ } while (0) /* - * Notify the current process (p) that it has a signal pending, - * process as soon as possible. - * - * XXX: aston() really needs to be an atomic (not locked, but an orl), - * in case need_resched() is set by an interrupt. But with astpending a - * per-cpu variable this is not trivial to do efficiently. For now we blow - * it off (asynchronous need_resched() conflicts are not critical). - */ -#define signotify(p) aston() -#define aston() do { \ - PCPU_SET(astpending, PCPU_GET(astpending) | AST_PENDING); \ -} while (0) -#define astoff() - -/* * CTL_MACHDEP definitions. */ #define CPU_CONSDEV 1 /* dev_t: console terminal device */ diff --git a/sys/amd64/include/pcpu.h b/sys/amd64/include/pcpu.h index 83d5103..3b7bca5 100644 --- a/sys/amd64/include/pcpu.h +++ b/sys/amd64/include/pcpu.h @@ -63,7 +63,6 @@ struct globaldata { int gd_currentldt; /* only used for USER_LDT */ u_int gd_cpuid; u_int gd_other_cpus; - u_int gd_astpending; SLIST_ENTRY(globaldata) gd_allcpu; int gd_witness_spin_check; #ifdef KTR_PERCPU |