Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_clock.c  |  12
-rw-r--r--  sys/kern/kern_intr.c   |   6
-rw-r--r--  sys/kern/kern_synch.c  |   4
-rw-r--r--  sys/kern/ksched.c      |   6
-rw-r--r--  sys/kern/subr_prof.c   |   4
-rw-r--r--  sys/kern/subr_smp.c    |   2
-rw-r--r--  sys/kern/subr_trap.c   | 134
7 files changed, 85 insertions, 83 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 87f9515..f48b212 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -172,15 +172,11 @@ hardclock_process(p, user)
pstats = p->p_stats;
if (user &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- p->p_sflag |= PS_ALRMPEND;
- aston(p);
- }
+ itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
+ p->p_sflag |= PS_ALRMPEND | PS_ASTPENDING;
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- p->p_sflag |= PS_PROFPEND;
- aston(p);
- }
+ itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
+ p->p_sflag |= PS_PROFPEND | PS_ASTPENDING;
}
/*
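Note on the kern_clock.c change: the old aston(p) call is folded into the flag assignment itself, so requesting an AST is now just another p_sflag bit. A minimal before/after sketch of the pattern; the expansion shown for aston() is an assumption (the macro formerly lived in machine-dependent code), and sched_lock is taken to be the lock protecting p_sflag, as the subr_prof.c hunk below makes explicit:

    /* Before: two steps, with the AST request hidden behind a macro. */
    p->p_sflag |= PS_ALRMPEND;
    aston(p);                       /* assumed to mark "AST pending" in MD state */

    /* After: one store, made while the caller holds sched_lock; ast() later
     * snapshots and clears PS_ASTPENDING together with the reason bits
     * (see the subr_trap.c hunks below). */
    p->p_sflag |= PS_ALRMPEND | PS_ASTPENDING;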
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 07ee598..84dbc6b 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -371,8 +371,8 @@ ithread_schedule(struct ithd *ithread, int do_switch)
* Set it_need to tell the thread to keep running if it is already
* running. Then, grab sched_lock and see if we actually need to
* put this thread on the runqueue. If so and the do_switch flag is
- * true, then switch to the ithread immediately. Otherwise, use
- * need_resched() to guarantee that this ithread will run before any
+ * true, then switch to the ithread immediately. Otherwise, set the
+ * needresched flag to guarantee that this ithread will run before any
* userland processes.
*/
ithread->it_need = 1;
@@ -387,7 +387,7 @@ ithread_schedule(struct ithd *ithread, int do_switch)
curproc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
} else
- need_resched(curproc);
+ curproc->p_sflag |= PS_NEEDRESCHED;
} else {
CTR3(KTR_INTR, __func__ ": pid %d: it_need %d, state %d",
p->p_pid, ithread->it_need, p->p_stat);
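The PS_NEEDRESCHED bit set here is consumed on the way back to user mode. A condensed paraphrase of that consumer, pieced together from the userret() hunks at the end of this diff; the statements inside the branch are a paraphrase rather than a quote, and the real code also drops Giant around the switch and rechecks pending signals afterwards:

    mtx_lock_spin(&sched_lock);
    p->p_pri.pri_level = p->p_pri.pri_user;
    if (p->p_sflag & PS_NEEDRESCHED) {
            setrunqueue(p);                   /* requeue at the (possibly changed) priority */
            p->p_stats->p_ru.ru_nivcsw++;     /* count an involuntary switch */
            mi_switch();                      /* clears PS_NEEDRESCHED; see the kern_synch.c hunk */
    }
    mtx_unlock_spin(&sched_lock);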
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 140ebd7..813bec1 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -109,7 +109,7 @@ maybe_resched(p)
mtx_assert(&sched_lock, MA_OWNED);
if (p->p_pri.pri_level < curproc->p_pri.pri_level)
- need_resched(curproc);
+ curproc->p_sflag |= PS_NEEDRESCHED;
}
int
@@ -702,7 +702,7 @@ mi_switch()
sched_nest = sched_lock.mtx_recurse;
p->p_lastcpu = p->p_oncpu;
p->p_oncpu = NOCPU;
- clear_resched(p);
+ p->p_sflag &= ~PS_NEEDRESCHED;
cpu_switch();
p->p_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_savecrit = sched_crit;
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index c7f746a..c7b6dd3 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -176,7 +176,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
mtx_lock_spin(&sched_lock);
rtp_to_pri(&rtp, &p->p_pri);
- need_resched(p);
+ p->p_sflag |= PS_NEEDRESCHED;
mtx_unlock_spin(&sched_lock);
}
else
@@ -198,7 +198,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
* on the scheduling code: You must leave the
* scheduling info alone.
*/
- need_resched(p);
+ p->p_sflag |= PS_NEEDRESCHED;
mtx_unlock_spin(&sched_lock);
}
break;
@@ -217,7 +217,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
int ksched_yield(register_t *ret, struct ksched *ksched)
{
mtx_lock_spin(&sched_lock);
- need_resched(curproc);
+ curproc->p_sflag |= PS_NEEDRESCHED;
mtx_unlock_spin(&sched_lock);
return 0;
}
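All three ksched.c hunks follow the rule spelled out in the comment above: the POSIX entry points only record that a reschedule is wanted and never switch on the spot. As a usage-level illustration, a sched_yield(2) call now reduces to the following on the kernel side (condensed from the ksched_yield() hunk; the actual switch happens later, in userret()/ast()):

    /* sched_yield(2), kernel side: just raise the flag under sched_lock. */
    mtx_lock_spin(&sched_lock);
    curproc->p_sflag |= PS_NEEDRESCHED;
    mtx_unlock_spin(&sched_lock);
    return 0;
    /* The yielding process keeps running until it next passes through
     * userret(), where PS_NEEDRESCHED triggers the mi_switch(). */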
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 18c2863..015f666 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -422,9 +422,11 @@ addupc_intr(p, pc, ticks)
addr = prof->pr_base + i;
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
+ mtx_lock_spin(&sched_lock);
prof->pr_addr = pc;
prof->pr_ticks = ticks;
- need_proftick(p);
+ p->p_sflag |= PS_OWEUPC | PS_ASTPENDING;
+ mtx_unlock_spin(&sched_lock);
}
}
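The mtx_lock_spin(&sched_lock) added here reflects that p_sflag is protected by sched_lock, while addupc_intr() runs from clock-interrupt context where the fast user-space update (fuswintr()/suswintr()) can fail. In that case the tick is parked in pr_addr/pr_ticks and PS_OWEUPC defers the work. The matching consumer appears in the ast() rewrite below and, condensed, looks like this:

    /* Deferred side (from the subr_trap.c hunk below): charge the parked
     * tick from a context that is allowed to fault and sleep. */
    if (sflag & PS_OWEUPC) {
            prticks = p->p_stats->p_prof.pr_ticks;
            p->p_stats->p_prof.pr_ticks = 0;
            mtx_unlock_spin(&sched_lock);
            addupc_task(p, p->p_stats->p_prof.pr_addr, prticks);
    }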
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 598fb16..c107442 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -150,7 +150,7 @@ forward_roundrobin(void)
id = gd->gd_cpuid;
if (id != PCPU_GET(cpuid) && (id & stopped_cpus) == 0 &&
p != gd->gd_idleproc) {
- need_resched(p);
+ p->p_sflag |= PS_NEEDRESCHED;
map |= id;
}
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index bb1deab..b903660 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -63,7 +63,7 @@ void
userret(p, frame, oticks)
struct proc *p;
struct trapframe *frame;
- u_quad_t oticks;
+ u_int oticks;
{
int sig;
@@ -72,11 +72,11 @@ userret(p, frame, oticks)
while ((sig = CURSIG(p)) != 0)
postsig(sig);
mtx_unlock(&Giant);
+ PROC_UNLOCK(p);
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
p->p_pri.pri_level = p->p_pri.pri_user;
- if (resched_wanted(p)) {
+ if (p->p_sflag & PS_NEEDRESCHED) {
/*
* Since we are curproc, a clock interrupt could
* change our priority without changing run queues
@@ -96,93 +96,97 @@ userret(p, frame, oticks)
while ((sig = CURSIG(p)) != 0)
postsig(sig);
mtx_unlock(&Giant);
- mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
- }
+ PROC_UNLOCK(p);
+ } else
+ mtx_unlock_spin(&sched_lock);
/*
* Charge system time if profiling.
*/
- if (p->p_sflag & PS_PROFIL) {
- mtx_unlock_spin(&sched_lock);
+ if (p->p_sflag & PS_PROFIL)
addupc_task(p, TRAPF_PC(frame),
- (u_int)(p->p_sticks - oticks) * psratio);
- } else
- mtx_unlock_spin(&sched_lock);
+ ((u_int)p->p_sticks - oticks) * psratio);
}
/*
* Process an asynchronous software trap.
* This is relatively easy.
+ * This function will return with preemption disabled.
*/
void
ast(framep)
struct trapframe *framep;
{
struct proc *p = CURPROC;
- u_quad_t sticks;
+ u_int prticks, sticks;
+ critical_t s;
+ int sflag;
#if defined(DEV_NPX) && !defined(SMP)
int ucode;
#endif
KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
-
- /*
- * We check for a pending AST here rather than in the assembly as
- * acquiring and releasing mutexes in assembly is not fun.
- */
- mtx_lock_spin(&sched_lock);
- if (!(astpending(p) || resched_wanted(p))) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
-
- sticks = p->p_sticks;
- p->p_frame = framep;
-
- astoff(p);
- cnt.v_soft++;
- mtx_intr_enable(&sched_lock);
- if (p->p_sflag & PS_OWEUPC) {
- p->p_sflag &= ~PS_OWEUPC;
- mtx_unlock_spin(&sched_lock);
- mtx_lock(&Giant);
- addupc_task(p, p->p_stats->p_prof.pr_addr,
- p->p_stats->p_prof.pr_ticks);
- mtx_lock_spin(&sched_lock);
- }
- if (p->p_sflag & PS_ALRMPEND) {
- p->p_sflag &= ~PS_ALRMPEND;
- mtx_unlock_spin(&sched_lock);
- PROC_LOCK(p);
- psignal(p, SIGVTALRM);
- PROC_UNLOCK(p);
+#ifdef WITNESS
+ if (witness_list(p))
+ panic("Returning to user mode with mutex(s) held");
+#endif
+ mtx_assert(&Giant, MA_NOTOWNED);
+ s = critical_enter();
+ while ((p->p_sflag & (PS_ASTPENDING | PS_NEEDRESCHED)) != 0) {
+ critical_exit(s);
+ p->p_frame = framep;
+ /*
+ * This updates the p_sflag's for the checks below in one
+ * "atomic" operation with turning off the astpending flag.
+ * If another AST is triggered while we are handling the
+ * AST's saved in sflag, the astpending flag will be set and
+ * we will loop again.
+ */
mtx_lock_spin(&sched_lock);
- }
+ sticks = p->p_sticks;
+ sflag = p->p_sflag;
+ p->p_sflag &= ~(PS_OWEUPC | PS_ALRMPEND | PS_PROFPEND |
+ PS_ASTPENDING);
+ cnt.v_soft++;
+ if (sflag & PS_OWEUPC) {
+ prticks = p->p_stats->p_prof.pr_ticks;
+ p->p_stats->p_prof.pr_ticks = 0;
+ mtx_unlock_spin(&sched_lock);
+ addupc_task(p, p->p_stats->p_prof.pr_addr, prticks);
+ } else
+ mtx_unlock_spin(&sched_lock);
+ if (sflag & PS_ALRMPEND) {
+ PROC_LOCK(p);
+ psignal(p, SIGVTALRM);
+ PROC_UNLOCK(p);
+ }
#if defined(DEV_NPX) && !defined(SMP)
- if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
- PCPU_GET(curpcb)->pcb_flags &= ~PCB_NPXTRAP;
- mtx_unlock_spin(&sched_lock);
- ucode = npxtrap();
- if (ucode != -1) {
- if (!mtx_owned(&Giant))
+ if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
+ atomic_clear_char(&PCPU_GET(curpcb)->pcb_flags,
+ PCB_NPXTRAP);
+ ucode = npxtrap();
+ if (ucode != -1) {
mtx_lock(&Giant);
- trapsignal(p, SIGFPE, ucode);
+ trapsignal(p, SIGFPE, ucode);
+ }
}
- mtx_lock_spin(&sched_lock);
- }
#endif
- if (p->p_sflag & PS_PROFPEND) {
- p->p_sflag &= ~PS_PROFPEND;
- mtx_unlock_spin(&sched_lock);
- PROC_LOCK(p);
- psignal(p, SIGPROF);
- PROC_UNLOCK(p);
- } else
- mtx_unlock_spin(&sched_lock);
-
- userret(p, framep, sticks);
+ if (sflag & PS_PROFPEND) {
+ PROC_LOCK(p);
+ psignal(p, SIGPROF);
+ PROC_UNLOCK(p);
+ }
- if (mtx_owned(&Giant))
- mtx_unlock(&Giant);
+ userret(p, framep, sticks);
+ if (mtx_owned(&Giant))
+ mtx_unlock(&Giant);
+ s = critical_enter();
+ }
+ mtx_assert(&Giant, MA_NOTOWNED);
+ /*
+ * We need to keep interrupts disabled so that if any further AST's
+ * come in, the interrupt they come in on will be delayed until we
+ * finish returning to userland. We assume that the return to userland
+ * will perform the equivalent of critical_exit().
+ */
}
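Taken as a whole, the subr_trap.c rewrite turns ast() from a single sched_lock-guarded pass into a loop: snapshot and clear the pending bits in one locked operation, handle them with the lock dropped, and re-enter a critical section before re-testing, so that a fresh AST raised in the meantime simply causes another trip around the loop. A condensed paraphrase of the new control flow (the DEV_NPX case and the Giant handling are omitted):

    s = critical_enter();
    while ((p->p_sflag & (PS_ASTPENDING | PS_NEEDRESCHED)) != 0) {
            critical_exit(s);
            mtx_lock_spin(&sched_lock);
            sflag = p->p_sflag;                     /* snapshot the reasons ... */
            p->p_sflag &= ~(PS_OWEUPC | PS_ALRMPEND | PS_PROFPEND |
                PS_ASTPENDING);                     /* ... and clear them atomically */
            mtx_unlock_spin(&sched_lock);
            /* handle PS_OWEUPC / PS_ALRMPEND / PS_PROFPEND based on sflag */
            userret(p, framep, sticks);             /* may mi_switch() on PS_NEEDRESCHED */
            s = critical_enter();
    }
    /* Return to user mode with the critical section still held; the trap
     * return path is assumed to perform the equivalent of critical_exit(). */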