-rw-r--r--  sys/alpha/linux/linux_machdep.c        2
-rw-r--r--  sys/amd64/linux32/linux32_machdep.c    2
-rw-r--r--  sys/i386/linux/linux_machdep.c         2
-rw-r--r--  sys/kern/init_main.c                   2
-rw-r--r--  sys/kern/kern_fork.c                   2
-rw-r--r--  sys/kern/kern_intr.c                   4
-rw-r--r--  sys/kern/kern_kse.c                    4
-rw-r--r--  sys/kern/kern_kthread.c                2
-rw-r--r--  sys/kern/kern_switch.c                12
-rw-r--r--  sys/kern/kern_thr.c                    2
-rw-r--r--  sys/kern/sched_4bsd.c                 15
-rw-r--r--  sys/kern/sched_ule.c                  17
-rw-r--r--  sys/kern/subr_turnstile.c              2
-rw-r--r--  sys/sys/proc.h                         8
-rw-r--r--  sys/sys/sched.h                        2
-rw-r--r--  sys/vm/vm_zeroidle.c                   2
16 files changed, 50 insertions, 30 deletions
diff --git a/sys/alpha/linux/linux_machdep.c b/sys/alpha/linux/linux_machdep.c
index 061928c..6cd0834 100644
--- a/sys/alpha/linux/linux_machdep.c
+++ b/sys/alpha/linux/linux_machdep.c
@@ -180,7 +180,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
*/
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td2);
- setrunqueue(td2);
+ setrunqueue(td2, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = p2->p_pid;
diff --git a/sys/amd64/linux32/linux32_machdep.c b/sys/amd64/linux32/linux32_machdep.c
index 4a593ec..731754c 100644
--- a/sys/amd64/linux32/linux32_machdep.c
+++ b/sys/amd64/linux32/linux32_machdep.c
@@ -503,7 +503,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
*/
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td2);
- setrunqueue(td2);
+ setrunqueue(td2, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = p2->p_pid;
diff --git a/sys/i386/linux/linux_machdep.c b/sys/i386/linux/linux_machdep.c
index 724719a..0e2786c 100644
--- a/sys/i386/linux/linux_machdep.c
+++ b/sys/i386/linux/linux_machdep.c
@@ -365,7 +365,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
*/
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td2);
- setrunqueue(td2);
+ setrunqueue(td2, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = p2->p_pid;
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index e6363e1..cb39a82 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -727,7 +727,7 @@ kick_init(const void *udata __unused)
td = FIRST_THREAD_IN_PROC(initproc);
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td);
- setrunqueue(td); /* XXXKSE */
+ setrunqueue(td, SRQ_BORING); /* XXXKSE */
mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 8451861..43a5595 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -710,7 +710,7 @@ again:
*/
if ((flags & RFSTOPPED) == 0) {
TD_SET_CAN_RUN(td2);
- setrunqueue(td2);
+ setrunqueue(td2, SRQ_BORING);
}
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index c349852..ca7c206 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -240,7 +240,7 @@ ithread_destroy(struct ithd *ithread)
mtx_lock_spin(&sched_lock);
if (TD_AWAITING_INTR(td)) {
TD_CLR_IWAIT(td);
- setrunqueue(td);
+ setrunqueue(td, SRQ_INTR);
}
mtx_unlock_spin(&sched_lock);
mtx_unlock(&ithread->it_lock);
@@ -408,7 +408,7 @@ ithread_schedule(struct ithd *ithread)
if (TD_AWAITING_INTR(td)) {
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
TD_CLR_IWAIT(td);
- setrunqueue(td);
+ setrunqueue(td, SRQ_INTR);
} else {
CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
__func__, p->p_pid, ithread->it_need, td->td_state);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 5636df5..7eca37a 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -751,7 +751,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
*/
if (newtd != td) {
mtx_lock_spin(&sched_lock);
- setrunqueue(newtd);
+ setrunqueue(newtd, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}
return (0);
@@ -1113,7 +1113,7 @@ thread_switchout(struct thread *td)
td->td_upcall = NULL;
td->td_pflags &= ~TDP_CAN_UNBIND;
td2 = thread_schedule_upcall(td, ku);
- setrunqueue(td2);
+ setrunqueue(td2, SRQ_YIELDING);
}
}
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index 87661db..c51524a 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -114,7 +114,7 @@ kthread_create(void (*func)(void *), void *arg,
/* Delay putting it on the run queue until now. */
if (!(flags & RFSTOPPED)) {
mtx_lock_spin(&sched_lock);
- setrunqueue(td);
+ setrunqueue(td, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 2ec45c9..54d0025 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -214,7 +214,7 @@ kse_reassign(struct kse *ke)
td->td_kse = ke;
ke->ke_thread = td;
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
- sched_add(td);
+ sched_add(td, SRQ_BORING);
return;
}
@@ -298,7 +298,7 @@ adjustrunqueue( struct thread *td, int newpri)
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
sched_rem(td);
- sched_add(td);
+ sched_add(td, SRQ_BORING);
}
return;
}
@@ -316,11 +316,11 @@ adjustrunqueue( struct thread *td, int newpri)
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
kg->kg_runnable--;
td->td_priority = newpri;
- setrunqueue(td);
+ setrunqueue(td, SRQ_BORING);
}
void
-setrunqueue(struct thread *td)
+setrunqueue(struct thread *td, int flags)
{
struct kse *ke;
struct ksegrp *kg;
@@ -341,7 +341,7 @@ setrunqueue(struct thread *td)
* and the KSE is always already attached.
* Totally ignore the ksegrp run queue.
*/
- sched_add(td);
+ sched_add(td, flags);
return;
}
@@ -436,7 +436,7 @@ setrunqueue(struct thread *td)
td2->td_kse = ke;
ke->ke_thread = td2;
}
- sched_add(ke->ke_thread);
+ sched_add(ke->ke_thread, flags);
} else {
CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
td, td->td_ksegrp, td->td_proc->p_pid);
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 0303fa9..7be6b6b 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -182,7 +182,7 @@ thr_create(struct thread *td, struct thr_create_args *uap)
TD_SET_CAN_RUN(td0);
if ((uap->flags & THR_SUSPENDED) == 0)
- setrunqueue(td0);
+ setrunqueue(td0, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 6c6f0c3..ae8046a 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -664,7 +664,7 @@ sched_switch(struct thread *td, struct thread *newtd)
TD_SET_CAN_RUN(td);
else if (TD_IS_RUNNING(td)) {
/* Put us back on the run queue (kse and all). */
- setrunqueue(td);
+ setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
} else if (p->p_flag & P_SA) {
/*
* We will not be on the run queue. So we must be
@@ -691,11 +691,11 @@ sched_wakeup(struct thread *td)
if (kg->kg_slptime > 1)
updatepri(kg);
kg->kg_slptime = 0;
- setrunqueue(td);
+ setrunqueue(td, SRQ_BORING);
}
void
-sched_add(struct thread *td)
+sched_add(struct thread *td, int flags)
{
struct kse *ke;
@@ -717,8 +717,13 @@ sched_add(struct thread *td)
*/
if (KSE_CAN_MIGRATE(ke) || ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)])
#endif
- if (maybe_preempt(td))
- return;
+ /*
+ * Don't try to preempt if we are already switching;
+ * all hell might break loose.
+ */
+ if ((flags & SRQ_YIELDING) == 0)
+ if (maybe_preempt(td))
+ return;
#ifdef SMP
if (KSE_CAN_MIGRATE(ke)) {
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 0e88c7b..5582a40 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1183,7 +1183,7 @@ sched_switch(struct thread *td, struct thread *newtd)
* Don't allow the kse to migrate from a preemption.
*/
ke->ke_flags |= KEF_HOLD;
- setrunqueue(td);
+ setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
} else {
if (ke->ke_runq) {
kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
@@ -1281,7 +1281,7 @@ sched_wakeup(struct thread *td)
td->td_kse, hzticks);
td->td_slptime = 0;
}
- setrunqueue(td);
+ setrunqueue(td, SRQ_BORING);
}
/*
@@ -1581,10 +1581,19 @@ restart:
}
void
-sched_add(struct thread *td)
+sched_add(struct thread *td, int flags)
{
- sched_add_internal(td, 1);
+ /* Let Jeff work out how to map the flags better. */
+ /* I'm open to suggestions. */
+ if (flags & SRQ_YIELDING)
+ /*
+ * Preempting during switching can be bad juju,
+ * especially for KSE processes.
+ */
+ sched_add_internal(td, 0);
+ else
+ sched_add_internal(td, 1);
}
static void
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index c983379..58f9c44 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -736,7 +736,7 @@ turnstile_unpend(struct turnstile *ts)
td->td_lockname = NULL;
TD_CLR_LOCK(td);
MPASS(TD_CAN_RUN(td));
- setrunqueue(td);
+ setrunqueue(td, SRQ_BORING);
} else {
td->td_flags |= TDF_TSNOBLOCK;
MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 264d48c..893add0 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -716,6 +716,12 @@ struct proc {
#define SW_VOL 0x0001 /* Voluntary switch. */
#define SW_INVOL 0x0002 /* Involuntary switch. */
+/* Flags for setrunqueue(). Why are we setting this thread on the run queue? */
+#define SRQ_BORING 0x0000 /* No special circumstances */
+#define SRQ_YIELDING 0x0001 /* We are yielding (from mi_switch) */
+#define SRQ_OURSELF 0x0002 /* It is ourself (from mi_switch) */
+#define SRQ_INTR 0x0004 /* It is probably urgent */
+
/* How values for thread_single(). */
#define SINGLE_NO_EXIT 0
#define SINGLE_EXIT 1
@@ -905,7 +911,7 @@ void proc_reparent(struct proc *child, struct proc *newparent);
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
void setrunnable(struct thread *);
-void setrunqueue(struct thread *);
+void setrunqueue(struct thread *, int flags);
void setsugid(struct proc *p);
int sigonstack(size_t sp);
void sleepinit(void);
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 6836fe0..ae453c7 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -73,7 +73,7 @@ void sched_wakeup(struct thread *td);
/*
* Threads are moved on and off of run queues
*/
-void sched_add(struct thread *td);
+void sched_add(struct thread *td, int flags);
struct kse *sched_choose(void); /* XXX Should be thread * */
void sched_clock(struct thread *td);
void sched_rem(struct thread *td);
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 10f7800..6e50b6b 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -187,7 +187,7 @@ pagezero_start(void __unused *arg)
pagezero_proc->p_flag |= P_NOLOAD;
PROC_UNLOCK(pagezero_proc);
mtx_lock_spin(&sched_lock);
- setrunqueue(FIRST_THREAD_IN_PROC(pagezero_proc));
+ setrunqueue(FIRST_THREAD_IN_PROC(pagezero_proc), SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}
SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL)
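
Below is a small standalone C sketch (not part of the commit) illustrating the calling convention the diff introduces: callers pass an SRQ_* flag to setrunqueue()/sched_add(), and the 4BSD scheduler skips its preemption attempt when the caller is already yielding. The SRQ_* values are copied from the proc.h hunk above; maybe_preempt(), mock_sched_add(), and the simplified struct thread are userspace stand-ins for the real kernel symbols.

#include <stdio.h>

#define SRQ_BORING   0x0000  /* No special circumstances */
#define SRQ_YIELDING 0x0001  /* We are yielding (from mi_switch) */
#define SRQ_OURSELF  0x0002  /* It is ourself (from mi_switch) */
#define SRQ_INTR     0x0004  /* It is probably urgent */

struct thread {
	int td_priority;
};

/* Stand-in for the kernel's maybe_preempt(); pretend preemption always succeeds. */
static int
maybe_preempt(struct thread *td)
{
	printf("maybe_preempt: switching to thread with prio %d\n",
	    td->td_priority);
	return (1);
}

/* Mirrors the sched_4bsd.c hunk: never attempt preemption while the caller is yielding. */
static void
mock_sched_add(struct thread *td, int flags)
{
	if ((flags & SRQ_YIELDING) == 0)
		if (maybe_preempt(td))
			return;
	printf("enqueued thread prio %d (flags 0x%x)\n",
	    td->td_priority, flags);
}

int
main(void)
{
	struct thread td = { .td_priority = 4 };

	mock_sched_add(&td, SRQ_BORING);                 /* may preempt */
	mock_sched_add(&td, SRQ_OURSELF | SRQ_YIELDING); /* always just enqueues */
	return (0);
}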