Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/init_main.c        2
-rw-r--r--   sys/kern/kern_fork.c        2
-rw-r--r--   sys/kern/kern_intr.c        4
-rw-r--r--   sys/kern/kern_kse.c         4
-rw-r--r--   sys/kern/kern_kthread.c     2
-rw-r--r--   sys/kern/kern_switch.c     12
-rw-r--r--   sys/kern/kern_thr.c         2
-rw-r--r--   sys/kern/sched_4bsd.c      15
-rw-r--r--   sys/kern/sched_ule.c       17
-rw-r--r--   sys/kern/subr_turnstile.c   2
10 files changed, 38 insertions, 24 deletions
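Note on the new argument threaded through the hunks below: setrunqueue() and sched_add() now take a flags parameter telling the scheduler where the enqueue request comes from. The values used in this change are SRQ_BORING, SRQ_YIELDING, SRQ_OURSELF and SRQ_INTR; their definitions live in a header outside sys/kern and are not part of this diffstat, so the sketch below is only an illustration of their likely shape, not the committed definitions.

/*
 * Illustrative sketch only -- the numeric values here are assumptions;
 * only the flag names are taken from the diff that follows.
 */
#define	SRQ_BORING	0x0000	/* No special circumstances. */
#define	SRQ_YIELDING	0x0001	/* Caller is in the middle of a switch. */
#define	SRQ_OURSELF	0x0002	/* Caller is requeueing itself. */
#define	SRQ_INTR	0x0004	/* Request comes from interrupt handling. */

Call sites pass one of these hints, e.g. setrunqueue(td, SRQ_BORING) for the ordinary case, SRQ_INTR when an interrupt thread is made runnable, and SRQ_OURSELF|SRQ_YIELDING when a running thread puts itself back on the run queue from sched_switch(); the 4BSD and ULE sched_add() implementations use SRQ_YIELDING to skip preemption while a switch is already in progress.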
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index e6363e1..cb39a82 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -727,7 +727,7 @@ kick_init(const void *udata __unused)
 	td = FIRST_THREAD_IN_PROC(initproc);
 	mtx_lock_spin(&sched_lock);
 	TD_SET_CAN_RUN(td);
-	setrunqueue(td);	/* XXXKSE */
+	setrunqueue(td, SRQ_BORING);	/* XXXKSE */
 	mtx_unlock_spin(&sched_lock);
 }
 SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 8451861..43a5595 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -710,7 +710,7 @@ again:
 	 */
 	if ((flags & RFSTOPPED) == 0) {
 		TD_SET_CAN_RUN(td2);
-		setrunqueue(td2);
+		setrunqueue(td2, SRQ_BORING);
 	}

 	mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index c349852..ca7c206 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -240,7 +240,7 @@ ithread_destroy(struct ithd *ithread)
 	mtx_lock_spin(&sched_lock);
 	if (TD_AWAITING_INTR(td)) {
 		TD_CLR_IWAIT(td);
-		setrunqueue(td);
+		setrunqueue(td, SRQ_INTR);
 	}
 	mtx_unlock_spin(&sched_lock);
 	mtx_unlock(&ithread->it_lock);
@@ -408,7 +408,7 @@ ithread_schedule(struct ithd *ithread)
 	if (TD_AWAITING_INTR(td)) {
 		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
 		TD_CLR_IWAIT(td);
-		setrunqueue(td);
+		setrunqueue(td, SRQ_INTR);
 	} else {
 		CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
 		    __func__, p->p_pid, ithread->it_need, td->td_state);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 5636df5..7eca37a 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -751,7 +751,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
 	 */
 	if (newtd != td) {
 		mtx_lock_spin(&sched_lock);
-		setrunqueue(newtd);
+		setrunqueue(newtd, SRQ_BORING);
 		mtx_unlock_spin(&sched_lock);
 	}
 	return (0);
@@ -1113,7 +1113,7 @@ thread_switchout(struct thread *td)
 		td->td_upcall = NULL;
 		td->td_pflags &= ~TDP_CAN_UNBIND;
 		td2 = thread_schedule_upcall(td, ku);
-		setrunqueue(td2);
+		setrunqueue(td2, SRQ_YIELDING);
 	}
 }

diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index 87661db..c51524a 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -114,7 +114,7 @@ kthread_create(void (*func)(void *), void *arg,
 	/* Delay putting it on the run queue until now. */
 	if (!(flags & RFSTOPPED)) {
 		mtx_lock_spin(&sched_lock);
-		setrunqueue(td);
+		setrunqueue(td, SRQ_BORING);
 		mtx_unlock_spin(&sched_lock);
 	}

diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 2ec45c9..54d0025 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -214,7 +214,7 @@ kse_reassign(struct kse *ke)
 		td->td_kse = ke;
 		ke->ke_thread = td;
 		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
-		sched_add(td);
+		sched_add(td, SRQ_BORING);
 		return;
 	}

@@ -298,7 +298,7 @@ adjustrunqueue( struct thread *td, int newpri)
 		td->td_priority = newpri;
 		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
 			sched_rem(td);
-			sched_add(td);
+			sched_add(td, SRQ_BORING);
 		}
 		return;
 	}
@@ -316,11 +316,11 @@ adjustrunqueue( struct thread *td, int newpri)
 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
 	kg->kg_runnable--;
 	td->td_priority = newpri;
-	setrunqueue(td);
+	setrunqueue(td, SRQ_BORING);
 }

 void
-setrunqueue(struct thread *td)
+setrunqueue(struct thread *td, int flags)
 {
 	struct kse *ke;
 	struct ksegrp *kg;
@@ -341,7 +341,7 @@ setrunqueue(struct thread *td)
 		 * and the KSE is always already attached.
 		 * Totally ignore the ksegrp run queue.
 		 */
-		sched_add(td);
+		sched_add(td, flags);
 		return;
 	}

@@ -436,7 +436,7 @@ setrunqueue(struct thread *td)
 			td2->td_kse = ke;
 			ke->ke_thread = td2;
 		}
-		sched_add(ke->ke_thread);
+		sched_add(ke->ke_thread, flags);
 	} else {
 		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
 		    td, td->td_ksegrp, td->td_proc->p_pid);
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 0303fa9..7be6b6b 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -182,7 +182,7 @@ thr_create(struct thread *td, struct thr_create_args *uap)

 	TD_SET_CAN_RUN(td0);
 	if ((uap->flags & THR_SUSPENDED) == 0)
-		setrunqueue(td0);
+		setrunqueue(td0, SRQ_BORING);

 	mtx_unlock_spin(&sched_lock);

diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 6c6f0c3..ae8046a 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -664,7 +664,7 @@ sched_switch(struct thread *td, struct thread *newtd)
 		TD_SET_CAN_RUN(td);
 	else if (TD_IS_RUNNING(td)) {
 		/* Put us back on the run queue (kse and all). */
-		setrunqueue(td);
+		setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
 	} else if (p->p_flag & P_SA) {
 		/*
 		 * We will not be on the run queue. So we must be
@@ -691,11 +691,11 @@ sched_wakeup(struct thread *td)
 	if (kg->kg_slptime > 1)
 		updatepri(kg);
 	kg->kg_slptime = 0;
-	setrunqueue(td);
+	setrunqueue(td, SRQ_BORING);
 }

 void
-sched_add(struct thread *td)
+sched_add(struct thread *td, int flags)
 {
 	struct kse *ke;

@@ -717,8 +717,13 @@
 	 */
 	if (KSE_CAN_MIGRATE(ke) || ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)])
 #endif
-	if (maybe_preempt(td))
-		return;
+	/*
+	 * Don't try preempt if we are already switching.
+	 * all hell might break loose.
+	 */
+	if ((flags & SRQ_YIELDING) == 0)
+		if (maybe_preempt(td))
+			return;

 #ifdef SMP
 	if (KSE_CAN_MIGRATE(ke)) {
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 0e88c7b..5582a40 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1183,7 +1183,7 @@ sched_switch(struct thread *td, struct thread *newtd)
 			 * Don't allow the kse to migrate from a preemption.
 			 */
 			ke->ke_flags |= KEF_HOLD;
-			setrunqueue(td);
+			setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
 		} else {
 			if (ke->ke_runq) {
 				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
@@ -1281,7 +1281,7 @@ sched_wakeup(struct thread *td)
 		    td->td_kse, hzticks);
 		td->td_slptime = 0;
 	}
-	setrunqueue(td);
+	setrunqueue(td, SRQ_BORING);
 }

 /*
@@ -1581,10 +1581,19 @@ restart:
 }

 void
-sched_add(struct thread *td)
+sched_add(struct thread *td, int flags)
 {

-	sched_add_internal(td, 1);
+	/* let jeff work out how to map the flags better */
+	/* I'm open to suggestions */
+	if (flags & SRQ_YIELDING)
+		/*
+		 * Preempting during switching can be bad JUJU
+		 * especially for KSE processes
+		 */
+		sched_add_internal(td, 0);
+	else
+		sched_add_internal(td, 1);
 }

 static void
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index c983379..58f9c44 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -736,7 +736,7 @@ turnstile_unpend(struct turnstile *ts)
 			td->td_lockname = NULL;
 			TD_CLR_LOCK(td);
 			MPASS(TD_CAN_RUN(td));
-			setrunqueue(td);
+			setrunqueue(td, SRQ_BORING);
 		} else {
 			td->td_flags |= TDF_TSNOBLOCK;
 			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));