diff options
author | jeff <jeff@FreeBSD.org> | 2003-10-15 07:47:06 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2003-10-15 07:47:06 +0000 |
commit | 72dab909cc26aadb64e45338f150233dc134d10d (patch) | |
tree | f526c3f5c4ad62c797d7ffc9338d81be4a49effc | |
parent | d5430e82187fc86f4912ee0a3bf41cf20300cb66 (diff) | |
download | FreeBSD-src-72dab909cc26aadb64e45338f150233dc134d10d.zip FreeBSD-src-72dab909cc26aadb64e45338f150233dc134d10d.tar.gz |
- If our user_pri doesn't match our actual priority, our priority has been
  elevated either due to priority propagation or because we're in the
  kernel; in either case, put us on the current queue so that we don't
  stop others from using important resources. At some point the priority
  elevations from sleeping in the kernel should go away.
- Remove an optimization in sched_userret(). Before, we would only set
  NEEDRESCHED if there was something of a higher priority available. This
  is a trivial optimization, and it breaks priority propagation because it
  doesn't take threads which we may be blocking into account. Notice that
  the thread which is blocking others gets up to one tick of CPU time before
  we honor this NEEDRESCHED in sched_clock().
-rw-r--r-- | sys/kern/sched_ule.c | 13 |
1 file changed, 10 insertions, 3 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c index 635a0dd..c0e2044 100644 --- a/sys/kern/sched_ule.c +++ b/sys/kern/sched_ule.c @@ -182,9 +182,8 @@ struct td_sched *thread0_sched = &td_sched; #define SCHED_INTERACTIVE(kg) \ (sched_interact_score(kg) < SCHED_INTERACT_THRESH) #define SCHED_CURR(kg, ke) \ - (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || \ - SCHED_INTERACTIVE(kg) || \ - mtx_ownedby(&Giant, (ke)->ke_thread)) + (ke->ke_thread->td_priority != kg->kg_user_pri || \ + SCHED_INTERACTIVE(kg)) /* * Cpu percentage computation macros and defines. @@ -1152,14 +1151,21 @@ void sched_userret(struct thread *td) { struct ksegrp *kg; +#if 0 struct kseq *kseq; struct kse *ke; +#endif kg = td->td_ksegrp; if (td->td_priority != kg->kg_user_pri) { mtx_lock_spin(&sched_lock); td->td_priority = kg->kg_user_pri; + /* + * This optimization is temporarily disabled because it + * breaks priority propagation. + */ +#if 0 kseq = KSEQ_SELF(); if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE && #ifdef SMP @@ -1169,6 +1175,7 @@ sched_userret(struct thread *td) #endif (ke = kseq_choose(kseq, 0)) != NULL && ke->ke_thread->td_priority < td->td_priority) +#endif curthread->td_flags |= TDF_NEEDRESCHED; mtx_unlock_spin(&sched_lock); } |