path: root/sys/kern/sched_ule.c
author		jeff <jeff@FreeBSD.org>	2008-03-12 06:31:06 +0000
committer	jeff <jeff@FreeBSD.org>	2008-03-12 06:31:06 +0000
commit		3b1acbdce295a5ace27e22dba0ae318570aea5bf (patch)
tree		20ec4b3ab190fc07828f6c3f536e039ecdead59a /sys/kern/sched_ule.c
parent		ce12a09ced75026eed87f8a2a64d05ba98bb98d3 (diff)
- Pass the priority argument from *sleep() into sleepq and down into
  sched_sleep().  This removes extra thread_lock() acquisition and
  allows the scheduler to decide what to do with the static boost.
- Change the priority arguments to cv_* to match sleepq/msleep/etc.
  where 0 means no priority change.  Catch -1 in cv_broadcastpri() and
  convert it to 0 for now.
- Set a flag when sleeping in a way that is compatible with swapping
  since direct priority comparisons are meaningless now.
- Add a sysctl to ule, kern.sched.static_boost, that defaults to on
  which controls the boost behavior.  Turning it off gives better
  performance in some workloads but needs more investigation.
- While we're modifying sleepq, change signal and broadcast to both
  return with the lock held as the lock was held on enter.

Reviewed by:	jhb, peter
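To illustrate the revised convention from the caller's side, here is a
minimal sketch using the msleep(9) and cv_broadcastpri(9) interfaces; the
example_* names, lock, and condvar are hypothetical, and the snippet
assumes a kernel with this change applied:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/condvar.h>

	static struct mtx example_mtx;	/* hypothetical lock */
	static struct cv example_cv;	/* hypothetical condition variable */

	static void
	example_sleep(void *ident)
	{

		mtx_lock(&example_mtx);
		/*
		 * The priority argument is now handed through sleepq into
		 * sched_sleep().  PSOCK satisfies prio <= PSOCK, so the
		 * sleeping thread is marked TDF_CANSWAP; with
		 * kern.sched.static_boost on it is also boosted to PSOCK.
		 * Passing 0 instead would leave the priority untouched.
		 */
		msleep(ident, &example_mtx, PSOCK, "exslp", 0);
		mtx_unlock(&example_mtx);
	}

	static void
	example_broadcast(void)
	{

		/*
		 * As with msleep() and the sleepq interfaces, 0 now means
		 * no priority change; -1 is caught in cv_broadcastpri()
		 * and converted to 0 for now.
		 */
		cv_broadcastpri(&example_cv, 0);
	}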
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--	sys/kern/sched_ule.c	10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index c89e45b..ebdc5e0 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -185,6 +185,7 @@ static int preempt_thresh = PRI_MIN_KERN;
 #else
 static int preempt_thresh = 0;
 #endif
+static int static_boost = 1;
 
 /*
  * tdq - per processor runqs and statistics. All fields are protected by the
@@ -1856,12 +1857,16 @@ sched_nice(struct proc *p, int nice)
 /*
  * Record the sleep time for the interactivity scorer.
  */
 void
-sched_sleep(struct thread *td)
+sched_sleep(struct thread *td, int prio)
 {
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	td->td_slptick = ticks;
+	if (TD_IS_SUSPENDED(td) || prio <= PSOCK)
+		td->td_flags |= TDF_CANSWAP;
+	if (static_boost && prio)
+		sched_prio(td, prio);
 }
 
 /*
@@ -1876,6 +1881,7 @@ sched_wakeup(struct thread *td)
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	ts = td->td_sched;
+	td->td_flags &= ~TDF_CANSWAP;
 	/*
 	 * If we slept for more than a tick update our interactivity and
 	 * priority.
@@ -2555,6 +2561,8 @@ SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
     "Interactivity score threshold");
 SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
     0,"Min priority for preemption, lower priorities have greater precedence");
+SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost,
+    0,"Controls whether static kernel priorities are assigned to sleeping threads.");
 #ifdef SMP
 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
     "Number of hz ticks to keep thread affinity for");