diff options
author | jeff <jeff@FreeBSD.org> | 2007-09-22 02:20:14 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2007-09-22 02:20:14 +0000 |
commit | 280a86b2ee7b98d7ca78ed442f3329cb4e4ce224 (patch) | |
tree | 0913108efb1a9d49f979afd610282936dc8b327e | |
parent | b2b7d089f7b65fca40aa6d1e09199f2c4439750c (diff) | |
download | FreeBSD-src-280a86b2ee7b98d7ca78ed442f3329cb4e4ce224.zip FreeBSD-src-280a86b2ee7b98d7ca78ed442f3329cb4e4ce224.tar.gz |
- Improve grammar. s/it's/its/.
- Improve the long-term load balancer by always IPIing exactly once.
Previously the delay after rebalancing could cause problems with
uneven workloads.
- Allow nice to have a linear effect on the interactivity score. This
allows negatively niced programs to stay interactive longer. It may be
useful with very expensive Xorg servers under high loads. In general
it should not be necessary to alter the nice level to improve interactive
response. We may also want to consider never allowing positively niced
processes to become interactive at all.
- Initialize ccpu to 0 rather than 0.0. The decimal point was leftover
from when the code was copied from 4bsd. ccpu is 0 in ULE because ULE
only exports weighted cpu values.
Reported by: Steve Kargl (Load balancing problem)
Approved by: re
-rw-r--r-- | sys/kern/sched_ule.c | 18 |
1 files changed, 13 insertions, 5 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c index 1c112a7..df05473 100644 --- a/sys/kern/sched_ule.c +++ b/sys/kern/sched_ule.c @@ -30,7 +30,7 @@ * performance under load even on uni-processor systems. * * etymology: - * ULE is the last three letters in schedule. It owes it's name to a + * ULE is the last three letters in schedule. It owes its name to a * generic user created for a scheduling system by Paul Mikesell at * Isilon Systems and a general lack of creativity on the part of the author. */ @@ -638,6 +638,11 @@ sched_balance_pair(struct tdq *high, struct tdq *low) move = min(move, transferable); for (i = 0; i < move; i++) tdq_move(high, low); + /* + * IPI the target cpu to force it to reschedule with the new + * workload. + */ + ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT); } TDQ_UNLOCK(high); TDQ_UNLOCK(low); @@ -685,7 +690,6 @@ tdq_move(struct tdq *from, struct tdq *to) ts->ts_cpu = cpu; td->td_lock = TDQ_LOCKPTR(to); tdq_add(to, td, SRQ_YIELDING); - tdq_notify(ts); } /* @@ -926,7 +930,7 @@ sched_setcpu(struct td_sched *ts, int cpu, int flags) return (tdq); #ifdef notyet /* - * If the thread isn't running it's lockptr is a + * If the thread isn't running its lockptr is a * turnstile or a sleepqueue. We can just lock_set without * blocking. */ @@ -1401,8 +1405,12 @@ sched_priority(struct thread *td) * Scores greater than this are placed on the normal timeshare queue * where the priority is partially decided by the most recent cpu * utilization and the rest is decided by nice value. + * + * The nice value of the process has a linear effect on the calculated + * score. Negative nice values make it easier for a thread to be + * considered interactive. 
*/ - score = sched_interact_score(td); + score = sched_interact_score(td) - td->td_proc->p_nice; if (score < sched_interact) { pri = PRI_MIN_REALTIME; pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) @@ -2635,7 +2643,7 @@ SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0, #endif /* ps compat. All cpu percentages from ULE are weighted. */ -static int ccpu = 0.0; +static int ccpu = 0; SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); |