| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-04-19 19:45:00 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-04-19 19:45:00 +0200 |
| commit | ac884dec6d4a7df252150af875cffddf8f1d9c15 (patch) | |
| tree | 6ba7140a8b6e7b332fd687d24de45d2f6ded8035 /kernel/sched_fair.c | |
| parent | 58d6c2d72f8628f39e8689fbde8aa177fcf00a37 (diff) | |
| download | op-kernel-dev-ac884dec6d4a7df252150af875cffddf8f1d9c15.zip op-kernel-dev-ac884dec6d4a7df252150af875cffddf8f1d9c15.tar.gz | |
sched: fair-group scheduling vs latency
Currently FAIR_GROUP scheduling grows the scheduler latency beyond
sysctl_sched_latency; invert this so that it stays within the latency target.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
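
To make the effect concrete (the numbers below are illustrative, not taken from the patch): with a 20 ms sysctl_sched_latency period, a nice-0 task (weight 1024) running inside a group that owns half of a runqueue of total weight 2048 now gets

slice = 20 ms * (1024/2048) * (1024/2048) = 5 ms,

because the patched sched_slice() walks up the group hierarchy and scales the period by weight/rq_weight at every level. Each factor is at most 1, so the resulting slice can never exceed the period. Before the patch, the scaling was applied only against the entity's own cfs_rq, so nested groups could stretch the effective latency past sysctl_sched_latency.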
Diffstat (limited to 'kernel/sched_fair.c')
| -rw-r--r-- | kernel/sched_fair.c | 44 |
|---|---|---|

1 file changed, 31 insertions, 13 deletions
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b89fec9..9e301a2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -362,29 +362,47 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	return calc_delta_mine(__sched_period(cfs_rq->nr_running),
-			       se->load.weight, &cfs_rq->load);
+	u64 slice = __sched_period(cfs_rq->nr_running);
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		slice *= se->load.weight;
+		do_div(slice, cfs_rq->load.weight);
+	}
+
+
+	return slice;
 }
 
 /*
- * We calculate the vruntime slice.
+ * We calculate the vruntime slice of a to be inserted task
  *
  * vs = s/w = p/rw
  */
-static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u64 vslice = __sched_period(nr_running);
+	unsigned long nr_running = cfs_rq->nr_running;
+	unsigned long weight;
+	u64 vslice;
 
-	vslice *= NICE_0_LOAD;
-	do_div(vslice, rq_weight);
+	if (!se->on_rq)
+		nr_running++;
 
-	return vslice;
-}
+	vslice = __sched_period(nr_running);
 
-static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
-			cfs_rq->nr_running + 1);
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		weight = cfs_rq->load.weight;
+		if (!se->on_rq)
+			weight += se->load.weight;
+
+		vslice *= NICE_0_LOAD;
+		do_div(vslice, weight);
+	}
+
+	return vslice;
 }
 
 /*
```
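
To see why the hierarchical walk keeps the slice within the period, here is a minimal stand-alone user-space model of the same arithmetic. The function and variable names are hypothetical, chosen only for illustration; the kernel code instead walks the live hierarchy with for_each_sched_entity() and uses do_div() for the 64-bit division.

```c
#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical model of the patched sched_slice(): starting from the
 * full period, each level of the group hierarchy scales the slice by
 * weight/rq_weight. Every factor is <= 1, so the final slice can never
 * exceed the period, which is how the latency target is honoured
 * across nested groups.
 */
static uint64_t model_slice(uint64_t period_ns,
			    const unsigned long *weight,
			    const unsigned long *rq_weight,
			    int levels)
{
	uint64_t slice = period_ns;
	int i;

	for (i = 0; i < levels; i++) {
		slice *= weight[i];	/* entity weight at this level */
		slice /= rq_weight[i];	/* total runqueue weight there */
	}
	return slice;
}

int main(void)
{
	/*
	 * A nice-0 task (weight 1024) inside a group that itself owns
	 * half of a runqueue of total weight 2048:
	 * 20 ms * (1024/2048) * (1024/2048) = 5 ms.
	 */
	unsigned long w[]  = { 1024, 1024 };
	unsigned long rw[] = { 2048, 2048 };

	printf("slice = %llu ns\n",
	       (unsigned long long)model_slice(20000000ULL, w, rw, 2));
	return 0;
}
```

The same product-of-fractions structure explains sched_vslice_add(): it performs the identical walk, but starts from a period sized for nr_running + 1 (when the entity is not yet on the runqueue) and scales by NICE_0_LOAD/weight at each level, matching the vs = s/w = p/rw comment in the patch.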