| author | Ingo Molnar <mingo@elte.hu> | 2007-08-09 11:16:52 +0200 |
| committer | Ingo Molnar <mingo@elte.hu> | 2007-08-09 11:16:52 +0200 |
| commit | 7cff8cf61cac15fa29a1ca802826d2bcbca66152 (patch) |
| tree | 86fa25bbf7d8cd3b23f7230fb821cdb04990ebfc /kernel/sched_fair.c |
| parent | a69edb55605117cc0f20aa36c49c20b96590774d (diff) |
sched: refine negative nice level granularity
Refine the granularity of negative nice level tasks: let them
reschedule more often, to offset the effect of their consuming
wait_runtime proportionately more slowly. (This makes nice-0 task
scheduling smoother in the presence of negatively reniced tasks.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 16 |
1 file changed, 10 insertions, 6 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7a632c5..e91db32 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -222,21 +222,25 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
 {
 	u64 tmp;
 
+	if (likely(curr->load.weight == NICE_0_LOAD))
+		return granularity;
 	/*
-	 * Negative nice levels get the same granularity as nice-0:
+	 * Positive nice levels get the same granularity as nice-0:
 	 */
-	if (likely(curr->load.weight >= NICE_0_LOAD))
-		return granularity;
+	if (likely(curr->load.weight < NICE_0_LOAD)) {
+		tmp = curr->load.weight * (u64)granularity;
+		return (long) (tmp >> NICE_0_SHIFT);
+	}
 	/*
-	 * Positive nice level tasks get linearly finer
+	 * Negative nice level tasks get linearly finer
 	 * granularity:
 	 */
-	tmp = curr->load.weight * (u64)granularity;
+	tmp = curr->load.inv_weight * (u64)granularity;
 
 	/*
 	 * It will always fit into 'long':
 	 */
-	return (long) (tmp >> NICE_0_SHIFT);
+	return (long) (tmp >> WMULT_SHIFT);
 }
 
 static inline void
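
For illustration, below is a minimal user-space sketch that mirrors the arithmetic of the patched niced_granularity(). It assumes the 2.6.23-era constants NICE_0_SHIFT == 10 (hence NICE_0_LOAD == 1024) and WMULT_SHIFT == 32, and approximates inv_weight as 2^32 / weight; the load_weight struct, example weights, and granularity value are illustrative stand-ins, not the kernel's sched_entity or prio_to_weight[] table.

/*
 * Hedged sketch of the patched niced_granularity() arithmetic,
 * runnable in user space. Assumptions (not taken from this commit):
 * NICE_0_SHIFT == 10, NICE_0_LOAD == 1024, WMULT_SHIFT == 32,
 * inv_weight ~= 2^32 / weight, and illustrative example weights.
 */
#include <stdio.h>
#include <stdint.h>

#define NICE_0_SHIFT	10
#define NICE_0_LOAD	(1UL << NICE_0_SHIFT)
#define WMULT_SHIFT	32

struct load_weight {
	unsigned long weight;		/* task load weight */
	uint64_t inv_weight;		/* ~= 2^WMULT_SHIFT / weight */
};

static long
niced_granularity(const struct load_weight *load, unsigned long granularity)
{
	uint64_t tmp;

	/* nice-0 tasks keep the granularity unchanged */
	if (load->weight == NICE_0_LOAD)
		return granularity;
	/* lighter than nice-0 (positive nice): scale by weight, as before */
	if (load->weight < NICE_0_LOAD) {
		tmp = load->weight * (uint64_t)granularity;
		return (long)(tmp >> NICE_0_SHIFT);
	}
	/* heavier than nice-0 (negative nice): scale by inv_weight */
	tmp = load->inv_weight * (uint64_t)granularity;
	return (long)(tmp >> WMULT_SHIFT);
}

int main(void)
{
	/* illustrative weights: heavier than, equal to, lighter than nice-0 */
	unsigned long weights[] = { 3072, 1024, 512 };
	unsigned long granularity = 2000000;	/* 2 ms in ns, arbitrary */
	size_t i;

	for (i = 0; i < sizeof(weights) / sizeof(weights[0]); i++) {
		struct load_weight lw = {
			.weight = weights[i],
			.inv_weight = ((uint64_t)1 << WMULT_SHIFT) / weights[i],
		};
		printf("weight %5lu -> granularity %ld\n",
		       lw.weight, niced_granularity(&lw, granularity));
	}
	return 0;
}

With these assumptions, only the heavier-than-nice-0 case takes the new inv_weight path and comes back with a smaller granularity than a nice-0 task, so it is rescheduled more often, which matches the behaviour described in the commit message.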