author    Ingo Molnar <mingo@elte.hu>  2007-10-15 17:00:04 +0200
committer Ingo Molnar <mingo@elte.hu>  2007-10-15 17:00:04 +0200
commit    08e2388aa1e40cb06f7d04ac621e2ae94e1d8fdc (patch)
tree      07ef79ec5f60879471bfcdef1da7e1d37cbddb4e
parent    1091985b482fdd577a5c511059b9d7b4467bd15d (diff)
download  op-kernel-dev-08e2388aa1e40cb06f7d04ac621e2ae94e1d8fdc.zip
          op-kernel-dev-08e2388aa1e40cb06f7d04ac621e2ae94e1d8fdc.tar.gz
sched: clean up calc_weighted()
clean up calc_weighted(): we always use the normalized shift, so there is
no need to pass it in. Also, push the non-nice-0 branch into the function.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
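For reference, a minimal user-space sketch of the fixed-point math the cleaned-up
helper performs. This is not the kernel code: the struct below is reduced to the
one field the helper reads, unlikely() is dropped, and NICE_0_SHIFT/NICE_0_LOAD
are hard-coded to their 2.6.23-era values (shift 10, load 1024).

    #include <stdio.h>
    #include <stdint.h>

    #define NICE_0_SHIFT 10
    #define NICE_0_LOAD  (1UL << NICE_0_SHIFT)

    /* Simplified stand-in for the kernel's sched_entity/load_weight pair. */
    struct sched_entity {
        unsigned long weight;
    };

    static unsigned long calc_weighted(unsigned long delta, struct sched_entity *se)
    {
        unsigned long weight = se->weight;

        /* Non-nice-0 branch: scale the delta by weight/NICE_0_LOAD. */
        if (weight != NICE_0_LOAD)
            return (uint64_t)delta * weight >> NICE_0_SHIFT;
        /* Nice-0 fast path: delta * 1024 >> 10 == delta, so skip the math. */
        return delta;
    }

    int main(void)
    {
        struct sched_entity nice0 = { NICE_0_LOAD };
        struct sched_entity heavy = { 2 * NICE_0_LOAD };

        /* Prints "1000 2000": unchanged at nice 0, doubled at twice the weight. */
        printf("%lu %lu\n", calc_weighted(1000, &nice0), calc_weighted(1000, &heavy));
        return 0;
    }

The weight == NICE_0_LOAD test inside the helper is why the callers in the diff
below no longer need their own nice-0 check: the identity case short-circuits
before any multiplication happens.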
-rw-r--r--  kernel/sched_fair.c | 31
1 file changed, 8 insertions(+), 23 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 91a227b..b46f807 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -397,27 +397,16 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
-/*
- * We calculate fair deltas here, so protect against the random effects
- * of a multiplication overflow by capping it to the runtime limit:
- */
-#if BITS_PER_LONG == 32
 static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+calc_weighted(unsigned long delta, struct sched_entity *se)
 {
-	u64 tmp = (u64)delta * weight >> shift;
+	unsigned long weight = se->load.weight;
 
-	if (unlikely(tmp > sysctl_sched_runtime_limit*2))
-		return sysctl_sched_runtime_limit*2;
-	return tmp;
+	if (unlikely(weight != NICE_0_LOAD))
+		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
+	else
+		return delta;
 }
-#else
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
-	return delta * weight >> shift;
-}
-#endif
 
 /*
  * Task is being enqueued - update stats:
@@ -469,9 +458,7 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-						NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	add_wait_runtime(cfs_rq, se, delta_fair);
 }
@@ -554,9 +541,7 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		delta_fair = div64_likely32((u64)delta_fair * load,
 					    load + se->load.weight);
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-						NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);