author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-12-24 14:18:21 +0100
committer  Ingo Molnar <mingo@elte.hu>              2010-01-21 13:40:15 +0100
commit     8f190fb3f7a405682666d3723f6ec370b5afe4da
tree       4494079705c3c18e5e4f48c5a77877677b244d5d /kernel
parent     f492e12ef050e02bf0185b6b57874992591b9be1
sched: Assume *balance is valid
Since all load_balance() callers will have !NULL balance parameters, we
can now assume so and remove a few checks.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
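For illustration, the simplification follows a common pattern: once every call site is known to pass a valid pointer for an out-parameter, the callee no longer needs a defensive NULL test before dereferencing it. The sketch below is a minimal, self-contained C example with hypothetical names; it is not the kernel code itself.

#include <stddef.h>

/* Before: the callee guards the dereference because a caller
 * might conceivably pass NULL for the out-parameter. */
static void update_stats_old(int local_group, int *balance)
{
	if (local_group && balance)
		*balance = 0;
}

/* After: every caller is guaranteed to pass a valid pointer,
 * so the NULL test is dropped and the dereference is unconditional. */
static void update_stats_new(int local_group, int *balance)
{
	if (local_group)
		*balance = 0;
}

int main(void)
{
	int balance = 1;

	/* All call sites pass &balance, never NULL. */
	update_stats_new(1, &balance);
	return balance;	/* exits with 0 */
}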
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_fair.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index de5ab12..0b482f5 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2465,7 +2465,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * to do the newly idle load balance.
 	 */
 	if (idle != CPU_NEWLY_IDLE && local_group &&
-	    balance_cpu != this_cpu && balance) {
+	    balance_cpu != this_cpu) {
 		*balance = 0;
 		return;
 	}
@@ -2528,7 +2528,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
 				   local_group, cpus, balance, &sgs);
 
-		if (local_group && balance && !(*balance))
+		if (local_group && !(*balance))
 			return;
 
 		sds->total_load += sgs.group_load;
@@ -2720,7 +2720,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 *    5) The imbalance is within the specified limit.
 	 *    6) Any rebalance would lead to ping-pong
 	 */
-	if (balance && !(*balance))
+	if (!(*balance))
 		goto ret;
 
 	if (!sds.busiest || sds.busiest_nr_running == 0)