| author | Nick Piggin <nickpiggin@yahoo.com.au> | 2005-06-25 14:57:08 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-25 16:24:40 -0700 |
| commit | 16cfb1c04c3cbe3759f339d3333e7e1e7d59712a (patch) | |
| tree | 23e8778f4eef5df4dc256dcabd1b564a8221f651 /kernel | |
| parent | 8102679447da7fcbcb5226ee0207c3a034bc6d5f (diff) | |
[PATCH] sched: reduce active load balancing
Fix up active load balancing so that it is not triggered in cases where it should not be.
Reset the nr_balance_failed counter at more of the points where we have found
conditions to be balanced. This reduces the overly aggressive active balancing
seen on some workloads.
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
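For background: in the scheduler of this era, active load balancing is gated by a per-domain failure counter. load_balance() increments sd->nr_balance_failed when it cannot move any tasks, and only once that counter climbs past a threshold derived from sd->cache_nice_tries does it resort to waking the migration thread to push a running task. The standalone program below is a minimal sketch of that interaction, not kernel code: the struct, the threshold value, and the sequence of attempts are all illustrative. It shows why clearing the counter on every balanced outcome, as this patch does in more places, keeps occasional genuine failures from accumulating into an active-balance trigger.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the sched_domain fields involved here. */
struct toy_domain {
	int nr_balance_failed;  /* consecutive failed balance attempts */
	int cache_nice_tries;   /* active balancing fires above this */
};

/*
 * One balance attempt: 'moved' is how many tasks were migrated and
 * 'balanced' means the domain was found to have nothing worth moving.
 * Returns true when active balancing would be triggered.
 */
static bool balance_attempt(struct toy_domain *sd, int moved, bool balanced)
{
	if (balanced || moved > 0) {
		/* The patch resets the counter on these outcomes too. */
		sd->nr_balance_failed = 0;
		return false;
	}

	/* Genuine failure: an imbalance existed but nothing could be moved. */
	sd->nr_balance_failed++;
	return sd->nr_balance_failed > sd->cache_nice_tries;
}

int main(void)
{
	struct toy_domain sd = { .nr_balance_failed = 0, .cache_nice_tries = 2 };

	/* A made-up sequence of balanced passes interleaved with failures. */
	struct { int moved; bool balanced; } attempts[] = {
		{ 0, true }, { 0, false }, { 0, false },
		{ 0, true }, { 0, false }, { 0, false },
	};

	for (unsigned int i = 0; i < sizeof(attempts) / sizeof(attempts[0]); i++) {
		bool active = balance_attempt(&sd, attempts[i].moved,
					      attempts[i].balanced);
		printf("attempt %u: nr_balance_failed=%d, active balance: %s\n",
		       i, sd.nr_balance_failed, active ? "yes" : "no");
	}
	return 0;
}
```

With the resets in place the counter never exceeds 2 in this sequence, so active balancing is never requested; drop the reset on the balanced passes and the same sequence would trip the threshold by the fifth attempt.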
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 16
1 file changed, 10 insertions(+), 6 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 2794c79..03d7377 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2021,6 +2021,7 @@ out_balanced:
 
 	schedstat_inc(sd, lb_balanced[idle]);
 
+	sd->nr_balance_failed = 0;
 	/* tune up the balancing interval */
 	if (sd->balance_interval < sd->max_interval)
 		sd->balance_interval *= 2;
@@ -2046,16 +2047,14 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
 	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
 	if (!group) {
-		schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
 		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
-		goto out;
+		goto out_balanced;
 	}
 
 	busiest = find_busiest_queue(group);
 	if (!busiest || busiest == this_rq) {
-		schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
-		goto out;
+		goto out_balanced;
 	}
 
 	/* Attempt to move tasks */
@@ -2066,11 +2065,16 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 				imbalance, sd, NEWLY_IDLE, NULL);
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
+	else
+		sd->nr_balance_failed = 0;
 
 	spin_unlock(&busiest->lock);
-
-out:
 	return nr_moved;
+
+out_balanced:
+	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+	sd->nr_balance_failed = 0;
+	return 0;
 }
 
 /*
```
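For readers skimming the hunks, the compilable toy below paraphrases how the exit paths of load_balance_newidle() read after this patch: the "no busiest group" and "no busiest queue" cases both jump to one shared out_balanced label, which accounts the pass as balanced exactly once and clears the failure counter, while a successful pull also clears it. The globals and stubs are simplified stand-ins for the per-NEWLY_IDLE schedstat counter and the find_busiest_* helpers (stubbed here to return "nothing to pull"), not the kernel's types.

```c
#include <stdio.h>

/* Simplified stand-ins for the scheduler state touched above. */
static int lb_balanced_newidle;   /* plays the role of sd->lb_balanced[NEWLY_IDLE] */
static int nr_balance_failed;     /* plays the role of sd->nr_balance_failed */

/* Stubs returning 0 to model the "nothing to pull" cases. */
static int find_busiest_group_stub(void) { return 0; }
static int find_busiest_queue_stub(void) { return 0; }

/* Toy paraphrase of load_balance_newidle() after the patch. */
static int newidle_balance_shape(void)
{
	int nr_moved = 0;

	if (!find_busiest_group_stub())
		goto out_balanced;   /* previously: count lb_balanced here, goto out */

	if (!find_busiest_queue_stub())
		goto out_balanced;   /* previously: count lb_balanced here, goto out */

	/* ... attempt to move tasks; a successful pull clears the counter ... */
	if (nr_moved)
		nr_balance_failed = 0;
	return nr_moved;

out_balanced:
	/* Single shared exit: account the balanced pass and reset the counter. */
	lb_balanced_newidle++;
	nr_balance_failed = 0;
	return 0;
}

int main(void)
{
	newidle_balance_shape();
	printf("lb_balanced[NEWLY_IDLE]=%d, nr_balance_failed=%d\n",
	       lb_balanced_newidle, nr_balance_failed);
	return 0;
}
```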