author    Nick Piggin <nickpiggin@yahoo.com.au>    2005-06-25 14:57:11 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 16:24:41 -0700
commit    db935dbd43c4290d710304662cc908f733afea06 (patch)
tree      96fed92340b7b5952472dfb1a8cb5d0620122001 /kernel/sched.c
parent    3950745131e23472fb5ace2ee4a2093e7590ec69 (diff)
[PATCH] sched: add debugging
These conditions should now be impossible, and we need to fix them if they happen.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  14
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 41e69b5..8b035a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1942,15 +1942,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
goto out_balanced;
}
- /*
- * This should be "impossible", but since load
- * balancing is inherently racy and statistical,
- * it could happen in theory.
- */
- if (unlikely(busiest == this_rq)) {
- WARN_ON(1);
- goto out_balanced;
- }
+ BUG_ON(busiest == this_rq);
schedstat_add(sd, lb_imbalance[idle], imbalance);
@@ -2052,11 +2044,13 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
}
busiest = find_busiest_queue(group);
- if (!busiest || busiest == this_rq) {
+ if (!busiest) {
schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
goto out_balanced;
}
+ BUG_ON(busiest == this_rq);
+
/* Attempt to move tasks */
double_lock_balance(this_rq, busiest);
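
For readers unfamiliar with the two macros, the change is from a recoverable warning to a hard assertion: the old code called WARN_ON() and bailed out via out_balanced when busiest == this_rq, while the new code treats that condition as fatal with BUG_ON(). The following is a minimal userspace sketch of that difference only; it uses simplified stand-in macros rather than the real kernel ones (which live in the kernel's bug headers), and the struct and function names are invented for illustration.

/*
 * Illustrative sketch only: simplified stand-ins for the kernel's
 * WARN_ON()/BUG_ON() macros, showing the pattern this patch changes.
 */
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for WARN_ON(): report the condition, let the caller recover. */
#define WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); } while (0)

/* Stand-in for BUG_ON(): the condition is treated as fatal. */
#define BUG_ON(cond) \
	do { if (cond) { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } } while (0)

struct runqueue { int cpu; };	/* hypothetical, not the kernel's runqueue_t */

/* Old style: the "impossible" case is warned about and recovered from. */
static int balance_old(struct runqueue *this_rq, struct runqueue *busiest)
{
	if (busiest == this_rq) {	/* should never happen */
		WARN_ON(1);
		return 0;		/* corresponds to goto out_balanced */
	}
	return 1;			/* proceed with moving tasks */
}

/* New style: the same case is asserted away; hitting it stops execution. */
static int balance_new(struct runqueue *this_rq, struct runqueue *busiest)
{
	BUG_ON(busiest == this_rq);
	return 1;
}

int main(void)
{
	struct runqueue a = { .cpu = 0 }, b = { .cpu = 1 };

	printf("old, distinct rqs: %d\n", balance_old(&a, &b));
	printf("new, distinct rqs: %d\n", balance_new(&a, &b));
	/* balance_new(&a, &a) would abort, matching the patch's intent. */
	return 0;
}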