author     Rusty Russell <rusty@rustcorp.com.au>  2008-11-25 02:35:11 +1030
committer  Ingo Molnar <mingo@elte.hu>             2008-11-24 17:51:37 +0100
commit     a0e902452da16b79d7c9230630ed8a595d14fa85 (patch)
tree       cf1a426efc9ac81af60da4f55f825162acf81dec /kernel/sched.c
parent     4d2732c63e0c05cfef2a74868d08eace922dfc3e (diff)
sched: convert rebalance_domains() to cpumask_var_t.
Impact: stack usage reduction

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves space on the stack. cpumask_var_t is just a struct cpumask for !CONFIG_CPUMASK_OFFSTACK.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
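For context, the patch adopts the usual cpumask_var_t idiom: with CONFIG_CPUMASK_OFFSTACK=y, alloc_cpumask_var() allocates the mask from the heap, and with it disabled the mask is an ordinary struct cpumask, so the alloc/free calls always succeed and do essentially nothing. A minimal illustrative sketch of that idiom follows; it is not part of the patch, and the function name example_use_cpumask() is made up:

/*
 * Illustrative sketch only (not from the patch): the cpumask_var_t
 * alloc/use/free idiom that rebalance_domains() switches to.
 * example_use_cpumask() is a made-up name.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>

static void example_use_cpumask(void)
{
	cpumask_var_t mask;

	/* GFP_ATOMIC mirrors the patch, which cannot sleep here. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return;			/* allocation failed: just bail out */

	cpumask_copy(mask, cpu_online_mask);	/* fill the mask ... */
	/* ... pass 'mask' wherever a struct cpumask * is expected ... */

	free_cpumask_var(mask);
}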
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 154a95f..67383e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3850,7 +3850,11 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
int need_serialize;
- cpumask_t tmp;
+ cpumask_var_t tmp;
+
+ /* Fails alloc? Rebalancing probably not a priority right now. */
+ if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
+ return;
for_each_domain(cpu, sd) {
if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3875,7 +3879,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
+ if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
/*
* We've pulled tasks over so either we're no
* longer idle, or one of our SMT siblings is
@@ -3909,6 +3913,8 @@ out:
*/
if (likely(update_next_balance))
rq->next_balance = next_balance;
+
+ free_cpumask_var(tmp);
}
/*