author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-04-07 14:09:55 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2011-04-11 12:58:22 +0200
commit | bf28b253266ebd73c331dde24d64606afde32ceb (patch)
tree | 7378b3c9fd37ecfdca30fa074f706624f8807f8e /kernel/sched.c
parent | 3bd65a80affb9768b91f03c56dba46ee79525f9b (diff)
sched: Remove nodemask allocation
There's only one nodemask user left, so replace it with a direct
computation. This saves some memory and reduces code-flow
complexity.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.505608966@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 14
1 file changed, 3 insertions(+), 11 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index d395fe5..f4d3a62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6838,7 +6838,6 @@ struct sd_data {
 };
 
 struct s_data {
-	cpumask_var_t		nodemask;
 	cpumask_var_t		send_covered;
 	struct sched_domain ** __percpu sd;
 	struct sd_data		sdd[SD_LV_MAX];
@@ -6850,7 +6849,6 @@ enum s_alloc {
 	sa_sd,
 	sa_sd_storage,
 	sa_send_covered,
-	sa_nodemask,
 	sa_none,
 };
 
@@ -7035,8 +7033,6 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 		} /* fall through */
 	case sa_send_covered:
 		free_cpumask_var(d->send_covered); /* fall through */
-	case sa_nodemask:
-		free_cpumask_var(d->nodemask); /* fall through */
 	case sa_none:
 		break;
 	}
@@ -7049,10 +7045,8 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 
 	memset(d, 0, sizeof(*d));
 
-	if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
-		return sa_none;
 	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
-		return sa_nodemask;
+		return sa_none;
 	for (i = 0; i < SD_LV_MAX; i++) {
 		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
 		if (!d->sdd[i].sd)
@@ -7149,7 +7143,8 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
 	struct sched_domain *sd;
 	sd = sd_init_CPU(d, i);
 	set_domain_attribute(sd, attr);
-	cpumask_copy(sched_domain_span(sd), d->nodemask);
+	cpumask_and(sched_domain_span(sd),
+		    cpumask_of_node(cpu_to_node(i)), cpu_map);
 	sd->parent = parent;
 	if (parent)
 		parent->child = sd;
@@ -7219,9 +7214,6 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Set up domains for cpus specified by the cpu_map. */
 	for_each_cpu(i, cpu_map) {
-		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
-			    cpu_map);
-
 		sd = NULL;
 		sd = __build_allnodes_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_node_sched_domain(&d, cpu_map, attr, sd, i);
```
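The intent of the diff above is easiest to see in the __build_cpu_sched_domain() hunk: instead of filling a scratch cpumask (d->nodemask) once per loop iteration and copying it into the domain span, the node/cpu_map intersection is now computed directly at its single remaining use site, so the scratch mask and its allocation/free handling can go away. The following standalone userspace sketch illustrates that pattern with plain bitmasks; the mask_t type and the helper names (node_mask, cpu_node, span_via_scratch, span_direct) are made up for illustration and are not the kernel's cpumask API.

```c
/*
 * Minimal userspace sketch of the idea behind this patch: instead of
 * keeping a scratch mask around just to hold an intersection that is
 * consumed once, compute the intersection where it is used.
 *
 * Everything here is a hypothetical stand-in for cpumask_var_t,
 * cpumask_and(), cpumask_copy(), cpumask_of_node() and cpu_to_node().
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t mask_t;            /* one bit per CPU, 64 CPUs max */

static mask_t node_mask(int node)   /* stand-in for cpumask_of_node() */
{
	return node ? 0xF0u : 0x0Fu;  /* fake topology: 2 nodes, 4 CPUs each */
}

static int cpu_node(int cpu)        /* stand-in for cpu_to_node() */
{
	return cpu / 4;
}

/* Old scheme (simplified): fill a scratch mask, then copy it out. */
static mask_t span_via_scratch(int cpu, mask_t cpu_map)
{
	mask_t scratch = node_mask(cpu_node(cpu)) & cpu_map;  /* "cpumask_and" */
	return scratch;                                       /* "cpumask_copy" */
}

/* New scheme: compute the intersection directly at the use site. */
static mask_t span_direct(int cpu, mask_t cpu_map)
{
	return node_mask(cpu_node(cpu)) & cpu_map;
}

int main(void)
{
	mask_t cpu_map = 0x3F;          /* CPUs 0-5 "online" */

	for (int cpu = 0; cpu < 8; cpu++) {
		if (!(cpu_map & (1ull << cpu)))
			continue;
		/* both schemes produce the same per-CPU domain span */
		printf("cpu %d: old=%#llx new=%#llx\n", cpu,
		       (unsigned long long)span_via_scratch(cpu, cpu_map),
		       (unsigned long long)span_direct(cpu, cpu_map));
	}
	return 0;
}
```

The payoff in the real patch mirrors the sketch: once the intersection is cheap to compute in place, the scratch mask, the alloc_cpumask_var()/free_cpumask_var() calls that managed it, and the sa_nodemask allocation state all become dead weight and are removed.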