author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2011-04-07 14:09:54 +0200
committer  Ingo Molnar <mingo@elte.hu>               2011-04-11 12:58:21 +0200
commit     3bd65a80affb9768b91f03c56dba46ee79525f9b (patch)
tree       7c62ee65e2fcd936c35eb6e6196e41b2313a332d /kernel/sched.c
parent     3859173d43658d51a749bc0201b943922577d39c (diff)
sched: Simplify NODE/ALLNODES domain creation
Don't treat ALLNODES/NODE differently for difference's sake. Simply always
create the ALLNODES domain and let the sd_degenerate() checks kill it when
it's redundant. This simplifies the code flow.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.455464579@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  |  40
1 file changed, 22 insertions(+), 18 deletions(-)
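
The patch drops the up-front cpumask_weight() test that decided whether the
ALLNODES level was worth building; instead every level is always created and
the existing degeneracy checks in kernel/sched.c (sd_degenerate() /
sd_parent_degenerate(), run when domains are attached) later collapse a level
that covers nothing its child does not. The following is a minimal userspace
sketch of that "build unconditionally, prune degenerate parents" pattern; the
toy_domain type and the helper names are hypothetical illustrations, not
kernel API.

/*
 * Illustrative userspace sketch (not kernel code): build every domain level
 * unconditionally, then unlink any parent level whose span adds nothing over
 * its child -- the same effect the sd_degenerate() checks have on the always
 * created ALLNODES domain.
 */
#include <stdio.h>

struct toy_domain {
	const char *name;
	unsigned long span;		/* bitmask of CPUs this level covers */
	struct toy_domain *parent;
};

/* A parent level is redundant when it covers no more CPUs than its child. */
static int parent_degenerate(const struct toy_domain *parent,
			     const struct toy_domain *child)
{
	return parent->span == child->span;
}

/* Walk upward from the lowest level, unlinking redundant parents. */
static void prune_degenerate(struct toy_domain *sd)
{
	while (sd && sd->parent) {
		if (parent_degenerate(sd->parent, sd))
			sd->parent = sd->parent->parent;	/* skip it */
		else
			sd = sd->parent;
	}
}

int main(void)
{
	struct toy_domain allnodes = { "ALLNODES", 0x0f, NULL };
	struct toy_domain node     = { "NODE",     0x0f, &allnodes };
	struct toy_domain cpu      = { "CPU",      0x03, &node };
	struct toy_domain *sd;

	/* ALLNODES was built unconditionally; on this small "machine" it
	 * spans exactly what NODE spans, so pruning removes it. */
	prune_degenerate(&cpu);

	for (sd = &cpu; sd; sd = sd->parent)
		printf("%s\n", sd->name);	/* prints CPU, NODE */
	return 0;
}

On a machine large enough that ALLNODES genuinely spans more than NODE, the
same pruning pass leaves all three levels in place, which is why the explicit
d->sd_allnodes bookkeeping removed by this patch is no longer needed.
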
diff --git a/kernel/sched.c b/kernel/sched.c
index 72c194c..d395fe5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6838,9 +6838,6 @@ struct sd_data {
};
struct s_data {
-#ifdef CONFIG_NUMA
- int sd_allnodes;
-#endif
cpumask_var_t nodemask;
cpumask_var_t send_covered;
struct sched_domain ** __percpu sd;
@@ -7112,30 +7109,35 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
}
}
-static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
- const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+static struct sched_domain *__build_allnodes_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
{
struct sched_domain *sd = NULL;
#ifdef CONFIG_NUMA
- struct sched_domain *parent;
-
- d->sd_allnodes = 0;
- if (cpumask_weight(cpu_map) >
- SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
- sd = sd_init_ALLNODES(d, i);
- set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), cpu_map);
- d->sd_allnodes = 1;
- }
- parent = sd;
+ sd = sd_init_ALLNODES(d, i);
+ set_domain_attribute(sd, attr);
+ cpumask_copy(sched_domain_span(sd), cpu_map);
+ sd->parent = parent;
+ if (parent)
+ parent->child = sd;
+#endif
+ return sd;
+}
+static struct sched_domain *__build_node_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
sd = sd_init_NODE(d, i);
set_domain_attribute(sd, attr);
sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+ cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
sd->parent = parent;
if (parent)
parent->child = sd;
- cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
#endif
return sd;
}
@@ -7220,7 +7222,9 @@ static int build_sched_domains(const struct cpumask *cpu_map,
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
- sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
+ sd = NULL;
+ sd = __build_allnodes_sched_domain(&d, cpu_map, attr, sd, i);
+ sd = __build_node_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);