author    Rusty Russell <rusty@rustcorp.com.au>  2008-12-26 22:23:43 +1030
committer Rusty Russell <rusty@rustcorp.com.au>  2008-12-26 22:23:43 +1030
commit    be4d638c1597580ed2294d899d9f1a2cd10e462c (patch)
tree      2a4800b70c1e2a1249915f74f8e45f62de319457 /kernel/sched.c
parent    9be3eec2c83848a1ca57ebad13c63c95d0df01e2 (diff)
cpumask: Replace cpu_coregroup_map with cpu_coregroup_mask
cpu_coregroup_map returned a cpumask_t: it's going away.

(Note, the sched part of this patch won't apply meaningfully to the
sched tree, but I'm posting it to show the goal.)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Ingo Molnar <mingo@redhat.com>
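For context on the change itself: the old helper returned a whole cpumask_t by value, while the replacement returns a pointer, so callers no longer copy a potentially large mask onto the stack. The prototypes below are a sketch of the before/after API as it stood around this series; the exact declarations live in the arch topology headers, so treat them as illustrative:

	/* old: returns a full cpumask_t by value (a stack copy) */
	cpumask_t cpu_coregroup_map(int cpu);

	/* new: returns a pointer to a constant mask, no copy needed */
	const struct cpumask *cpu_coregroup_mask(int cpu);

Dereferencing the new helper, as the patch does with *cpu_coregroup_mask(cpu), keeps the surrounding cpumask_t-based scheduler code working until it is converted to the pointer-based accessors.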
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d2d16d1..4292923 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7119,7 +7119,7 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
-	*mask = cpu_coregroup_map(cpu);
+	*mask = *cpu_coregroup_mask(cpu);
 	cpus_and(*mask, *mask, *cpu_map);
 	group = first_cpu(*mask);
 #elif defined(CONFIG_SCHED_SMT)
@@ -7485,7 +7485,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(core_domains, i);
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		sd->span = cpu_coregroup_map(i);
+		sd->span = *cpu_coregroup_mask(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
@@ -7528,7 +7528,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);

-		*this_core_map = cpu_coregroup_map(i);
+		*this_core_map = *cpu_coregroup_mask(i);
 		cpus_and(*this_core_map, *this_core_map, *cpu_map);
 		if (i != first_cpu(*this_core_map))
 			continue;
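For illustration, once the by-value cpumask_t operations are removed, each of these call sites can drop the dereference-and-copy entirely. A minimal sketch of the intended end state, assuming the pointer-based cpumask_and() and cpumask_first() accessors (this is not part of this patch):

	/* work on the mask through pointers; no struct copy on the stack */
	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
	group = cpumask_first(mask);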