author     Rusty Russell <rusty@rustcorp.com.au>  2008-11-21 21:07:16 +1030
committer  Dave Jones <davej@redhat.com>          2008-12-05 15:20:10 -0500
commit     9963d1aad40946b1b6d34f9bee8d8a1b9032ae22 (patch)
tree       de3f4ef50a8aefc180b874d20d3bdcf338d031d9
parent     bbeba4c35c252b2e961f09ce6ebe76b2cd5e7e3e (diff)
[CPUFREQ] clean up speedstep-centrino and reduce cpumask_t usage
Impact: cleanup
1) The #ifdef CONFIG_HOTPLUG_CPU seems unnecessary these days.
2) The loop can simply skip over offline cpus, rather than creating a tmp mask.
3) set_mask is set to either a single cpu or all online cpus in a policy.
Since it's just used for set_cpus_allowed(), any offline cpus in a policy
don't matter, so we can just use cpumask_of_cpu() or the policy->cpus (points 2 and 3 are illustrated by the sketch below).
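To make points 2 and 3 concrete, here is a minimal userspace sketch of the before/after loop shape. The cpumask type, online_map, cpu_online() and set_affinity() below are toy stand-ins for the kernel's cpumask_t, cpu_online() and set_cpus_allowed_ptr(), not the real APIs, and the masks are hypothetical values chosen only for illustration.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8
typedef unsigned long cpumask;          /* bit i set => CPU i is in the mask */

/* Hypothetical online map: CPUs 0-3 and 5 online, CPU 4 offline. */
static cpumask online_map = 0x2f;

static bool cpu_online(int cpu)
{
        return online_map & (1UL << cpu);
}

/* Stand-in for set_cpus_allowed_ptr(): just report which mask was chosen. */
static void set_affinity(const cpumask *mask)
{
        printf("affinity mask: 0x%lx\n", *mask);
}

/* Before the patch: build temporary masks (online_policy_cpus, set_mask). */
static void target_old(cpumask policy_cpus, bool shared_any)
{
        cpumask online_policy_cpus = policy_cpus & online_map;  /* tmp mask 1 */

        for (int j = 0; j < NR_CPUS; j++) {
                if (!(online_policy_cpus & (1UL << j)))
                        continue;
                /* tmp mask 2, rebuilt on every iteration */
                cpumask set_mask = shared_any ? online_policy_cpus : (1UL << j);
                set_affinity(&set_mask);
        }
}

/* After the patch: walk the policy mask directly, skip offline CPUs, and
 * point at an existing mask instead of building set_mask.  Offline bits in
 * policy_cpus are harmless for the affinity call. */
static void target_new(cpumask policy_cpus, bool shared_any)
{
        static cpumask single;          /* stand-in for cpumask_of_cpu(j) */

        for (int j = 0; j < NR_CPUS; j++) {
                const cpumask *mask;

                if (!(policy_cpus & (1UL << j)))
                        continue;
                if (!cpu_online(j))     /* offline CPUs are simply skipped */
                        continue;

                if (shared_any) {
                        mask = &policy_cpus;
                } else {
                        single = 1UL << j;
                        mask = &single;
                }
                set_affinity(mask);
        }
}

int main(void)
{
        cpumask policy_cpus = 0x3c;     /* hypothetical policy: CPUs 2-5 */

        target_old(policy_cpus, false);
        target_new(policy_cpus, false);
        return 0;
}

Both versions visit the same online policy CPUs; the rewritten one just avoids allocating and filling the intermediate masks.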
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Dave Jones <davej@redhat.com>
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  |  51
1 file changed, 24 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 3b5f064..f0ea6fa 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -459,9 +459,7 @@ static int centrino_verify (struct cpufreq_policy *policy)
  * Sets a new CPUFreq policy.
  */
 struct allmasks {
-        cpumask_t       online_policy_cpus;
         cpumask_t       saved_mask;
-        cpumask_t       set_mask;
         cpumask_t       covered_cpus;
 };
 
@@ -475,9 +473,7 @@ static int centrino_target (struct cpufreq_policy *policy,
         int             retval = 0;
         unsigned int    j, k, first_cpu, tmp;
         CPUMASK_ALLOC(allmasks);
-        CPUMASK_PTR(online_policy_cpus, allmasks);
         CPUMASK_PTR(saved_mask, allmasks);
-        CPUMASK_PTR(set_mask, allmasks);
         CPUMASK_PTR(covered_cpus, allmasks);
 
         if (unlikely(allmasks == NULL))
@@ -497,30 +493,28 @@ static int centrino_target (struct cpufreq_policy *policy,
                 goto out;
         }
 
-#ifdef CONFIG_HOTPLUG_CPU
-        /* cpufreq holds the hotplug lock, so we are safe from here on */
-        cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
-#else
-        *online_policy_cpus = policy->cpus;
-#endif
-
         *saved_mask = current->cpus_allowed;
         first_cpu = 1;
         cpus_clear(*covered_cpus);
-        for_each_cpu_mask_nr(j, *online_policy_cpus) {
+        for_each_cpu_mask_nr(j, policy->cpus) {
+                const cpumask_t *mask;
+
+                /* cpufreq holds the hotplug lock, so we are safe here */
+                if (!cpu_online(j))
+                        continue;
+
                 /*
                  * Support for SMP systems.
                  * Make sure we are running on CPU that wants to change freq
                  */
-                cpus_clear(*set_mask);
                 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-                        cpus_or(*set_mask, *set_mask, *online_policy_cpus);
+                        mask = &policy->cpus;
                 else
-                        cpu_set(j, *set_mask);
+                        mask = &cpumask_of_cpu(j);
 
-                set_cpus_allowed_ptr(current, set_mask);
+                set_cpus_allowed_ptr(current, mask);
                 preempt_disable();
-                if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
+                if (unlikely(!cpu_isset(smp_processor_id(), *mask))) {
                         dprintk("couldn't limit to CPUs in this domain\n");
                         retval = -EAGAIN;
                         if (first_cpu) {
@@ -548,7 +542,9 @@ static int centrino_target (struct cpufreq_policy *policy,
                 dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
                         target_freq, freqs.old, freqs.new, msr);
 
-                for_each_cpu_mask_nr(k, *online_policy_cpus) {
+                for_each_cpu_mask_nr(k, policy->cpus) {
+                        if (!cpu_online(k))
+                                continue;
                         freqs.cpu = k;
                         cpufreq_notify_transition(&freqs,
                                         CPUFREQ_PRECHANGE);
@@ -571,7 +567,9 @@ static int centrino_target (struct cpufreq_policy *policy,
                 preempt_enable();
         }
 
-        for_each_cpu_mask_nr(k, *online_policy_cpus) {
+        for_each_cpu_mask_nr(k, policy->cpus) {
+                if (!cpu_online(k))
+                        continue;
                 freqs.cpu = k;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -584,18 +582,17 @@ static int centrino_target (struct cpufreq_policy *policy,
                          * Best effort undo..
                          */
 
-                        if (!cpus_empty(*covered_cpus))
-                                for_each_cpu_mask_nr(j, *covered_cpus) {
-                                        set_cpus_allowed_ptr(current,
-                                                             &cpumask_of_cpu(j));
-                                        wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-                                }
+                        for_each_cpu_mask_nr(j, *covered_cpus) {
+                                set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
+                                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+                        }
 
                         tmp = freqs.new;
                         freqs.new = freqs.old;
                         freqs.old = tmp;
-                        for_each_cpu_mask_nr(j, *online_policy_cpus) {
-                                freqs.cpu = j;
+                        for_each_cpu_mask_nr(j, policy->cpus) {
+                                if (!cpu_online(j))
+                                        continue;
                                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
                                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
                         }
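One detail worth noting in the undo path above: the if (!cpus_empty(*covered_cpus)) guard could be dropped because iterating an empty mask already does nothing. A minimal userspace sketch of that idea follows; the cpumask type and the for_each_cpu_bit() macro are simplified stand-ins for the kernel's cpumask_t and for_each_cpu_mask_nr(), not the real implementations.

#include <stdio.h>

#define NR_CPUS 8
typedef unsigned long cpumask;  /* bit i set => CPU i is in the mask */

/* Toy analogue of for_each_cpu_mask_nr(): visit each set bit in the mask. */
#define for_each_cpu_bit(j, mask)                       \
        for ((j) = 0; (j) < NR_CPUS; (j)++)             \
                if ((mask) & (1UL << (j)))

int main(void)
{
        cpumask covered_cpus = 0;       /* nothing covered yet */
        int j;

        /* No emptiness check needed: an empty mask yields zero iterations,
         * so the per-CPU undo body simply never runs. */
        for_each_cpu_bit(j, covered_cpus)
                printf("undo on CPU %d\n", j);

        printf("done\n");
        return 0;
}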