author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 18:37:44 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 18:37:44 -0700
commit	26dcce0fabbef75ae426461edf21b5030bad60f3 (patch)
tree	56c64fa47dc29f7ea5a8fd0cab0459fb0a05a2bc /drivers
parent	d7b6de14a0ef8a376f9d57b867545b47302b7bfb (diff)
parent	eb6a12c2428d21a9f3e0f1a50e927d5fd80fc3d0 (diff)
Merge branch 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (31 commits)
  NR_CPUS: Replace NR_CPUS in speedstep-centrino.c
  cpumask: Provide a generic set of CPUMASK_ALLOC macros, FIXUP
  NR_CPUS: Replace NR_CPUS in cpufreq userspace routines
  NR_CPUS: Replace per_cpu(..., smp_processor_id()) with __get_cpu_var
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genapic_flat_64.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genx2apic_uv_x.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/proc.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/mcheck/mce_64.c
  cpumask: Optimize cpumask_of_cpu in lib/smp_processor_id.c, fix
  cpumask: Use optimized CPUMASK_ALLOC macros in the centrino_target
  cpumask: Provide a generic set of CPUMASK_ALLOC macros
  cpumask: Optimize cpumask_of_cpu in lib/smp_processor_id.c
  cpumask: Optimize cpumask_of_cpu in kernel/time/tick-common.c
  cpumask: Optimize cpumask_of_cpu in drivers/misc/sgi-xp/xpc_main.c
  cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/ldt.c
  cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/io_apic_64.c
  cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr
  Revert "cpumask: introduce new APIs"
  cpumask: make for_each_cpu_mask a bit smaller
  net: Pass reference to cpumask variable in net/sunrpc/svc.c
  ...

Fix up trivial conflicts in drivers/cpufreq/cpufreq.c manually
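The recurring pattern in the diff below: with NR_CPUS=4096 a cpumask_t is 512
bytes, so the old idiom of taking the address of a cpumask_of_cpu(cpu)
temporary put an expensive object on the stack. The cpumask_of_cpu_ptr_*()
helpers hand back a pointer instead. A minimal sketch of the converted
migrate-and-restore idiom (run_on_cpu and do_per_cpu_work are hypothetical;
the macro semantics are inferred from the hunks below):

	static int run_on_cpu(int cpu)
	{
		cpumask_t saved_mask = current->cpus_allowed;
		cpumask_of_cpu_ptr_declare(new_mask);	/* declares the pointer */
		int ret;

		cpumask_of_cpu_ptr_next(new_mask, cpu);	/* aim it at cpu's mask */
		set_cpus_allowed_ptr(current, new_mask);
		ret = do_per_cpu_work(cpu);		/* hypothetical payload */
		set_cpus_allowed_ptr(current, &saved_mask);
		return ret;
	}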
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/acpi/processor_throttling.c	17
-rw-r--r--	drivers/base/cpu.c	4
-rw-r--r--	drivers/cpufreq/cpufreq.c	14
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	2
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	4
-rw-r--r--	drivers/cpufreq/cpufreq_userspace.c	79
-rw-r--r--	drivers/firmware/dcdbas.c	3
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_irq.c	4
-rw-r--r--	drivers/misc/sgi-xp/xpc_main.c	3
9 files changed, 72 insertions(+), 58 deletions(-)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0622ace0..a2c3f9c 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
cpumask_t saved_mask;
+ cpumask_of_cpu_ptr_declare(new_mask);
int ret;
if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
* Migrate task to the cpu pointed by pr.
*/
saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ cpumask_of_cpu_ptr_next(new_mask, pr->id);
+ set_cpus_allowed_ptr(current, new_mask);
ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */
set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
cpumask_t saved_mask;
+ cpumask_of_cpu_ptr_declare(new_mask);
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu_mask(i, online_throttling_cpus) {
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ cpumask_of_cpu_ptr_next(new_mask, pr->id);
+ set_cpus_allowed_ptr(current, new_mask);
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state);
} else {
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu_mask(i, online_throttling_cpus) {
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue;
}
t_state.cpu = i;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+ cpumask_of_cpu_ptr_next(new_mask, i);
+ set_cpus_allowed_ptr(current, new_mask);
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state);
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu_mask(i, online_throttling_cpus) {
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
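The for_each_cpu_mask_nr() conversions above (and the next_cpu_nr()/nr_cpu_ids
change in the ehca hunk further down) share one idea: iterate only over valid
CPU ids, i.e. up to the runtime nr_cpu_ids, instead of scanning all NR_CPUS bit
positions of the mask. A minimal sketch of the iterator, assuming the cpumask
API of this tree (count_online_sketch is a hypothetical helper; the kernel
already provides num_online_cpus() for this job):

	static int count_online_sketch(void)
	{
		int cpu, n = 0;

		/* Visits each set bit below nr_cpu_ids, not below NR_CPUS. */
		for_each_cpu_mask_nr(cpu, cpu_online_map)
			n++;
		return n;
	}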
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 20537d5..64f5d54 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -121,14 +121,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
{ \
return print_cpus_map(buf, &cpu_##type##_map); \
} \
-struct sysdev_class_attribute attr_##type##_map = \
+static struct sysdev_class_attribute attr_##type##_map = \
_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
print_cpus_func(online);
print_cpus_func(possible);
print_cpus_func(present);
-struct sysdev_class_attribute *cpu_state_attr[] = {
+static struct sysdev_class_attribute *cpu_state_attr[] = {
&attr_online_map,
&attr_possible_map,
&attr_present_map,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ee1df0d..8d6a3ff 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
ssize_t i = 0;
unsigned int cpu;
- for_each_cpu_mask(cpu, mask) {
+ for_each_cpu_mask_nr(cpu, mask) {
if (i)
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
}
#endif
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
if (cpu == j)
continue;
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
}
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(policy_cpu, j) = policy->cpu;
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* symlink affected CPUs */
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
if (j == cpu)
continue;
if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu_mask(j, policy->cpus)
+ for_each_cpu_mask_nr(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = NULL;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
* the sysfs links afterwards.
*/
if (unlikely(cpus_weight(data->cpus) > 1)) {
- for_each_cpu_mask(j, data->cpus) {
+ for_each_cpu_mask_nr(j, data->cpus) {
if (j == cpu)
continue;
per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (unlikely(cpus_weight(data->cpus) > 1)) {
- for_each_cpu_mask(j, data->cpus) {
+ for_each_cpu_mask_nr(j, data->cpus) {
if (j == cpu)
continue;
dprintk("removing link for cpu %u\n", j);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04b..fe565ee 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return rc;
}
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20d..33855cb 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
/* Get Idle Time */
idle_ticks = UINT_MAX;
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
cputime64_t total_idle_ticks;
unsigned int tmp_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return rc;
}
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01..32244aa 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -30,16 +30,18 @@
/**
* A few values needed by the userspace governor
*/
-static unsigned int cpu_max_freq[NR_CPUS];
-static unsigned int cpu_min_freq[NR_CPUS];
-static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */
-static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
-static unsigned int cpu_is_managed[NR_CPUS];
+static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
+static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
+ userspace */
+static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX (userspace_mutex);
static int cpus_using_userspace_governor;
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
+#define dprintk(msg...) \
+ cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
/* keep track of frequency transitions */
static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
{
struct cpufreq_freqs *freq = data;
- if (!cpu_is_managed[freq->cpu])
+ if (!per_cpu(cpu_is_managed, freq->cpu))
return 0;
dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
freq->cpu, freq->new);
- cpu_cur_freq[freq->cpu] = freq->new;
+ per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
return 0;
}
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
mutex_lock(&userspace_mutex);
- if (!cpu_is_managed[policy->cpu])
+ if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
- cpu_set_freq[policy->cpu] = freq;
+ per_cpu(cpu_set_freq, policy->cpu) = freq;
- if (freq < cpu_min_freq[policy->cpu])
- freq = cpu_min_freq[policy->cpu];
- if (freq > cpu_max_freq[policy->cpu])
- freq = cpu_max_freq[policy->cpu];
+ if (freq < per_cpu(cpu_min_freq, policy->cpu))
+ freq = per_cpu(cpu_min_freq, policy->cpu);
+ if (freq > per_cpu(cpu_max_freq, policy->cpu))
+ freq = per_cpu(cpu_max_freq, policy->cpu);
/*
* We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
{
- return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]);
+ return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
}
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
}
cpus_using_userspace_governor++;
- cpu_is_managed[cpu] = 1;
- cpu_min_freq[cpu] = policy->min;
- cpu_max_freq[cpu] = policy->max;
- cpu_cur_freq[cpu] = policy->cur;
- cpu_set_freq[cpu] = policy->cur;
- dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
+ per_cpu(cpu_is_managed, cpu) = 1;
+ per_cpu(cpu_min_freq, cpu) = policy->min;
+ per_cpu(cpu_max_freq, cpu) = policy->max;
+ per_cpu(cpu_cur_freq, cpu) = policy->cur;
+ per_cpu(cpu_set_freq, cpu) = policy->cur;
+ dprintk("managing cpu %u started "
+ "(%u - %u kHz, currently %u kHz)\n",
+ cpu,
+ per_cpu(cpu_min_freq, cpu),
+ per_cpu(cpu_max_freq, cpu),
+ per_cpu(cpu_cur_freq, cpu));
mutex_unlock(&userspace_mutex);
break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
CPUFREQ_TRANSITION_NOTIFIER);
}
- cpu_is_managed[cpu] = 0;
- cpu_min_freq[cpu] = 0;
- cpu_max_freq[cpu] = 0;
- cpu_set_freq[cpu] = 0;
+ per_cpu(cpu_is_managed, cpu) = 0;
+ per_cpu(cpu_min_freq, cpu) = 0;
+ per_cpu(cpu_max_freq, cpu) = 0;
+ per_cpu(cpu_set_freq, cpu) = 0;
dprintk("managing cpu %u stopped\n", cpu);
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
- dprintk("limit event for cpu %u: %u - %u kHz,"
+ dprintk("limit event for cpu %u: %u - %u kHz, "
"currently %u kHz, last set to %u kHz\n",
cpu, policy->min, policy->max,
- cpu_cur_freq[cpu], cpu_set_freq[cpu]);
- if (policy->max < cpu_set_freq[cpu]) {
+ per_cpu(cpu_cur_freq, cpu),
+ per_cpu(cpu_set_freq, cpu));
+ if (policy->max < per_cpu(cpu_set_freq, cpu)) {
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- }
- else if (policy->min > cpu_set_freq[cpu]) {
+ } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
- }
- else {
- __cpufreq_driver_target(policy, cpu_set_freq[cpu],
+ } else {
+ __cpufreq_driver_target(policy,
+ per_cpu(cpu_set_freq, cpu),
CPUFREQ_RELATION_L);
}
- cpu_min_freq[cpu] = policy->min;
- cpu_max_freq[cpu] = policy->max;
- cpu_cur_freq[cpu] = policy->cur;
+ per_cpu(cpu_min_freq, cpu) = policy->min;
+ per_cpu(cpu_max_freq, cpu) = policy->max;
+ per_cpu(cpu_cur_freq, cpu) = policy->cur;
mutex_unlock(&userspace_mutex);
break;
}
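The cpufreq_userspace.c hunk above is the NR_CPUS-array half of this merge:
five 'unsigned int x[NR_CPUS]' tables become DEFINE_PER_CPU variables, so the
storage follows the CPUs the machine can actually have rather than the
compile-time maximum (4096 entries per array on a cpus4096 build). A minimal
sketch of the access-pattern change, with hypothetical names:

	/* Old form: static unsigned int cpu_example_freq[NR_CPUS];
	 *           cpu_example_freq[cpu] = khz;
	 * New form, as used in the hunk above: */
	static DEFINE_PER_CPU(unsigned int, cpu_example_freq);

	static unsigned int set_example_freq(unsigned int cpu, unsigned int khz)
	{
		per_cpu(cpu_example_freq, cpu) = khz;	/* per-cpu write */
		return per_cpu(cpu_example_freq, cpu);	/* per-cpu read */
	}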
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7..0b624e9 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
static int smi_request(struct smi_cmd *smi_cmd)
{
cpumask_t old_mask;
+ cpumask_of_cpu_ptr(new_mask, 0);
int ret = 0;
if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
/* SMI requires CPU 0 */
old_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+ set_cpus_allowed_ptr(current, new_mask);
if (smp_processor_id() != 0) {
dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
__func__);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d93..7a64aa9 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags);
- cpu = next_cpu(pool->last_cpu, cpu_online_map);
- if (cpu == NR_CPUS)
+ cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+ if (cpu >= nr_cpu_ids)
cpu = first_cpu(cpu_online_map);
pool->last_cpu = cpu;
spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 08256ed..579b01f 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore)
int last_IRQ_count = 0;
int new_IRQ_count;
int force_IRQ = 0;
+ cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
/* this thread was marked active by xpc_hb_init() */
- set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
+ set_cpus_allowed_ptr(current, cpumask);
/* set our heartbeating to other partitions into motion */
xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);