author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 09:58:24 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 09:58:24 -0800
commit    d0316554d3586cbea60592a41391b5def2553d6f (patch)
tree      5e7418f0bacbc68cec5dfd1541e03eb56870aa02 /arch/x86
parent    fb0bbb92d42d5bd0ab224605444efdfed06d6934 (diff)
parent    51e99be00ce2713cbb841cedc997cafa6e26c7f4 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
  arch/x86/kvm/svm.c
  mm/slab.c
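The series merged here has two recurring themes: give per-cpu variables subsystem-prefixed, globally unique names (in preparation for referring to per-cpu symbols by their plain C identifiers), and convert open-coded __get_cpu_var() accesses to the new this_cpu_xx operations. A minimal hypothetical before/after sketch of both changes together (the variable and function names below are made up for illustration; only DEFINE_PER_CPU(), __get_cpu_var(), per_cpu_var() and __this_cpu_write() are kernel APIs of this era):

    /* Before: generic symbol name, address-based per-cpu access */
    static DEFINE_PER_CPU(u64, residual);

    static void account_old(u64 left_over)
    {
            __get_cpu_var(residual) = left_over;
    }

    /* After: subsystem-prefixed symbol, optimized this_cpu operation
     * (a single segment-relative mov on x86, per the percpu.h hunk below) */
    static DEFINE_PER_CPU(u64, foo_residual);

    static void account_new(u64 left_over)
    {
            __this_cpu_write(per_cpu_var(foo_residual), left_over);
    }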
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/percpu.h                 104
-rw-r--r--  arch/x86/kernel/apic/nmi.c                      8
-rw-r--r--  arch/x86/kernel/cpu/common.c                    8
-rw-r--r--  arch/x86/kernel/cpu/cpu_debug.c                30
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c     28
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c          54
-rw-r--r--  arch/x86/kernel/ds.c                            4
-rw-r--r--  arch/x86/kvm/svm.c                             64
-rw-r--r--  arch/x86/xen/smp.c                             41
-rw-r--r--  arch/x86/xen/time.c                            24
10 files changed, 221 insertions(+), 144 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b65a36d..0c44196 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
#define percpu_to_op(op, var, val) \
do { \
- typedef typeof(var) T__; \
+ typedef typeof(var) pto_T__; \
if (0) { \
- T__ tmp__; \
- tmp__ = (val); \
+ pto_T__ pto_tmp__; \
+ pto_tmp__ = (val); \
} \
switch (sizeof(var)) { \
case 1: \
asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
- : "qi" ((T__)(val))); \
+ : "qi" ((pto_T__)(val))); \
break; \
case 2: \
asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)(val))); \
+ : "ri" ((pto_T__)(val))); \
break; \
case 4: \
asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)(val))); \
+ : "ri" ((pto_T__)(val))); \
break; \
case 8: \
asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
- : "re" ((T__)(val))); \
+ : "re" ((pto_T__)(val))); \
break; \
default: __bad_percpu_size(); \
} \
@@ -106,31 +106,31 @@ do { \
#define percpu_from_op(op, var, constraint) \
({ \
- typeof(var) ret__; \
+ typeof(var) pfo_ret__; \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_arg(1)",%0" \
- : "=q" (ret__) \
+ : "=q" (pfo_ret__) \
: constraint); \
break; \
case 2: \
asm(op "w "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
case 4: \
asm(op "l "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
case 8: \
asm(op "q "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
default: __bad_percpu_size(); \
} \
- ret__; \
+ pfo_ret__; \
})
/*
@@ -153,6 +153,84 @@ do { \
#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+/*
+ * Per cpu atomic 64 bit operations are only available under 64 bit.
+ * 32 bit must fall back to generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#endif
+
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
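Two details of this hunk are easy to miss. First, the macro-internal temporaries are renamed (T__ becomes pto_T__, ret__ becomes pfo_ret__) so they cannot collide with an identifier that a caller happens to pass in as the macro argument. Second, all of the new size-suffixed __this_cpu_*/this_cpu_*/irqsafe_cpu_* helpers expand to the same two primitives, percpu_to_op() and percpu_from_op(), and the 8-byte variants are provided only under CONFIG_X86_64; 32-bit kernels fall back to the generic implementations, as the comment in the hunk notes. The sketch below is a plain userspace illustration of the two tricks percpu_to_op() relies on (it is not the kernel macro, just a compileable model of the pattern):

    #include <stdio.h>

    /*
     * 1. The "if (0)" assignment makes the compiler type-check that 'val'
     *    is assignable to typeof(var) without generating any code.
     * 2. A switch on sizeof(var) selects the operand width; the kernel
     *    version emits "addb"/"addw"/"addl"/"addq" accordingly and
     *    references the never-defined __bad_percpu_size() so unsupported
     *    sizes fail at link time.
     */
    #define sketch_add(var, val)                                        \
    do {                                                                \
            typedef typeof(var) pto_T__;  /* prefixed, per the rename */ \
            if (0) {                                                    \
                    pto_T__ pto_tmp__;                                  \
                    pto_tmp__ = (val);    /* type check only */         \
            }                                                           \
            switch (sizeof(var)) {                                      \
            case 1: case 2: case 4: case 8:                             \
                    (var) += (pto_T__)(val);                            \
                    break;                                              \
            }                                                           \
    } while (0)

    int main(void)
    {
            long counter = 0;

            sketch_add(counter, 5);       /* dispatches on sizeof(long) */
            printf("%ld\n", counter);     /* prints 5 */
            return 0;
    }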
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 6389432..0159a69 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
*/
static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
- local_inc(&__get_cpu_var(alert_counter));
- if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+ __this_cpu_inc(per_cpu_var(alert_counter));
+ if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
/*
* die_nmi will return ONLY if NOTIFY_STOP happens..
*/
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- local_set(&__get_cpu_var(alert_counter), 0);
+ __this_cpu_write(per_cpu_var(alert_counter), 0);
}
/* see if the nmi watchdog went off */
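The local_t-to-long conversion above works because the x86 this_cpu operations from the percpu.h hunk compile the whole read-modify-write into one instruction with a segment-relative memory operand (roughly "addq $1,%gs:per_cpu__alert_counter" on x86-64), so an interrupt or NMI on the same CPU can only observe the counter before or after the update, never in the middle. That is the same guarantee local_t existed to provide, which is why the wrapper can be dropped. A condensed view of the two access patterns, taken from the hunk above and shown side by side for contrast:

    /* local_t route: guarantee comes from local_inc(), but every access
     * first has to form the per-cpu address */
    local_inc(&__get_cpu_var(alert_counter));

    /* this_cpu route: percpu_to_op("add", ...) emits a single
     * segment-relative instruction, atomic with respect to interrupts
     * and NMIs on the local CPU */
    __this_cpu_inc(per_cpu_var(alert_counter));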
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1afa99..20399b7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
void __cpuinit cpu_init(void)
{
- struct orig_ist *orig_ist;
+ struct orig_ist *oist;
struct task_struct *me;
struct tss_struct *t;
unsigned long v;
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
cpu = stack_smp_processor_id();
t = &per_cpu(init_tss, cpu);
- orig_ist = &per_cpu(orig_ist, cpu);
+ oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
/*
* set up and load the per-CPU TSS
*/
- if (!orig_ist->ist[0]) {
+ if (!oist->ist[0]) {
char *estacks = per_cpu(exception_stacks, cpu);
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
estacks += exception_stack_sizes[v];
- orig_ist->ist[v] = t->x86_tss.ist[v] =
+ oist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
}
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c..b368cd8 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
#include <asm/apic.h>
#include <asm/desc.h>
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
static DEFINE_MUTEX(cpu_debug_lock);
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
/* Already intialized */
if (file == CPU_INDEX_BIT)
- if (per_cpu(cpu_arr[type].init, cpu))
+ if (per_cpu(cpud_arr[type].init, cpu))
return 0;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
priv->reg = reg;
priv->file = file;
mutex_lock(&cpu_debug_lock);
- per_cpu(priv_arr[type], cpu) = priv;
- per_cpu(cpu_priv_count, cpu)++;
+ per_cpu(cpud_priv_arr[type], cpu) = priv;
+ per_cpu(cpud_priv_count, cpu)++;
mutex_unlock(&cpu_debug_lock);
if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
dentry, (void *)priv, &cpu_fops);
else {
debugfs_create_file(cpu_base[type].name, S_IRUGO,
- per_cpu(cpu_arr[type].dentry, cpu),
+ per_cpu(cpud_arr[type].dentry, cpu),
(void *)priv, &cpu_fops);
mutex_lock(&cpu_debug_lock);
- per_cpu(cpu_arr[type].init, cpu) = 1;
+ per_cpu(cpud_arr[type].init, cpu) = 1;
mutex_unlock(&cpu_debug_lock);
}
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
if (!is_typeflag_valid(cpu, cpu_base[type].flag))
continue;
cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
- per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+ per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
if (type < CPU_TSS_BIT)
err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
err = cpu_init_allreg(cpu, cpu_dentry);
pr_info("cpu%d(%d) debug files %d\n",
- cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
- if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+ cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+ if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
pr_err("Register files count %d exceeds limit %d\n",
- per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
- per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+ per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+ per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
err = -ENFILE;
}
if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
debugfs_remove_recursive(cpu_debugfs_dir);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
- for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
- kfree(per_cpu(priv_arr[i], cpu));
+ for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+ kfree(per_cpu(cpud_priv_arr[i], cpu));
}
module_init(cpu_debug_init);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index d2e7c77..f28decf 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
unsigned int cpu_feature;
};
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-static DEFINE_PER_CPU(struct aperfmperf, old_perf);
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
if (unlikely(cpumask_empty(mask)))
return 0;
- switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
+ switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
- perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
+ perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
cmd.addr.io.port = perf->control_register.address;
cmd.addr.io.bit_width = perf->control_register.bit_width;
break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
return 0;
- ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
- per_cpu(old_perf, cpu) = perf;
+ ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+ per_cpu(acfreq_old_perf, cpu) = perf;
retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
unsigned int freq;
unsigned int cached_freq;
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
struct acpi_processor_performance *perf;
struct cpufreq_freqs freqs;
struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_verify\n");
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM;
data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
- per_cpu(drv_data, cpu) = data;
+ per_cpu(acfreq_data, cpu) = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
- per_cpu(drv_data, cpu) = NULL;
+ per_cpu(acfreq_data, cpu) = NULL;
return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_cpu_exit\n");
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
- per_cpu(drv_data, policy->cpu) = NULL;
+ per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_resume\n");
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6c40f6b..0c06bca 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
#ifdef CONFIG_SYSFS
/* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -513,7 +513,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
struct cpuinfo_x86 *d;
for_each_online_cpu(i) {
- if (!per_cpu(cpuid4_info, i))
+ if (!per_cpu(ici_cpuid4_info, i))
continue;
d = &cpu_data(i);
this_leaf = CPUID4_INFO_IDX(i, index);
@@ -535,7 +535,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
c->apicid >> index_msb) {
cpumask_set_cpu(i,
to_cpumask(this_leaf->shared_cpu_map));
- if (i != cpu && per_cpu(cpuid4_info, i)) {
+ if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
sibling_leaf =
CPUID4_INFO_IDX(i, index);
cpumask_set_cpu(cpu, to_cpumask(
@@ -574,8 +574,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
for (i = 0; i < num_cache_leaves; i++)
cache_remove_shared_cpu_map(cpu, i);
- kfree(per_cpu(cpuid4_info, cpu));
- per_cpu(cpuid4_info, cpu) = NULL;
+ kfree(per_cpu(ici_cpuid4_info, cpu));
+ per_cpu(ici_cpuid4_info, cpu) = NULL;
}
static int
@@ -614,15 +614,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
if (num_cache_leaves == 0)
return -ENOENT;
- per_cpu(cpuid4_info, cpu) = kzalloc(
+ per_cpu(ici_cpuid4_info, cpu) = kzalloc(
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
- if (per_cpu(cpuid4_info, cpu) == NULL)
+ if (per_cpu(ici_cpuid4_info, cpu) == NULL)
return -ENOMEM;
smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
if (retval) {
- kfree(per_cpu(cpuid4_info, cpu));
- per_cpu(cpuid4_info, cpu) = NULL;
+ kfree(per_cpu(ici_cpuid4_info, cpu));
+ per_cpu(ici_cpuid4_info, cpu) = NULL;
}
return retval;
@@ -634,7 +634,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
/* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
struct _index_kobject {
struct kobject kobj;
@@ -643,8 +643,8 @@ struct _index_kobject {
};
/* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
@@ -863,10 +863,10 @@ static struct kobj_type ktype_percpu_entry = {
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
- kfree(per_cpu(cache_kobject, cpu));
- kfree(per_cpu(index_kobject, cpu));
- per_cpu(cache_kobject, cpu) = NULL;
- per_cpu(index_kobject, cpu) = NULL;
+ kfree(per_cpu(ici_cache_kobject, cpu));
+ kfree(per_cpu(ici_index_kobject, cpu));
+ per_cpu(ici_cache_kobject, cpu) = NULL;
+ per_cpu(ici_index_kobject, cpu) = NULL;
free_cache_attributes(cpu);
}
@@ -882,14 +882,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
return err;
/* Allocate all required memory */
- per_cpu(cache_kobject, cpu) =
+ per_cpu(ici_cache_kobject, cpu) =
kzalloc(sizeof(struct kobject), GFP_KERNEL);
- if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+ if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
goto err_out;
- per_cpu(index_kobject, cpu) = kzalloc(
+ per_cpu(ici_index_kobject, cpu) = kzalloc(
sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
- if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+ if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
goto err_out;
return 0;
@@ -913,7 +913,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
if (unlikely(retval < 0))
return retval;
- retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+ retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
&ktype_percpu_entry,
&sys_dev->kobj, "%s", "cache");
if (retval < 0) {
@@ -927,12 +927,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_object->index = i;
retval = kobject_init_and_add(&(this_object->kobj),
&ktype_cache,
- per_cpu(cache_kobject, cpu),
+ per_cpu(ici_cache_kobject, cpu),
"index%1lu", i);
if (unlikely(retval)) {
for (j = 0; j < i; j++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
- kobject_put(per_cpu(cache_kobject, cpu));
+ kobject_put(per_cpu(ici_cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
return retval;
}
@@ -940,7 +940,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
}
cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
- kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+ kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
return 0;
}
@@ -949,7 +949,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long i;
- if (per_cpu(cpuid4_info, cpu) == NULL)
+ if (per_cpu(ici_cpuid4_info, cpu) == NULL)
return;
if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
return;
@@ -957,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
for (i = 0; i < num_cache_leaves; i++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
- kobject_put(per_cpu(cache_kobject, cpu));
+ kobject_put(per_cpu(ici_cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
}
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ef42a03..1c47390 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -265,13 +265,13 @@ struct ds_context {
int cpu;
};
-static DEFINE_PER_CPU(struct ds_context *, cpu_context);
+static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
{
struct ds_context **p_context =
- (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
+ (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
struct ds_context *context = NULL;
struct ds_context *new_context = NULL;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3de0b37..1d9b338 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
static int svm_hardware_enable(void *garbage)
{
- struct svm_cpu_data *svm_data;
+ struct svm_cpu_data *sd;
uint64_t efer;
struct descriptor_table gdt_descr;
struct desc_struct *gdt;
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
me);
return -EINVAL;
}
- svm_data = per_cpu(svm_data, me);
+ sd = per_cpu(svm_data, me);
- if (!svm_data) {
+ if (!sd) {
printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
me);
return -EINVAL;
}
- svm_data->asid_generation = 1;
- svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
- svm_data->next_asid = svm_data->max_asid + 1;
+ sd->asid_generation = 1;
+ sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+ sd->next_asid = sd->max_asid + 1;
kvm_get_gdt(&gdt_descr);
gdt = (struct desc_struct *)gdt_descr.base;
- svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+ sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
wrmsrl(MSR_EFER, efer | EFER_SVME);
- wrmsrl(MSR_VM_HSAVE_PA,
- page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+ wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
return 0;
}
static void svm_cpu_uninit(int cpu)
{
- struct svm_cpu_data *svm_data
- = per_cpu(svm_data, raw_smp_processor_id());
+ struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
- if (!svm_data)
+ if (!sd)
return;
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
- __free_page(svm_data->save_area);
- kfree(svm_data);
+ __free_page(sd->save_area);
+ kfree(sd);
}
static int svm_cpu_init(int cpu)
{
- struct svm_cpu_data *svm_data;
+ struct svm_cpu_data *sd;
int r;
- svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
- if (!svm_data)
+ sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
+ if (!sd)
return -ENOMEM;
- svm_data->cpu = cpu;
- svm_data->save_area = alloc_page(GFP_KERNEL);
+ sd->cpu = cpu;
+ sd->save_area = alloc_page(GFP_KERNEL);
r = -ENOMEM;
- if (!svm_data->save_area)
+ if (!sd->save_area)
goto err_1;
- per_cpu(svm_data, cpu) = svm_data;
+ per_cpu(svm_data, cpu) = sd;
return 0;
err_1:
- kfree(svm_data);
+ kfree(sd);
return r;
}
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
#endif
}
-static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
- if (svm_data->next_asid > svm_data->max_asid) {
- ++svm_data->asid_generation;
- svm_data->next_asid = 1;
+ if (sd->next_asid > sd->max_asid) {
+ ++sd->asid_generation;
+ sd->next_asid = 1;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
- svm->asid_generation = svm_data->asid_generation;
- svm->vmcb->control.asid = svm_data->next_asid++;
+ svm->asid_generation = sd->asid_generation;
+ svm->vmcb->control.asid = sd->next_asid++;
}
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
{
int cpu = raw_smp_processor_id();
- struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
- svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+ sd->tss_desc->type = 9; /* available 32/64-bit TSS */
load_TR_desc();
}
@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
{
int cpu = raw_smp_processor_id();
- struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
/* FIXME: handle wraparound of asid_generation */
- if (svm->asid_generation != svm_data->asid_generation)
- new_asid(svm, svm_data);
+ if (svm->asid_generation != sd->asid_generation)
+ new_asid(svm, sd);
}
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
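This file is the "famous last words" conflict from the merge message: svm.c defines a per-cpu variable named svm_data and, before this hunk, also used svm_data as the name of local pointers. Renaming the locals to sd keeps the per-cpu symbol's name free for the per-cpu accessors. A simplified, hypothetical sketch of the shadowing problem the rename avoids (my reading of the hunk; the exact failure depends on how per_cpu() expands in a given tree):

    static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

    static void problematic(int cpu)
    {
            struct svm_cpu_data *svm_data;  /* local reuses the per-cpu symbol's name */

            /* Works only as long as per_cpu() mangles the name to
             * per_cpu__svm_data; once per-cpu symbols are referenced by
             * their plain identifiers, the local shadows the symbol. */
            svm_data = per_cpu(svm_data, cpu);
    }

    static void fixed(int cpu)
    {
            struct svm_cpu_data *sd;        /* rename done by this hunk */

            sd = per_cpu(svm_data, cpu);    /* unambiguous */
    }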
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 64757c0..563d205 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,10 +35,10 @@
cpumask_var_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, callfuncsingle_irq);
-static DEFINE_PER_CPU(int, debug_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq);
+static DEFINE_PER_CPU(int, xen_callfunc_irq);
+static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(resched_irq, cpu) = rc;
+ per_cpu(xen_resched_irq, cpu) = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(callfunc_irq, cpu) = rc;
+ per_cpu(xen_callfunc_irq, cpu) = rc;
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
debug_name, NULL);
if (rc < 0)
goto fail;
- per_cpu(debug_irq, cpu) = rc;
+ per_cpu(xen_debug_irq, cpu) = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(callfuncsingle_irq, cpu) = rc;
+ per_cpu(xen_callfuncsingle_irq, cpu) = rc;
return 0;
fail:
- if (per_cpu(resched_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
- if (per_cpu(callfunc_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
- if (per_cpu(debug_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
- if (per_cpu(callfuncsingle_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+ if (per_cpu(xen_resched_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+ if (per_cpu(xen_callfunc_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ if (per_cpu(xen_debug_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
+ NULL);
return rc;
}
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10);
}
- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 9d1f853..0d3f07c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,14 +31,14 @@
#define NS_PER_TICK (1000000000LL / HZ)
/* runstate info updated by Xen */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* snapshots of runstate info */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
/* unused ns of stolen and blocked time */
-static DEFINE_PER_CPU(u64, residual_stolen);
-static DEFINE_PER_CPU(u64, residual_blocked);
+static DEFINE_PER_CPU(u64, xen_residual_stolen);
+static DEFINE_PER_CPU(u64, xen_residual_blocked);
/* return an consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
BUG_ON(preemptible());
- state = &__get_cpu_var(runstate);
+ state = &__get_cpu_var(xen_runstate);
/*
* The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
- return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+ return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}
void xen_setup_runstate_info(int cpu)
{
struct vcpu_register_runstate_memory_area area;
- area.addr.v = &per_cpu(runstate, cpu);
+ area.addr.v = &per_cpu(xen_runstate, cpu);
if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
WARN_ON(state.state != RUNSTATE_running);
- snap = &__get_cpu_var(runstate_snapshot);
+ snap = &__get_cpu_var(xen_runstate_snapshot);
/* work out how much time the VCPU has not been runn*ing* */
blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
/* Add the appropriate number of ticks of stolen time,
including any left-overs from last time. */
- stolen = runnable + offline + __get_cpu_var(residual_stolen);
+ stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
if (stolen < 0)
stolen = 0;
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
- __get_cpu_var(residual_stolen) = stolen;
+ __get_cpu_var(xen_residual_stolen) = stolen;
account_steal_ticks(ticks);
/* Add the appropriate number of ticks of blocked time,
including any left-overs from last time. */
- blocked += __get_cpu_var(residual_blocked);
+ blocked += __get_cpu_var(xen_residual_blocked);
if (blocked < 0)
blocked = 0;
ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
- __get_cpu_var(residual_blocked) = blocked;
+ __get_cpu_var(xen_residual_blocked) = blocked;
account_idle_ticks(ticks);
}
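The stolen/blocked accounting in this last hunk follows a residual-carry pattern: the nanosecond delta reported by Xen is converted into whole scheduler ticks, and the remainder is parked in a per-cpu variable (now prefixed xen_residual_*) so it is added back on the next pass instead of being lost to rounding. A self-contained userspace sketch of that arithmetic, assuming HZ=100 for the tick length (the kernel uses iter_div_u64_rem() for the divide; everything else here is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NS_PER_TICK (1000000000LL / 100)    /* HZ == 100 assumed for the sketch */

    static int64_t residual_stolen;     /* stands in for per-cpu xen_residual_stolen */

    static unsigned long account_stolen_ns(int64_t stolen_ns)
    {
            /* include the left-over carried from the previous call */
            int64_t total = stolen_ns + residual_stolen;
            unsigned long ticks;

            if (total < 0)                  /* clamp, as do_stolen_accounting() does */
                    total = 0;

            ticks = total / NS_PER_TICK;            /* whole ticks to account */
            residual_stolen = total % NS_PER_TICK;  /* carry the remainder forward */
            return ticks;                   /* kernel passes this to account_steal_ticks() */
    }

    int main(void)
    {
            unsigned long ticks = account_stolen_ns(25000000);

            /* 25 ms of stolen time -> 2 ticks accounted, 5 ms carried forward */
            printf("%lu ticks, %lld ns carried\n", ticks, (long long)residual_stolen);
            return 0;
    }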