 arch/i386/lib/msr-on-cpu.c |   73
 include/asm-i386/msr.h     |   12
 include/asm-x86_64/msr.h   |   11
 3 files changed, 89 insertions(+), 7 deletions(-)
diff --git a/arch/i386/lib/msr-on-cpu.c b/arch/i386/lib/msr-on-cpu.c
index 1c46bda..7767962 100644
--- a/arch/i386/lib/msr-on-cpu.c
+++ b/arch/i386/lib/msr-on-cpu.c
@@ -6,6 +6,7 @@
 struct msr_info {
 	u32 msr_no;
 	u32 l, h;
+	int err;
 };
 
 static void __rdmsr_on_cpu(void *info)
@@ -15,20 +16,38 @@ static void __rdmsr_on_cpu(void *info)
 	rdmsr(rv->msr_no, rv->l, rv->h);
 }
 
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static void __rdmsr_safe_on_cpu(void *info)
 {
+	struct msr_info *rv = info;
+
+	rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
+}
+
+static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
+{
+	int err = 0;
 	preempt_disable();
 	if (smp_processor_id() == cpu)
-		rdmsr(msr_no, *l, *h);
+		if (safe)
+			err = rdmsr_safe(msr_no, l, h);
+		else
+			rdmsr(msr_no, *l, *h);
 	else {
 		struct msr_info rv;
 
 		rv.msr_no = msr_no;
-		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+		if (safe) {
+			smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
+						 &rv, 0, 1);
+			err = rv.err;
+		} else {
+			smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+		}
 		*l = rv.l;
 		*h = rv.h;
 	}
 	preempt_enable();
+	return err;
 }
 
 static void __wrmsr_on_cpu(void *info)
@@ -38,21 +57,63 @@ static void __wrmsr_on_cpu(void *info)
 	wrmsr(rv->msr_no, rv->l, rv->h);
 }
 
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+static void __wrmsr_safe_on_cpu(void *info)
 {
+	struct msr_info *rv = info;
+
+	rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
+}
+
+static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
+{
+	int err = 0;
 	preempt_disable();
 	if (smp_processor_id() == cpu)
-		wrmsr(msr_no, l, h);
+		if (safe)
+			err = wrmsr_safe(msr_no, l, h);
+		else
+			wrmsr(msr_no, l, h);
 	else {
 		struct msr_info rv;
 
 		rv.msr_no = msr_no;
 		rv.l = l;
 		rv.h = h;
-		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+		if (safe) {
+			smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
+						 &rv, 0, 1);
+			err = rv.err;
+		} else {
+			smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+		}
 	}
 	preempt_enable();
+	return err;
+}
+
+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	_wrmsr_on_cpu(cpu, msr_no, l, h, 0);
+}
+
+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	_rdmsr_on_cpu(cpu, msr_no, l, h, 0);
+}
+
+/* These "safe" variants are slower and should be used when the target MSR
+   may not actually exist. */
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	return _wrmsr_on_cpu(cpu, msr_no, l, h, 1);
+}
+
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	return _rdmsr_on_cpu(cpu, msr_no, l, h, 1);
 }
 
 EXPORT_SYMBOL(rdmsr_on_cpu);
 EXPORT_SYMBOL(wrmsr_on_cpu);
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
+EXPORT_SYMBOL(wrmsr_safe_on_cpu);
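
Usage sketch (not part of the patch): a caller that needs an MSR which may be unimplemented on some CPU models can use the new safe variant and check its return value instead of risking an unhandled fault. The wrapper function below is made up for illustration, and MSR_IA32_THERM_STATUS from asm/msr-index.h merely stands in for any MSR that some parts may lack; the usual kernel includes are assumed.

/* Illustrative caller, not from this patch: read IA32_THERM_STATUS on a
 * remote CPU, tolerating CPUs where the MSR does not exist. */
static int report_therm_status(unsigned int cpu)
{
	u32 lo, hi;
	int err;

	/* Returns 0 on success, nonzero if the rdmsr faulted on that CPU. */
	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &lo, &hi);
	if (err) {
		printk(KERN_INFO "cpu %u: IA32_THERM_STATUS not available\n",
		       cpu);
		return err;
	}

	printk(KERN_INFO "cpu %u: IA32_THERM_STATUS = %08x%08x\n",
	       cpu, hi, lo);
	return 0;
}
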
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 9559894..26861df 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -77,7 +77,7 @@ static inline unsigned long long native_read_pmc(void)
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-
+#include <linux/errno.h>
 /*
  * Access to machine-specific registers (available on 586 and better only)
  * Note: the rd* operations modify the parameters directly (without using
@@ -148,6 +148,8 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 #ifdef CONFIG_SMP
 void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else /* CONFIG_SMP */
 static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
@@ -157,6 +159,14 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	wrmsr(msr_no, l, h);
 }
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	return wrmsr_safe(msr_no, l, h);
+}
 #endif /* CONFIG_SMP */
 #endif
 #endif
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index a524f03..d5c55b8 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -4,6 +4,7 @@
 #include <asm/msr-index.h>
 
 #ifndef __ASSEMBLY__
+#include <linux/errno.h>
 /*
  * Access to machine-specific registers (available on 586 and better only)
  * Note: the rd* operations modify the parameters directly (without using
@@ -162,6 +163,8 @@ static inline unsigned int cpuid_edx(unsigned int op)
 #ifdef CONFIG_SMP
 void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else /* CONFIG_SMP */
 static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
@@ -171,6 +174,14 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	wrmsr(msr_no, l, h);
 }
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	return wrmsr_safe(msr_no, l, h);
+}
 #endif /* CONFIG_SMP */
 #endif /* __ASSEMBLY__ */
 #endif /* X86_64_MSR_H */
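
A second illustrative sketch, also not part of this change: with the safe variants declared for SMP and stubbed for UP, the same caller code can probe and update an optional MSR on every online CPU and simply skip CPUs where the access faults; on !CONFIG_SMP kernels the inline fallbacks above reduce the calls to plain rdmsr_safe()/wrmsr_safe() on the sole CPU. The MSR number and feature bit are placeholders, and linux/cpumask.h plus asm/msr.h are assumed to be included.

/* Illustrative only: set a feature bit in an optional MSR on each online
 * CPU.  msr_no and bit are placeholder values, not taken from this patch. */
static void set_optional_msr_bit(u32 msr_no, u32 bit)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		u32 lo, hi;

		/* Skip CPUs where the MSR is missing (the read faults). */
		if (rdmsr_safe_on_cpu(cpu, msr_no, &lo, &hi))
			continue;

		lo |= bit;

		/* A faulting write is likewise tolerated. */
		wrmsr_safe_on_cpu(cpu, msr_no, lo, hi);
	}
}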