author	gnn <gnn@FreeBSD.org>	2010-07-13 19:37:45 +0000
committer	gnn <gnn@FreeBSD.org>	2010-07-13 19:37:45 +0000
commit	9590d1f432286562ed0e7ac223166b36d0168ba5 (patch)
tree	5bca768cfcebac7b4242aee321a6d8ce69540ef9
parent	3b068762dadde40f32102667865fbde8de3ebc58 (diff)
Fix a panic brought about by writing an MSR without a proper mask.
All of the necessary wrmsr calls are now preceded by a rdmsr and we leave the
reserved bits alone.  Document the bits in the relevant registers for future
reference.

Tested by:	mdf
MFC after:	1 week

-rw-r--r--	sys/dev/hwpmc/hwpmc_core.c	45
-rw-r--r--	sys/dev/hwpmc/hwpmc_core.h	54
2 files changed, 86 insertions, 13 deletions
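For illustration only (this sketch is not part of the commit): the pattern the change applies throughout the driver is a read-modify-write, so reserved MSR bits are preserved instead of being overwritten with zeroes. EXAMPLE_CTRL and EXAMPLE_CTRL_MASK below are placeholder names standing in for registers such as IAF_CTRL and their masks.

/*
 * Hypothetical sketch of the read-modify-write pattern; the names
 * EXAMPLE_CTRL and EXAMPLE_CTRL_MASK are placeholders, not part of
 * the commit.
 */
#include <sys/types.h>
#include <machine/cpufunc.h>	/* rdmsr()/wrmsr() on FreeBSD x86 */

#define	EXAMPLE_CTRL		0x38D			/* e.g. IAF_CTRL */
#define	EXAMPLE_CTRL_MASK	0x0000000000000bbbULL	/* defined bits only */

static void
example_clear_ctrl(void)
{
	uint64_t msr;

	msr = rdmsr(EXAMPLE_CTRL);			/* read current value */
	wrmsr(EXAMPLE_CTRL, msr & ~EXAMPLE_CTRL_MASK);	/* clear only the defined bits */
}

static void
example_set_ctrl(uint64_t ctrl)
{
	uint64_t msr;

	msr = rdmsr(EXAMPLE_CTRL);			/* read current value */
	wrmsr(EXAMPLE_CTRL, msr | (ctrl & EXAMPLE_CTRL_MASK));	/* set only the defined bits */
}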
diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c
index e7de099..60b0292 100644
--- a/sys/dev/hwpmc/hwpmc_core.c
+++ b/sys/dev/hwpmc/hwpmc_core.c
@@ -147,6 +147,7 @@ core_pcpu_fini(struct pmc_mdep *md, int cpu)
int core_ri, n, npmc;
struct pmc_cpu *pc;
struct core_cpu *cc;
+ uint64_t msr = 0;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[core,%d] insane cpu number (%d)", __LINE__, cpu));
@@ -166,11 +167,14 @@ core_pcpu_fini(struct pmc_mdep *md, int cpu)
npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
- for (n = 0; n < npmc; n++)
- wrmsr(IAP_EVSEL0 + n, 0);
+ for (n = 0; n < npmc; n++) {
+ msr = rdmsr(IAP_EVSEL0 + n);
+ wrmsr(IAP_EVSEL0 + n, msr & ~IAP_EVSEL_MASK);
+ }
if (core_cputype != PMC_CPU_INTEL_CORE) {
- wrmsr(IAF_CTRL, 0);
+ msr = rdmsr(IAF_CTRL);
+ wrmsr(IAF_CTRL, msr & ~IAF_CTRL_MASK);
npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
}
@@ -374,6 +378,7 @@ iaf_start_pmc(int cpu, int ri)
{
struct pmc *pm;
struct core_cpu *iafc;
+ uint64_t msr = 0;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[core,%d] illegal CPU value %d", __LINE__, cpu));
@@ -387,12 +392,15 @@ iaf_start_pmc(int cpu, int ri)
iafc->pc_iafctrl |= pm->pm_md.pm_iaf.pm_iaf_ctrl;
- wrmsr(IAF_CTRL, iafc->pc_iafctrl);
+ msr = rdmsr(IAF_CTRL);
+ wrmsr(IAF_CTRL, msr | (iafc->pc_iafctrl & IAF_CTRL_MASK));
do {
iafc->pc_resync = 0;
iafc->pc_globalctrl |= (1ULL << (ri + IAF_OFFSET));
- wrmsr(IA_GLOBAL_CTRL, iafc->pc_globalctrl);
+ msr = rdmsr(IA_GLOBAL_CTRL);
+ wrmsr(IA_GLOBAL_CTRL, msr | (iafc->pc_globalctrl &
+ IAF_GLOBAL_CTRL_MASK));
} while (iafc->pc_resync != 0);
PMCDBG(MDP,STA,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
@@ -407,6 +415,7 @@ iaf_stop_pmc(int cpu, int ri)
{
uint32_t fc;
struct core_cpu *iafc;
+ uint64_t msr = 0;
PMCDBG(MDP,STO,1,"iaf-stop cpu=%d ri=%d", cpu, ri);
@@ -425,12 +434,15 @@ iaf_stop_pmc(int cpu, int ri)
iafc->pc_iafctrl &= ~fc;
PMCDBG(MDP,STO,1,"iaf-stop iafctrl=%x", iafc->pc_iafctrl);
- wrmsr(IAF_CTRL, iafc->pc_iafctrl);
+ msr = rdmsr(IAF_CTRL);
+ wrmsr(IAF_CTRL, msr | (iafc->pc_iafctrl & IAF_CTRL_MASK));
do {
iafc->pc_resync = 0;
iafc->pc_globalctrl &= ~(1ULL << (ri + IAF_OFFSET));
- wrmsr(IA_GLOBAL_CTRL, iafc->pc_globalctrl);
+ msr = rdmsr(IA_GLOBAL_CTRL);
+ wrmsr(IA_GLOBAL_CTRL, msr | (iafc->pc_globalctrl &
+ IAF_GLOBAL_CTRL_MASK));
} while (iafc->pc_resync != 0);
PMCDBG(MDP,STO,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
@@ -445,6 +457,7 @@ iaf_write_pmc(int cpu, int ri, pmc_value_t v)
{
struct core_cpu *cc;
struct pmc *pm;
+ uint64_t msr;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[core,%d] illegal cpu value %d", __LINE__, cpu));
@@ -460,9 +473,11 @@ iaf_write_pmc(int cpu, int ri, pmc_value_t v)
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
v = iaf_reload_count_to_perfctr_value(v);
- wrmsr(IAF_CTRL, 0); /* Turn off fixed counters */
- wrmsr(IAF_CTR0 + ri, v);
- wrmsr(IAF_CTRL, cc->pc_iafctrl);
+ msr = rdmsr(IAF_CTRL);
+ wrmsr(IAF_CTRL, msr & ~IAF_CTRL_MASK);
+ wrmsr(IAF_CTR0 + ri, v & ((1ULL << core_iaf_width) - 1));
+ msr = rdmsr(IAF_CTRL);
+ wrmsr(IAF_CTRL, msr | (cc->pc_iafctrl & IAF_CTRL_MASK));
PMCDBG(MDP,WRI,1, "iaf-write cpu=%d ri=%d msr=0x%x v=%jx iafctrl=%jx "
"pmc=%jx", cpu, ri, IAF_RI_TO_MSR(ri), v,
@@ -1879,6 +1894,7 @@ iap_stop_pmc(int cpu, int ri)
{
struct pmc *pm;
struct core_cpu *cc;
+ uint64_t msr;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[core,%d] illegal cpu value %d", __LINE__, cpu));
@@ -1894,7 +1910,8 @@ iap_stop_pmc(int cpu, int ri)
PMCDBG(MDP,STO,1, "iap-stop cpu=%d ri=%d", cpu, ri);
- wrmsr(IAP_EVSEL0 + ri, 0); /* stop hw */
+ msr = rdmsr(IAP_EVSEL0 + ri);
+ wrmsr(IAP_EVSEL0 + ri, msr & ~IAP_EVSEL_MASK); /* stop hw */
if (core_cputype == PMC_CPU_INTEL_CORE)
return (0);
@@ -1937,7 +1954,7 @@ iap_write_pmc(int cpu, int ri, pmc_value_t v)
* a stopped state when the pcd_write() entry point is called.
*/
- wrmsr(IAP_PMC0 + ri, v);
+ wrmsr(IAP_PMC0 + ri, v & ((1ULL << core_iap_width) - 1));
return (0);
}
@@ -1987,6 +2004,7 @@ core_intr(int cpu, struct trapframe *tf)
struct pmc *pm;
struct core_cpu *cc;
int error, found_interrupt, ri;
+ uint64_t msr = 0;
PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
TRAPF_USERMODE(tf));
@@ -2018,7 +2036,8 @@ core_intr(int cpu, struct trapframe *tf)
* Stop the counter, reload it but only restart it if
* the PMC is not stalled.
*/
- wrmsr(IAP_EVSEL0 + ri, 0);
+ msr = rdmsr(IAP_EVSEL0 + ri);
+ wrmsr(IAP_EVSEL0 + ri, msr & ~IAP_EVSEL_MASK);
wrmsr(IAP_PMC0 + ri, v);
if (error)
diff --git a/sys/dev/hwpmc/hwpmc_core.h b/sys/dev/hwpmc/hwpmc_core.h
index e88ecb0..634ecf8 100644
--- a/sys/dev/hwpmc/hwpmc_core.h
+++ b/sys/dev/hwpmc/hwpmc_core.h
@@ -67,20 +67,59 @@ struct pmc_md_iap_op_pmcallocate {
/*
* Fixed-function counters.
*/
+
#define IAF_MASK 0xF
+#define IAF_COUNTER_MASK 0x0000ffffffffffff
#define IAF_CTR0 0x309
#define IAF_CTR1 0x30A
#define IAF_CTR2 0x30B
+/*
+ * The IAF_CTRL MSR is laid out in the following way.
+ *
+ * Bit Position Use
+ * 63 - 12 Reserved (do not touch)
+ * 11 Ctr 2 PMI
+ * 10 Reserved (do not touch)
+ * 9-8 Ctr 2 Enable
+ * 7 Ctr 1 PMI
+ * 6 Reserved (do not touch)
+ * 5-4 Ctr 1 Enable
+ * 3 Ctr 0 PMI
+ * 2 Reserved (do not touch)
+ * 1-0 Ctr 0 Enable (3: All Levels, 2: User, 1: OS, 0: Disable)
+ */
+
#define IAF_OFFSET 32
#define IAF_CTRL 0x38D
+#define IAF_CTRL_MASK 0x0000000000000bbb
/*
* Programmable counters.
*/
#define IAP_PMC0 0x0C1
+
+/*
+ * IAP_EVSEL(n) is laid out in the following way.
+ *
+ * Bit Position Use
+ * 63-32 Reserved (do not touch)
+ * 31-24 Counter Mask
+ * 23 Invert
+ * 22 Enable
+ * 21 Reserved (do not touch)
+ * 20 APIC Interrupt Enable
+ * 19 Pin Control
+ * 18 Edge Detect
+ * 17 OS
+ * 16 User
+ * 15-8 Unit Mask
+ * 7-0 Event Select
+ */
+
+#define IAP_EVSEL_MASK 0x00000000ffdfffff
#define IAP_EVSEL0 0x186
/*
@@ -90,6 +129,21 @@ struct pmc_md_iap_op_pmcallocate {
#define IA_GLOBAL_STATUS 0x38E
#define IA_GLOBAL_CTRL 0x38F
+
+/*
+ * IA_GLOBAL_CTRL is laid out in the following way.
+ *
+ * Bit Position Use
+ * 63-35 Reserved (do not touch)
+ * 34 IAF Counter 2 Enable
+ * 33 IAF Counter 1 Enable
+ * 32 IAF Counter 0 Enable
+ * 31-0 Depends on programmable counters
+ */
+
+/* The mask is only for the fixed portion of the register. */
+#define IAF_GLOBAL_CTRL_MASK 0x0000000700000000
+
#define IA_GLOBAL_OVF_CTRL 0x390
#define IA_GLOBAL_STATUS_FLAG_CONDCHG (1ULL << 63)
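As a cross-check on the layouts documented above, the mask constants can be rebuilt from the listed bit positions. A minimal userland sketch, not part of the commit, assuming a hosted C environment:

/* Illustrative derivation of the mask values from the documented bit layouts. */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* IAF_CTRL: PMI + enable bits for fixed counters 0-2; bits 2, 6 and 10 stay reserved. */
	uint64_t iaf_ctrl_mask = (0xbULL << 0) | (0xbULL << 4) | (0xbULL << 8);

	/* IAP_EVSEL: bits 0-20 and 22-31 are defined; bit 21 is reserved. */
	uint64_t iap_evsel_mask = 0xffffffffULL & ~(1ULL << 21);

	/* IA_GLOBAL_CTRL: the fixed-counter enables occupy bits 32-34. */
	uint64_t iaf_global_ctrl_mask = 0x7ULL << 32;

	assert(iaf_ctrl_mask == 0x0000000000000bbbULL);		/* IAF_CTRL_MASK */
	assert(iap_evsel_mask == 0x00000000ffdfffffULL);	/* IAP_EVSEL_MASK */
	assert(iaf_global_ctrl_mask == 0x0000000700000000ULL);	/* IAF_GLOBAL_CTRL_MASK */
	return (0);
}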