path: root/sys/dev/hwpmc
author:		fabient <fabient@FreeBSD.org>	2012-03-28 20:58:30 +0000
committer:	fabient <fabient@FreeBSD.org>	2012-03-28 20:58:30 +0000
commit:		5edfb77dd3a164bb9d2d40c6604faa6c9f3dce15
tree:		fadff08d26576c3d5c1cef9d47abd784602b237a	/sys/dev/hwpmc
parent:		9a7982e5a0267c0421856f3a43a1ae75880058f3
Add software PMC support.
New kernel events can be added at various locations for sampling or
counting. This allows, for example, easy system profiling with familiar
tools like pmcstat(8), regardless of the underlying processor.
Simultaneous use of software PMCs and hardware PMCs is possible: for
example, counting lock acquire failures and page faults while sampling
on instructions.

Sponsored by:	NETASQ
MFC after:	1 month
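As an illustration of the provider-side pattern this commit enables,
here is a minimal hedged sketch. The PMC_SOFT_DEFINE()/PMC_SOFT_CALL()
macros and struct pmc_soft live in sys/pmckern.h and
sys/kern/kern_pmc.c, outside this diffstat, so treat the names and
signatures below as assumptions; the "foo"/"FOO.BAR" provider is
hypothetical.

	#include <sys/param.h>
	#include <sys/pmckern.h>

	/*
	 * Hypothetical provider: declare a software event that will
	 * appear to userland as "FOO.BAR" in the SOFT class.
	 */
	PMC_SOFT_DEFINE( , , foo, bar);

	void
	foo_do_work(void)
	{
		/*
		 * Cheap when unused: assumed to expand to a
		 * ps_running check before raising the
		 * PMC_FN_SOFT_SAMPLING hook serviced by hwpmc.
		 */
		PMC_SOFT_CALL( , , foo, bar);
	}

Once such an event is registered, it can be sampled or counted with
pmcstat(8) alongside hardware PMCs, which is the simultaneous usage the
message describes.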
Diffstat (limited to 'sys/dev/hwpmc')
-rw-r--r--	sys/dev/hwpmc/hwpmc_amd.c	 10
-rw-r--r--	sys/dev/hwpmc/hwpmc_core.c	  6
-rw-r--r--	sys/dev/hwpmc/hwpmc_intel.c	  6
-rw-r--r--	sys/dev/hwpmc/hwpmc_logging.c	 28
-rw-r--r--	sys/dev/hwpmc/hwpmc_mips.c	  2
-rw-r--r--	sys/dev/hwpmc/hwpmc_mod.c	251
-rw-r--r--	sys/dev/hwpmc/hwpmc_piv.c	  4
-rw-r--r--	sys/dev/hwpmc/hwpmc_powerpc.c	  3
-rw-r--r--	sys/dev/hwpmc/hwpmc_ppro.c	  2
-rw-r--r--	sys/dev/hwpmc/hwpmc_soft.c	485
-rw-r--r--	sys/dev/hwpmc/hwpmc_soft.h	 48
-rw-r--r--	sys/dev/hwpmc/hwpmc_tsc.c	  3
-rw-r--r--	sys/dev/hwpmc/hwpmc_x86.c	  7
-rw-r--r--	sys/dev/hwpmc/hwpmc_xscale.c	  4
-rw-r--r--	sys/dev/hwpmc/pmc_events.h	 12
15 files changed, 802 insertions, 69 deletions
diff --git a/sys/dev/hwpmc/hwpmc_amd.c b/sys/dev/hwpmc/hwpmc_amd.c
index 9ffa62f..ed49ea7 100644
--- a/sys/dev/hwpmc/hwpmc_amd.c
+++ b/sys/dev/hwpmc/hwpmc_amd.c
@@ -687,7 +687,8 @@ amd_intr(int cpu, struct trapframe *tf)
wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v));
/* Restart the counter if logging succeeded. */
- error = pmc_process_interrupt(cpu, pm, tf, TRAPF_USERMODE(tf));
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
if (error == 0)
wrmsr(evsel, config | AMD_PMC_ENABLE);
}
@@ -874,7 +875,7 @@ amd_pcpu_fini(struct pmc_mdep *md, int cpu)
struct pmc_mdep *
pmc_amd_initialize(void)
{
- int classindex, error, i, nclasses, ncpus;
+ int classindex, error, i, ncpus;
struct pmc_classdep *pcd;
enum pmc_cputype cputype;
struct pmc_mdep *pmc_mdep;
@@ -926,12 +927,9 @@ pmc_amd_initialize(void)
* These processors have two classes of PMCs: the TSC and
* programmable PMCs.
*/
- nclasses = 2;
- pmc_mdep = malloc(sizeof(struct pmc_mdep) + nclasses * sizeof (struct pmc_classdep),
- M_PMC, M_WAITOK|M_ZERO);
+ pmc_mdep = pmc_mdep_alloc(2);
pmc_mdep->pmd_cputype = cputype;
- pmc_mdep->pmd_nclass = nclasses;
ncpus = pmc_cpu_max();
diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c
index 1aac3b9..6209c6f 100644
--- a/sys/dev/hwpmc/hwpmc_core.c
+++ b/sys/dev/hwpmc/hwpmc_core.c
@@ -2239,7 +2239,7 @@ core_intr(int cpu, struct trapframe *tf)
if (pm->pm_state != PMC_STATE_RUNNING)
continue;
- error = pmc_process_interrupt(cpu, pm, tf,
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
TRAPF_USERMODE(tf));
v = pm->pm_sc.pm_reloadcount;
@@ -2326,7 +2326,7 @@ core2_intr(int cpu, struct trapframe *tf)
!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
continue;
- error = pmc_process_interrupt(cpu, pm, tf,
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
TRAPF_USERMODE(tf));
if (error)
intrenable &= ~flag;
@@ -2354,7 +2354,7 @@ core2_intr(int cpu, struct trapframe *tf)
!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
continue;
- error = pmc_process_interrupt(cpu, pm, tf,
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
TRAPF_USERMODE(tf));
if (error)
intrenable &= ~flag;
diff --git a/sys/dev/hwpmc/hwpmc_intel.c b/sys/dev/hwpmc/hwpmc_intel.c
index 89fd0b1..f7adb6c 100644
--- a/sys/dev/hwpmc/hwpmc_intel.c
+++ b/sys/dev/hwpmc/hwpmc_intel.c
@@ -162,12 +162,10 @@ pmc_intel_initialize(void)
return (NULL);
}
- pmc_mdep = malloc(sizeof(struct pmc_mdep) + nclasses *
- sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
+ /* Allocate base class and initialize machine dependent struct */
+ pmc_mdep = pmc_mdep_alloc(nclasses);
pmc_mdep->pmd_cputype = cputype;
- pmc_mdep->pmd_nclass = nclasses;
-
pmc_mdep->pmd_switch_in = intel_switch_in;
pmc_mdep->pmd_switch_out = intel_switch_out;
diff --git a/sys/dev/hwpmc/hwpmc_logging.c b/sys/dev/hwpmc/hwpmc_logging.c
index 79170ed..880bcaa 100644
--- a/sys/dev/hwpmc/hwpmc_logging.c
+++ b/sys/dev/hwpmc/hwpmc_logging.c
@@ -129,6 +129,7 @@ static struct mtx pmc_kthread_mtx; /* sleep lock */
/* Emit a string. Caution: does NOT update _le, so needs to be last */
#define PMCLOG_EMITSTRING(S,L) do { bcopy((S), _le, (L)); } while (0)
+#define PMCLOG_EMITNULLSTRING(L) do { bzero(_le, (L)); } while (0)
#define PMCLOG_DESPATCH(PO) \
pmclog_release((PO)); \
@@ -835,16 +836,33 @@ void
pmclog_process_pmcallocate(struct pmc *pm)
{
struct pmc_owner *po;
+ struct pmc_soft *ps;
po = pm->pm_owner;
PMCDBG(LOG,ALL,1, "pm=%p", pm);
- PMCLOG_RESERVE(po, PMCALLOCATE, sizeof(struct pmclog_pmcallocate));
- PMCLOG_EMIT32(pm->pm_id);
- PMCLOG_EMIT32(pm->pm_event);
- PMCLOG_EMIT32(pm->pm_flags);
- PMCLOG_DESPATCH(po);
+ if (PMC_TO_CLASS(pm) == PMC_CLASS_SOFT) {
+ PMCLOG_RESERVE(po, PMCALLOCATEDYN,
+ sizeof(struct pmclog_pmcallocatedyn));
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT32(pm->pm_event);
+ PMCLOG_EMIT32(pm->pm_flags);
+ ps = pmc_soft_ev_acquire(pm->pm_event);
+ if (ps != NULL)
+ PMCLOG_EMITSTRING(ps->ps_ev.pm_ev_name,PMC_NAME_MAX);
+ else
+ PMCLOG_EMITNULLSTRING(PMC_NAME_MAX);
+ pmc_soft_ev_release(ps);
+ PMCLOG_DESPATCH(po);
+ } else {
+ PMCLOG_RESERVE(po, PMCALLOCATE,
+ sizeof(struct pmclog_pmcallocate));
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT32(pm->pm_event);
+ PMCLOG_EMIT32(pm->pm_flags);
+ PMCLOG_DESPATCH(po);
+ }
}
void
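The new PMCALLOCATEDYN record exists so that log readers can map a
dynamically numbered soft event back to its name. The payload implied
by the emit sequence above is sketched below; the authoritative layout
is struct pmclog_pmcallocatedyn in sys/pmclog.h, which is outside this
diffstat, so take the field names as assumptions.

	/*
	 * Sketch of the record body written by
	 * pmclog_process_pmcallocate() for PMC_CLASS_SOFT.
	 */
	struct pmclog_pmcallocatedyn_sketch {
		uint32_t pl_pmcid;	/* PMCLOG_EMIT32(pm->pm_id) */
		uint32_t pl_event;	/* PMCLOG_EMIT32(pm->pm_event) */
		uint32_t pl_flags;	/* PMCLOG_EMIT32(pm->pm_flags) */
		/* PMCLOG_EMITSTRING or PMCLOG_EMITNULLSTRING */
		char	 pl_evname[PMC_NAME_MAX];
	};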
diff --git a/sys/dev/hwpmc/hwpmc_mips.c b/sys/dev/hwpmc/hwpmc_mips.c
index 3afa5d9..8df27c9 100644
--- a/sys/dev/hwpmc/hwpmc_mips.c
+++ b/sys/dev/hwpmc/hwpmc_mips.c
@@ -287,7 +287,7 @@ mips_pmc_intr(int cpu, struct trapframe *tf)
retval = 1;
if (pm->pm_state != PMC_STATE_RUNNING)
continue;
- error = pmc_process_interrupt(cpu, pm, tf,
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
TRAPF_USERMODE(tf));
if (error) {
/* Clear/disable the relevant counter */
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 7ca7a47..7eb9f92 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -70,6 +70,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_map.h>
#include <vm/vm_object.h>
+#include "hwpmc_soft.h"
+
/*
* Types
*/
@@ -182,7 +184,7 @@ static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
int cpu);
static int pmc_can_attach(struct pmc *pm, struct proc *p);
-static void pmc_capture_user_callchain(int cpu, struct trapframe *tf);
+static void pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf);
static void pmc_cleanup(void);
static int pmc_detach_process(struct proc *p, struct pmc *pm);
static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
@@ -206,7 +208,7 @@ static void pmc_process_csw_out(struct thread *td);
static void pmc_process_exit(void *arg, struct proc *p);
static void pmc_process_fork(void *arg, struct proc *p1,
struct proc *p2, int n);
-static void pmc_process_samples(int cpu);
+static void pmc_process_samples(int cpu, int soft);
static void pmc_release_pmc_descriptor(struct pmc *pmc);
static void pmc_remove_owner(struct pmc_owner *po);
static void pmc_remove_process_descriptor(struct pmc_process *pp);
@@ -218,12 +220,16 @@ static int pmc_stop(struct pmc *pm);
static int pmc_syscall_handler(struct thread *td, void *syscall_args);
static void pmc_unlink_target_process(struct pmc *pmc,
struct pmc_process *pp);
+static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
+static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
+static struct pmc_mdep *pmc_generic_cpu_initialize(void);
+static void pmc_generic_cpu_finalize(struct pmc_mdep *md);
/*
* Kernel tunables and sysctl(8) interface.
*/
-SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
+SYSCTL_DECL(_kern_hwpmc);
static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth);
@@ -1833,7 +1839,9 @@ const char *pmc_hooknames[] = {
"KLDUNLOAD",
"MMAP",
"MUNMAP",
- "CALLCHAIN"
+ "CALLCHAIN-NMI",
+ "CALLCHAIN-SOFT",
+ "SOFTSAMPLING"
};
#endif
@@ -1992,7 +2000,8 @@ pmc_hook_handler(struct thread *td, int function, void *arg)
* lose the interrupt sample.
*/
CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmc_cpumask);
- pmc_process_samples(PCPU_GET(cpuid));
+ pmc_process_samples(PCPU_GET(cpuid), PMC_HR);
+ pmc_process_samples(PCPU_GET(cpuid), PMC_SR);
break;
@@ -2022,11 +2031,30 @@ pmc_hook_handler(struct thread *td, int function, void *arg)
*/
KASSERT(td == curthread, ("[pmc,%d] td != curthread",
__LINE__));
- pmc_capture_user_callchain(PCPU_GET(cpuid),
+
+ pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
(struct trapframe *) arg);
td->td_pflags &= ~TDP_CALLCHAIN;
break;
+ case PMC_FN_USER_CALLCHAIN_SOFT:
+ /*
+ * Record a call chain.
+ */
+ KASSERT(td == curthread, ("[pmc,%d] td != curthread",
+ __LINE__));
+ pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_SR,
+ (struct trapframe *) arg);
+ td->td_pflags &= ~TDP_CALLCHAIN;
+ break;
+
+ case PMC_FN_SOFT_SAMPLING:
+ /*
+ * Call soft PMC sampling intr.
+ */
+ pmc_soft_intr((struct pmckern_soft *) arg);
+ break;
+
default:
#ifdef DEBUG
KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
@@ -2221,18 +2249,17 @@ pmc_destroy_pmc_descriptor(struct pmc *pm)
static void
pmc_wait_for_pmc_idle(struct pmc *pm)
{
-#ifdef DEBUG
+#ifdef DEBUG
volatile int maxloop;
maxloop = 100 * pmc_cpu_max();
#endif
-
/*
* Loop (with a forced context switch) till the PMC's runcount
* comes down to zero.
*/
while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
-#ifdef DEBUG
+#ifdef DEBUG
maxloop--;
KASSERT(maxloop > 0,
("[pmc,%d] (ri%d, rc%d) waiting too long for "
@@ -2972,6 +2999,53 @@ pmc_syscall_handler(struct thread *td, void *syscall_args)
}
break;
+ /*
+ * Retrieve soft events list.
+ */
+ case PMC_OP_GETDYNEVENTINFO:
+ {
+ enum pmc_class cl;
+ enum pmc_event ev;
+ struct pmc_op_getdyneventinfo *gei;
+ struct pmc_dyn_event_descr dev;
+ struct pmc_soft *ps;
+ uint32_t nevent;
+
+ sx_assert(&pmc_sx, SX_LOCKED);
+
+ gei = (struct pmc_op_getdyneventinfo *) arg;
+
+ if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0)
+ break;
+
+ /* Only SOFT class is dynamic. */
+ if (cl != PMC_CLASS_SOFT) {
+ error = EINVAL;
+ break;
+ }
+
+ nevent = 0;
+ for (ev = PMC_EV_SOFT_FIRST; ev <= PMC_EV_SOFT_LAST; ev++) {
+ ps = pmc_soft_ev_acquire(ev);
+ if (ps == NULL)
+ continue;
+ bcopy(&ps->ps_ev, &dev, sizeof(dev));
+ pmc_soft_ev_release(ps);
+
+ error = copyout(&dev,
+ &gei->pm_events[nevent],
+ sizeof(struct pmc_dyn_event_descr));
+ if (error != 0)
+ break;
+ nevent++;
+ }
+ if (error != 0)
+ break;
+
+ error = copyout(&nevent, &gei->pm_nevent,
+ sizeof(nevent));
+ }
+ break;
/*
* Get module statistics
@@ -4022,7 +4096,7 @@ pmc_post_callchain_callback(void)
*/
int
-pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
+pmc_process_interrupt(int cpu, int ring, struct pmc *pm, struct trapframe *tf,
int inuserspace)
{
int error, callchaindepth;
@@ -4035,7 +4109,7 @@ pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
/*
* Allocate space for a sample buffer.
*/
- psb = pmc_pcpu[cpu]->pc_sb;
+ psb = pmc_pcpu[cpu]->pc_sb[ring];
ps = psb->ps_write;
if (ps->ps_nsamples) { /* in use, reader hasn't caught up */
@@ -4061,6 +4135,7 @@ pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
pm->pm_runcount));
atomic_add_rel_int(&pm->pm_runcount, 1); /* hold onto PMC */
+
ps->ps_pmc = pm;
if ((td = curthread) && td->td_proc)
ps->ps_pid = td->td_proc->p_pid;
@@ -4080,11 +4155,11 @@ pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
* Kernel stack traversals can be done immediately,
* while we defer to an AST for user space traversals.
*/
- if (!inuserspace)
+ if (!inuserspace) {
callchaindepth =
pmc_save_kernel_callchain(ps->ps_pc,
callchaindepth, tf);
- else {
+ } else {
pmc_post_callchain_callback();
callchaindepth = PMC_SAMPLE_INUSE;
}
@@ -4113,7 +4188,7 @@ pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
*/
static void
-pmc_capture_user_callchain(int cpu, struct trapframe *tf)
+pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf)
{
int i;
struct pmc *pm;
@@ -4124,9 +4199,7 @@ pmc_capture_user_callchain(int cpu, struct trapframe *tf)
int ncallchains;
#endif
- sched_unpin(); /* Can migrate safely now. */
-
- psb = pmc_pcpu[cpu]->pc_sb;
+ psb = pmc_pcpu[cpu]->pc_sb[ring];
td = curthread;
KASSERT(td->td_pflags & TDP_CALLCHAIN,
@@ -4172,23 +4245,25 @@ pmc_capture_user_callchain(int cpu, struct trapframe *tf)
#ifdef INVARIANTS
ncallchains++;
#endif
-
}
KASSERT(ncallchains > 0,
("[pmc,%d] cpu %d didn't find a sample to collect", __LINE__,
cpu));
+ KASSERT(td->td_pinned > 0,
+ ("[pmc,%d] invalid td_pinned value", __LINE__));
+ sched_unpin(); /* Can migrate safely now. */
+
return;
}
-
/*
* Process saved PC samples.
*/
static void
-pmc_process_samples(int cpu)
+pmc_process_samples(int cpu, int ring)
{
struct pmc *pm;
int adjri, n;
@@ -4202,18 +4277,13 @@ pmc_process_samples(int cpu)
("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
PCPU_GET(cpuid), cpu));
- psb = pmc_pcpu[cpu]->pc_sb;
+ psb = pmc_pcpu[cpu]->pc_sb[ring];
for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
ps = psb->ps_read;
if (ps->ps_nsamples == PMC_SAMPLE_FREE)
break;
- if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
- /* Need a rescan at a later time. */
- CPU_SET_ATOMIC(cpu, &pmc_cpumask);
- break;
- }
pm = ps->ps_pmc;
@@ -4231,6 +4301,13 @@ pmc_process_samples(int cpu)
if (pm->pm_state != PMC_STATE_RUNNING)
goto entrydone;
+ /* If there is a pending AST wait for completion */
+ if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
+ /* Need a rescan at a later time. */
+ CPU_SET_ATOMIC(cpu, &pmc_cpumask);
+ break;
+ }
+
PMCDBG(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
pm, ps->ps_nsamples, ps->ps_flags,
(int) (psb->ps_write - psb->ps_samples),
@@ -4256,11 +4333,10 @@ pmc_process_samples(int cpu)
* or a system-wide sampling PMC. Dispatch a log
* entry to the PMC's owner process.
*/
-
pmclog_process_callchain(pm, ps);
entrydone:
- ps->ps_nsamples = 0; /* mark entry as free */
+ ps->ps_nsamples = 0; /* mark entry as free */
atomic_subtract_rel_int(&pm->pm_runcount, 1);
/* increment read pointer, modulo sample size */
@@ -4584,6 +4660,76 @@ static const char *pmc_name_of_pmcclass[] = {
__PMC_CLASSES()
};
+/*
+ * Base class initializer: allocate structure and set default classes.
+ */
+struct pmc_mdep *
+pmc_mdep_alloc(int nclasses)
+{
+ struct pmc_mdep *md;
+ int n;
+
+ /* SOFT + md classes */
+ n = 1 + nclasses;
+ md = malloc(sizeof(struct pmc_mdep) + n *
+ sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
+ if (md != NULL) {
+ md->pmd_nclass = n;
+
+ /* Add base class. */
+ pmc_soft_initialize(md);
+ }
+
+ return md;
+}
+
+void
+pmc_mdep_free(struct pmc_mdep *md)
+{
+ pmc_soft_finalize(md);
+ free(md, M_PMC);
+}
+
+static int
+generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ (void) pc; (void) pp;
+
+ return (0);
+}
+
+static int
+generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ (void) pc; (void) pp;
+
+ return (0);
+}
+
+static struct pmc_mdep *
+pmc_generic_cpu_initialize(void)
+{
+ struct pmc_mdep *md;
+
+ md = pmc_mdep_alloc(0);
+
+ md->pmd_cputype = PMC_CPU_GENERIC;
+
+ md->pmd_pcpu_init = NULL;
+ md->pmd_pcpu_fini = NULL;
+ md->pmd_switch_in = generic_switch_in;
+ md->pmd_switch_out = generic_switch_out;
+
+ return (md);
+}
+
+static void
+pmc_generic_cpu_finalize(struct pmc_mdep *md)
+{
+ (void) md;
+}
+
+
static int
pmc_initialize(void)
{
@@ -4643,9 +4789,12 @@ pmc_initialize(void)
}
md = pmc_md_initialize();
-
- if (md == NULL)
- return (ENOSYS);
+ if (md == NULL) {
+ /* Default to generic CPU. */
+ md = pmc_generic_cpu_initialize();
+ if (md == NULL)
+ return (ENOSYS);
+ }
KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
("[pmc,%d] no classes or pmcs", __LINE__));
@@ -4717,7 +4866,25 @@ pmc_initialize(void)
ps->ps_pc = sb->ps_callchains +
(n * pmc_callchaindepth);
- pmc_pcpu[cpu]->pc_sb = sb;
+ pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;
+
+ sb = malloc(sizeof(struct pmc_samplebuffer) +
+ pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
+ M_WAITOK|M_ZERO);
+ sb->ps_read = sb->ps_write = sb->ps_samples;
+ sb->ps_fence = sb->ps_samples + pmc_nsamples;
+
+ KASSERT(pmc_pcpu[cpu] != NULL,
+ ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
+
+ sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples *
+ sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO);
+
+ for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
+ ps->ps_pc = sb->ps_callchains +
+ (n * pmc_callchaindepth);
+
+ pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;
}
/* allocate space for the row disposition array */
@@ -4887,9 +5054,12 @@ pmc_cleanup(void)
md->pmd_pcpu_fini(md, cpu);
}
- pmc_md_finalize(md);
+ if (md->pmd_cputype == PMC_CPU_GENERIC)
+ pmc_generic_cpu_finalize(md);
+ else
+ pmc_md_finalize(md);
- free(md, M_PMC);
+ pmc_mdep_free(md);
md = NULL;
pmc_restore_cpu_binding(&pb);
}
@@ -4898,11 +5068,16 @@ pmc_cleanup(void)
for (cpu = 0; cpu < maxcpu; cpu++) {
if (!pmc_cpu_is_active(cpu))
continue;
- KASSERT(pmc_pcpu[cpu]->pc_sb != NULL,
- ("[pmc,%d] Null cpu sample buffer cpu=%d", __LINE__,
+ KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
+ ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
+ cpu));
+ KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
+ ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
cpu));
- free(pmc_pcpu[cpu]->pc_sb->ps_callchains, M_PMC);
- free(pmc_pcpu[cpu]->pc_sb, M_PMC);
+ free(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
+ free(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
+ free(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
+ free(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
free(pmc_pcpu[cpu], M_PMC);
}
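Userland would normally reach the new PMC_OP_GETDYNEVENTINFO operation
through libpmc rather than the raw syscall. A hedged sketch of
enumerating the SOFT class with the existing
pmc_event_names_of_class(3) interface, assuming it is (or becomes)
backed by this op:

	#include <err.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <pmc.h>

	int
	main(void)
	{
		const char **names;
		int i, nevents;

		if (pmc_init() < 0)
			err(1, "pmc_init");

		/* Presumably serviced by PMC_OP_GETDYNEVENTINFO. */
		if (pmc_event_names_of_class(PMC_CLASS_SOFT, &names,
		    &nevents) < 0)
			err(1, "pmc_event_names_of_class");

		for (i = 0; i < nevents; i++)
			printf("%s\n", names[i]);
		free(names);
		return (0);
	}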
diff --git a/sys/dev/hwpmc/hwpmc_piv.c b/sys/dev/hwpmc/hwpmc_piv.c
index 8ee8518..26b23a1 100644
--- a/sys/dev/hwpmc/hwpmc_piv.c
+++ b/sys/dev/hwpmc/hwpmc_piv.c
@@ -1463,7 +1463,7 @@ p4_intr(int cpu, struct trapframe *tf)
!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
continue;
}
- (void) pmc_process_interrupt(cpu, pm, tf,
+ (void) pmc_process_interrupt(cpu, PMC_HR, pm, tf,
TRAPF_USERMODE(tf));
continue;
}
@@ -1513,7 +1513,7 @@ p4_intr(int cpu, struct trapframe *tf)
* Process the interrupt. Re-enable the PMC if
* processing was successful.
*/
- error = pmc_process_interrupt(cpu, pm, tf,
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
TRAPF_USERMODE(tf));
/*
diff --git a/sys/dev/hwpmc/hwpmc_powerpc.c b/sys/dev/hwpmc/hwpmc_powerpc.c
index 8e97b97..ccbcb2c 100644
--- a/sys/dev/hwpmc/hwpmc_powerpc.c
+++ b/sys/dev/hwpmc/hwpmc_powerpc.c
@@ -690,7 +690,8 @@ powerpc_intr(int cpu, struct trapframe *tf)
powerpc_pmcn_write(i, v);
/* Restart the counter if logging succeeded. */
- error = pmc_process_interrupt(cpu, pm, tf, TRAPF_USERMODE(tf));
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
mtspr(SPR_MMCR0, config);
if (error != 0)
powerpc_stop_pmc(cpu, i);
diff --git a/sys/dev/hwpmc/hwpmc_ppro.c b/sys/dev/hwpmc/hwpmc_ppro.c
index 8da185b..416a540 100644
--- a/sys/dev/hwpmc/hwpmc_ppro.c
+++ b/sys/dev/hwpmc/hwpmc_ppro.c
@@ -704,7 +704,7 @@ p6_intr(int cpu, struct trapframe *tf)
if (pm->pm_state != PMC_STATE_RUNNING)
continue;
- error = pmc_process_interrupt(cpu, pm, tf,
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
TRAPF_USERMODE(tf));
if (error)
P6_MARK_STOPPED(pc,ri);
diff --git a/sys/dev/hwpmc/hwpmc_soft.c b/sys/dev/hwpmc/hwpmc_soft.c
new file mode 100644
index 0000000..c3d2dec
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_soft.c
@@ -0,0 +1,485 @@
+/*-
+ * Copyright (c) 2012 Fabien Thomas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+#include <sys/mutex.h>
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+
+#include "hwpmc_soft.h"
+
+/*
+ * Software PMC support.
+ */
+
+#define SOFT_CAPS (PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
+ PMC_CAP_USER | PMC_CAP_SYSTEM)
+
+struct soft_descr {
+ struct pmc_descr pm_descr; /* "base class" */
+};
+
+static struct soft_descr soft_pmcdesc[SOFT_NPMCS] =
+{
+#define SOFT_PMCDESCR(N) \
+ { \
+ .pm_descr = \
+ { \
+ .pd_name = #N, \
+ .pd_class = PMC_CLASS_SOFT, \
+ .pd_caps = SOFT_CAPS, \
+ .pd_width = 64 \
+ }, \
+ }
+
+ SOFT_PMCDESCR(SOFT0),
+ SOFT_PMCDESCR(SOFT1),
+ SOFT_PMCDESCR(SOFT2),
+ SOFT_PMCDESCR(SOFT3),
+ SOFT_PMCDESCR(SOFT4),
+ SOFT_PMCDESCR(SOFT5),
+ SOFT_PMCDESCR(SOFT6),
+ SOFT_PMCDESCR(SOFT7),
+ SOFT_PMCDESCR(SOFT8),
+ SOFT_PMCDESCR(SOFT9),
+ SOFT_PMCDESCR(SOFT10),
+ SOFT_PMCDESCR(SOFT11),
+ SOFT_PMCDESCR(SOFT12),
+ SOFT_PMCDESCR(SOFT13),
+ SOFT_PMCDESCR(SOFT14),
+ SOFT_PMCDESCR(SOFT15)
+};
+
+/*
+ * Per-CPU data structure.
+ */
+
+struct soft_cpu {
+ struct pmc_hw soft_hw[SOFT_NPMCS];
+ pmc_value_t soft_values[SOFT_NPMCS];
+};
+
+
+static struct soft_cpu **soft_pcpu;
+
+static int
+soft_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event ev;
+ struct pmc_soft *ps;
+
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ if (a->pm_class != PMC_CLASS_SOFT)
+ return (EINVAL);
+
+ if ((pm->pm_caps & SOFT_CAPS) == 0)
+ return (EINVAL);
+
+ if ((pm->pm_caps & ~SOFT_CAPS) != 0)
+ return (EPERM);
+
+ ev = pm->pm_event;
+ if (ev < PMC_EV_SOFT_FIRST || ev > PMC_EV_SOFT_LAST)
+ return (EINVAL);
+
+ /* Check if event is registered. */
+ ps = pmc_soft_ev_acquire(ev);
+ if (ps == NULL)
+ return (EINVAL);
+ pmc_soft_ev_release(ps);
+
+ return (0);
+}
+
+static int
+soft_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &soft_pcpu[cpu]->soft_hw[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[soft,%d] pm=%p phw->pm=%p hwpmc not unconfigured", __LINE__,
+ pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return (0);
+}
+
+static int
+soft_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ const struct soft_descr *pd;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &soft_pcpu[cpu]->soft_hw[ri];
+ pd = &soft_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return (error);
+
+ pi->pm_class = pd->pm_descr.pd_class;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+soft_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ (void) ri;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ *ppm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
+ return (0);
+}
+
+static int
+soft_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ int ri;
+ struct pmc_cpu *pc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal cpu %d", __LINE__, cpu));
+ KASSERT(soft_pcpu[cpu] != NULL, ("[soft,%d] null pcpu", __LINE__));
+
+ free(soft_pcpu[cpu], M_PMC);
+ soft_pcpu[cpu] = NULL;
+
+ ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;
+
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] ri=%d", __LINE__, ri));
+
+ pc = pmc_pcpu[cpu];
+ pc->pc_hwpmcs[ri] = NULL;
+
+ return (0);
+}
+
+static int
+soft_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, n;
+ struct pmc_cpu *pc;
+ struct soft_cpu *soft_pc;
+ struct pmc_hw *phw;
+
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal cpu %d", __LINE__, cpu));
+ KASSERT(soft_pcpu, ("[soft,%d] null pcpu", __LINE__));
+ KASSERT(soft_pcpu[cpu] == NULL, ("[soft,%d] non-null per-cpu",
+ __LINE__));
+
+ soft_pc = malloc(sizeof(struct soft_cpu), M_PMC, M_WAITOK|M_ZERO);
+ if (soft_pc == NULL)
+ return (ENOMEM);
+
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc != NULL, ("[soft,%d] cpu %d null per-cpu", __LINE__, cpu));
+
+ soft_pcpu[cpu] = soft_pc;
+ phw = soft_pc->soft_hw;
+ first_ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;
+
+ for (n = 0; n < SOFT_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[n + first_ri] = phw;
+ }
+
+ return (0);
+}
+
+static int
+soft_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ const struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &soft_pcpu[cpu]->soft_hw[ri];
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[soft,%d] no owner for PHW [cpu%d,pmc%d]", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,REA,1,"soft-read id=%d", ri);
+
+ *v = soft_pcpu[cpu]->soft_values[ri];
+
+ return (0);
+}
+
+static int
+soft_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+ const struct soft_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
+ pd = &soft_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,WRI,1, "soft-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ soft_pcpu[cpu]->soft_values[ri] = v;
+
+ return (0);
+}
+
+static int
+soft_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ (void) pmc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &soft_pcpu[cpu]->soft_hw[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[soft,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ /*
+ * Nothing to do.
+ */
+ return (0);
+}
+
+static int
+soft_start_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct soft_cpu *pc;
+ struct pmc_soft *ps;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = soft_pcpu[cpu];
+ pm = pc->soft_hw[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ ps = pmc_soft_ev_acquire(pm->pm_event);
+ if (ps == NULL)
+ return (EINVAL);
+ atomic_add_int(&ps->ps_running, 1);
+ pmc_soft_ev_release(ps);
+
+ return (0);
+}
+
+static int
+soft_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct soft_cpu *pc;
+ struct pmc_soft *ps;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = soft_pcpu[cpu];
+ pm = pc->soft_hw[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ ps = pmc_soft_ev_acquire(pm->pm_event);
+ /* event unregistered ? */
+ if (ps != NULL) {
+ atomic_subtract_int(&ps->ps_running, 1);
+ pmc_soft_ev_release(ps);
+ }
+
+ return (0);
+}
+
+int
+pmc_soft_intr(struct pmckern_soft *ks)
+{
+ struct pmc *pm;
+ struct soft_cpu *pc;
+ int ri, processed, error, user_mode;
+
+ KASSERT(ks->pm_cpu >= 0 && ks->pm_cpu < pmc_cpu_max(),
+ ("[soft,%d] CPU %d out of range", __LINE__, ks->pm_cpu));
+
+ processed = 0;
+ pc = soft_pcpu[ks->pm_cpu];
+
+ for (ri = 0; ri < SOFT_NPMCS; ri++) {
+
+ pm = pc->soft_hw[ri].phw_pmc;
+ if (pm == NULL ||
+ pm->pm_state != PMC_STATE_RUNNING ||
+ pm->pm_event != ks->pm_ev) {
+ continue;
+ }
+
+ processed = 1;
+ pc->soft_values[ri]++;
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ user_mode = TRAPF_USERMODE(ks->pm_tf);
+ error = pmc_process_interrupt(ks->pm_cpu, PMC_SR, pm,
+ ks->pm_tf, user_mode);
+ if (error) {
+ soft_stop_pmc(ks->pm_cpu, ri);
+ continue;
+ }
+
+ if (user_mode) {
+ /* If in user mode setup AST to process
+ * callchain out of interrupt context.
+ */
+ curthread->td_flags |= TDF_ASTPENDING;
+ }
+ }
+ }
+
+ atomic_add_int(processed ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ return (processed);
+}
+
+void
+pmc_soft_initialize(struct pmc_mdep *md)
+{
+ struct pmc_classdep *pcd;
+
+ /* Add SOFT PMCs. */
+ soft_pcpu = malloc(sizeof(struct soft_cpu *) * pmc_cpu_max(), M_PMC,
+ M_ZERO|M_WAITOK);
+
+ pcd = &md->pmd_classdep[PMC_CLASS_INDEX_SOFT];
+
+ pcd->pcd_caps = SOFT_CAPS;
+ pcd->pcd_class = PMC_CLASS_SOFT;
+ pcd->pcd_num = SOFT_NPMCS;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = 64;
+
+ pcd->pcd_allocate_pmc = soft_allocate_pmc;
+ pcd->pcd_config_pmc = soft_config_pmc;
+ pcd->pcd_describe = soft_describe;
+ pcd->pcd_get_config = soft_get_config;
+ pcd->pcd_get_msr = NULL;
+ pcd->pcd_pcpu_init = soft_pcpu_init;
+ pcd->pcd_pcpu_fini = soft_pcpu_fini;
+ pcd->pcd_read_pmc = soft_read_pmc;
+ pcd->pcd_write_pmc = soft_write_pmc;
+ pcd->pcd_release_pmc = soft_release_pmc;
+ pcd->pcd_start_pmc = soft_start_pmc;
+ pcd->pcd_stop_pmc = soft_stop_pmc;
+
+ md->pmd_npmc += SOFT_NPMCS;
+}
+
+void
+pmc_soft_finalize(struct pmc_mdep *md)
+{
+#ifdef INVARIANTS
+ int i, ncpus;
+
+ ncpus = pmc_cpu_max();
+ for (i = 0; i < ncpus; i++)
+ KASSERT(soft_pcpu[i] == NULL, ("[soft,%d] non-null pcpu cpu %d",
+ __LINE__, i));
+
+ KASSERT(md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_class ==
+ PMC_CLASS_SOFT, ("[soft,%d] class mismatch", __LINE__));
+#endif
+ free(soft_pcpu, M_PMC);
+ soft_pcpu = NULL;
+}
diff --git a/sys/dev/hwpmc/hwpmc_soft.h b/sys/dev/hwpmc/hwpmc_soft.h
new file mode 100644
index 0000000..f82baff
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_soft.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2012 Fabien Thomas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_SOFT_H_
+#define _DEV_HWPMC_SOFT_H_ 1
+
+#include <sys/pmckern.h>
+
+#ifdef _KERNEL
+
+#define PMC_CLASS_INDEX_SOFT 0
+#define SOFT_NPMCS 16
+
+/*
+ * Prototypes.
+ */
+
+void pmc_soft_initialize(struct pmc_mdep *md);
+void pmc_soft_finalize(struct pmc_mdep *md);
+int pmc_soft_intr(struct pmckern_soft *ks);
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_SOFT_H_ */
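To connect these prototypes: a provider registers a struct pmc_soft and
later fires its event through the PMC_FN_SOFT_SAMPLING hook that
pmc_soft_intr() services. A minimal hand-rolled sketch follows, with the
struct layout and pmc_soft_ev_register() (implemented in
sys/kern/kern_pmc.c, outside this diffstat) assumed from their use in
hwpmc_soft.c above; "foo_soft" and "FOO.BAR" are hypothetical.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/pmckern.h>

	/* Layout assumed: a ps_running counter plus a ps_ev descriptor. */
	static struct pmc_soft foo_soft;

	static void
	foo_soft_init(void)
	{
		strlcpy(foo_soft.ps_ev.pm_ev_name, "FOO.BAR", PMC_NAME_MAX);
		/* Assumed to assign ps_ev.pm_ev_code from the dynamic range. */
		pmc_soft_ev_register(&foo_soft);
	}

	static void
	foo_soft_fire(struct trapframe *tf)
	{
		struct pmckern_soft ks;

		/* soft_start_pmc() bumps ps_running; skip when unused. */
		if (__predict_false(foo_soft.ps_running > 0)) {
			ks.pm_ev = foo_soft.ps_ev.pm_ev_code;
			ks.pm_cpu = PCPU_GET(cpuid);
			ks.pm_tf = tf;
			/* Dispatched to pmc_soft_intr() by the hook handler. */
			PMC_CALL_HOOK_UNLOCKED(curthread,
			    PMC_FN_SOFT_SAMPLING, (void *)&ks);
		}
	}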
diff --git a/sys/dev/hwpmc/hwpmc_tsc.c b/sys/dev/hwpmc/hwpmc_tsc.c
index 0b71a5b..237b7a1 100644
--- a/sys/dev/hwpmc/hwpmc_tsc.c
+++ b/sys/dev/hwpmc/hwpmc_tsc.c
@@ -190,9 +190,6 @@ tsc_pcpu_fini(struct pmc_mdep *md, int cpu)
ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_TSC].pcd_ri;
- KASSERT(ri == 0 && ri < TSC_NPMCS, ("[tsc,%d] ri=%d", __LINE__,
- ri));
-
pc = pmc_pcpu[cpu];
pc->pc_hwpmcs[ri] = NULL;
diff --git a/sys/dev/hwpmc/hwpmc_x86.c b/sys/dev/hwpmc/hwpmc_x86.c
index 72ed518..e7485a4 100644
--- a/sys/dev/hwpmc/hwpmc_x86.c
+++ b/sys/dev/hwpmc/hwpmc_x86.c
@@ -48,6 +48,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
#include <vm/pmap.h>
+#include "hwpmc_soft.h"
+
/*
* Attempt to walk a user call stack using a too-simple algorithm.
* In the general case we need unwind information associated with
@@ -251,8 +253,11 @@ pmc_md_initialize()
/* disallow sampling if we do not have an LAPIC */
if (md != NULL && !lapic_enable_pmc())
- for (i = 1; i < md->pmd_nclass; i++)
+ for (i = 0; i < md->pmd_nclass; i++) {
+ if (i == PMC_CLASS_INDEX_SOFT)
+ continue;
md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT;
+ }
return (md);
}
diff --git a/sys/dev/hwpmc/hwpmc_xscale.c b/sys/dev/hwpmc/hwpmc_xscale.c
index 466f3b6..9b73337 100644
--- a/sys/dev/hwpmc/hwpmc_xscale.c
+++ b/sys/dev/hwpmc/hwpmc_xscale.c
@@ -638,11 +638,9 @@ pmc_xscale_initialize()
M_WAITOK|M_ZERO);
/* Just one class */
- pmc_mdep = malloc(sizeof(struct pmc_mdep) + sizeof(struct pmc_classdep),
- M_PMC, M_WAITOK|M_ZERO);
+ pmc_mdep = pmc_mdep_alloc(1);
pmc_mdep->pmd_cputype = PMC_CPU_INTEL_XSCALE;
- pmc_mdep->pmd_nclass = 1;
pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_XSCALE];
pcd->pcd_caps = XSCALE_PMC_CAPS;
diff --git a/sys/dev/hwpmc/pmc_events.h b/sys/dev/hwpmc/pmc_events.h
index 2397b17..98ad10f 100644
--- a/sys/dev/hwpmc/pmc_events.h
+++ b/sys/dev/hwpmc/pmc_events.h
@@ -2599,6 +2599,15 @@ __PMC_EV_ALIAS("SQ_MISC.SPLIT_LOCK", IAP_EVENT_F4H_10H)
#define PMC_EV_TSC_FIRST PMC_EV_TSC_TSC
#define PMC_EV_TSC_LAST PMC_EV_TSC_TSC
+/*
+ * Software events are dynamically defined.
+ */
+
+#define PMC_EV_DYN_COUNT 0x1000
+
+#define PMC_EV_SOFT_FIRST 0x20000
+#define PMC_EV_SOFT_LAST (PMC_EV_SOFT_FIRST + PMC_EV_DYN_COUNT - 1)
+
#define __PMC_EV_UCF() \
__PMC_EV(UCF, UCLOCK)
@@ -3716,6 +3725,7 @@ __PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
* 0x11100 0x0100 INTEL Pentium Pro/P-II/P-III/Pentium-M events
* 0x11200 0x00FF INTEL XScale events
* 0x11300 0x00FF MIPS 24K events
+ * 0x20000 0x1000 Software events
*/
#define __PMC_EVENTS() \
__PMC_EV_BLOCK(TSC, 0x01000) \
@@ -3748,6 +3758,6 @@ __PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
__PMC_EV_PPC7450() \
#define PMC_EVENT_FIRST PMC_EV_TSC_TSC
-#define PMC_EVENT_LAST PMC_EV_UCP_LAST
+#define PMC_EVENT_LAST PMC_EV_SOFT_LAST
#endif /* _DEV_HWPMC_PMC_EVENTS_H_ */
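As a quick consistency check of the range defined above (0x20000
through 0x20000 + 0x1000 - 1 = 0x20fff), a compile-time assertion could
read:

	#include <sys/systm.h>	/* CTASSERT */

	/* The SOFT range spans exactly PMC_EV_DYN_COUNT events. */
	CTASSERT(PMC_EV_SOFT_LAST - PMC_EV_SOFT_FIRST + 1 ==
	    PMC_EV_DYN_COUNT);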