-rw-r--r--  gnu/usr.bin/groff/tmac/mdoc.local         1
-rw-r--r--  lib/Makefile                              4
-rw-r--r--  lib/libpmc/Makefile                      43
-rw-r--r--  lib/libpmc/libpmc.c                    2136
-rw-r--r--  lib/libpmc/pmc.3                       3090
-rw-r--r--  lib/libpmc/pmc.h                         79
-rw-r--r--  share/doc/papers/Makefile                 1
-rw-r--r--  share/doc/papers/hwpmc/Makefile           8
-rw-r--r--  share/doc/papers/hwpmc/hwpmc.ms          34
-rw-r--r--  share/examples/hwpmc/README               8
-rw-r--r--  share/man/man4/Makefile                   1
-rw-r--r--  share/man/man4/hwpmc.4                  583
-rw-r--r--  share/mk/bsd.libnames.mk                  1
-rw-r--r--  sys/amd64/include/pmc_mdep.h             76
-rw-r--r--  sys/conf/NOTES                           13
-rw-r--r--  sys/conf/files                            5
-rw-r--r--  sys/conf/files.amd64                      1
-rw-r--r--  sys/conf/files.i386                       2
-rw-r--r--  sys/conf/options                          3
-rw-r--r--  sys/dev/hwpmc/hwpmc_amd.c               996
-rw-r--r--  sys/dev/hwpmc/hwpmc_intel.c             142
-rw-r--r--  sys/dev/hwpmc/hwpmc_mod.c              3671
-rw-r--r--  sys/dev/hwpmc/hwpmc_pentium.c            51
-rw-r--r--  sys/dev/hwpmc/hwpmc_piv.c              1484
-rw-r--r--  sys/dev/hwpmc/hwpmc_ppro.c              742
-rw-r--r--  sys/hwpmc/hwpmc_amd.c                   996
-rw-r--r--  sys/hwpmc/hwpmc_intel.c                 142
-rw-r--r--  sys/hwpmc/hwpmc_mod.c                  3671
-rw-r--r--  sys/hwpmc/hwpmc_pentium.c                51
-rw-r--r--  sys/hwpmc/hwpmc_piv.c                  1484
-rw-r--r--  sys/hwpmc/hwpmc_ppro.c                  742
-rw-r--r--  sys/i386/i386/local_apic.c                7
-rw-r--r--  sys/i386/include/pmc_mdep.h             184
-rw-r--r--  sys/kern/kern_exec.c                     22
-rw-r--r--  sys/kern/kern_pmc.c                      82
-rw-r--r--  sys/kern/sched_4bsd.c                    23
-rw-r--r--  sys/kern/sched_ule.c                     23
-rw-r--r--  sys/modules/Makefile                      1
-rw-r--r--  sys/modules/hwpmc/Makefile               21
-rw-r--r--  sys/sys/param.h                           2
-rw-r--r--  sys/sys/pmc.h                          1418
-rw-r--r--  sys/sys/pmckern.h                        93
-rw-r--r--  sys/sys/proc.h                            2
-rw-r--r--  sys/sys/sched.h                           1
-rw-r--r--  usr.sbin/Makefile                         2
-rw-r--r--  usr.sbin/pmccontrol/Makefile             17
-rw-r--r--  usr.sbin/pmccontrol/pmccontrol.8        132
-rw-r--r--  usr.sbin/pmccontrol/pmccontrol.c        476
-rw-r--r--  usr.sbin/pmcstat/Makefile                17
-rw-r--r--  usr.sbin/pmcstat/pmcstat.8              196
-rw-r--r--  usr.sbin/pmcstat/pmcstat.c              728
51 files changed, 23703 insertions(+), 5 deletions(-)
diff --git a/gnu/usr.bin/groff/tmac/mdoc.local b/gnu/usr.bin/groff/tmac/mdoc.local
index de03d09..55bfadf 100644
--- a/gnu/usr.bin/groff/tmac/mdoc.local
+++ b/gnu/usr.bin/groff/tmac/mdoc.local
@@ -48,6 +48,7 @@
.ds doc-str-Lb-libkiconv Kernel side iconv library (libkiconv, \-lkiconv)
.ds doc-str-Lb-libmd Message Digest (MD4, MD5, etc.) Support Library (libmd, \-lmd)
.ds doc-str-Lb-libnetgraph Netgraph User Library (libnetgraph, \-lnetgraph)
+.ds doc-str-Lb-libpmc Performance monitoring counters API (libpmc, \-lpmc)
.ds doc-str-Lb-librpcsvc RPC Service Library (librpcsvc, \-lrpcsvc)
.ds doc-str-Lb-libsdp Bluetooth Service Discovery Protocol User Library (libsdp, \-lsdp)
.ds doc-str-Lb-libthr 1:1 Threading Library (libthr, \-lthr)
diff --git a/lib/Makefile b/lib/Makefile
index 76f4f70..2720004 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -31,7 +31,7 @@ SUBDIR= ${_csu} libcom_err libcrypt libkvm msun libmd libncurses \
${_libio} libipsec \
libipx libkiconv libmagic libmenu ${_libmilter} ${_libmp} \
${_libncp} ${_libngatm} libopie libpam libpanel libpcap \
- ${_libpthread} ${_libsdp} ${_libsm} ${_libsmb} ${_libsmdb} \
+ ${_libpmc} ${_libpthread} ${_libsdp} ${_libsm} ${_libsmb} ${_libsmdb} \
${_libsmutil} libstand libtelnet ${_libthr} ${_libthread_db} libufs \
libugidfw ${_libusbhid} ${_libvgl} libwrap liby libz ${_bind}
@@ -59,6 +59,7 @@ _libsdp= libsdp
.if ${MACHINE_ARCH} == "i386"
_libncp= libncp
+_libpmc= libpmc
_libsmb= libsmb
_libvgl= libvgl
.endif
@@ -89,6 +90,7 @@ _libmp= libmp
.if ${MACHINE_ARCH} == "amd64"
_libncp= libncp
+_libpmc= libpmc
_libsmb= libsmb
.endif
diff --git a/lib/libpmc/Makefile b/lib/libpmc/Makefile
new file mode 100644
index 0000000..c6857da
--- /dev/null
+++ b/lib/libpmc/Makefile
@@ -0,0 +1,43 @@
+# $FreeBSD$
+
+LIB= pmc
+
+SRCS= libpmc.c
+INCS= pmc.h
+
+CFLAGS+= -I${.CURDIR} -I${.CURDIR}/../../sys
+
+WARNS?= 6
+
+MAN= pmc.3
+
+MLINKS+= \
+ pmc.3 pmc_allocate.3 \
+ pmc.3 pmc_attach.3 \
+ pmc.3 pmc_configure_logfile.3 \
+ pmc.3 pmc_cpuinfo.3 \
+ pmc.3 pmc_detach.3 \
+ pmc.3 pmc_disable.3 \
+ pmc.3 pmc_enable.3 \
+ pmc.3 pmc_event_names_of_class.3 \
+ pmc.3 pmc_get_driver_stats.3 \
+ pmc.3 pmc_init.3 \
+ pmc.3 pmc_name_of_capability.3 \
+ pmc.3 pmc_name_of_class.3 \
+ pmc.3 pmc_name_of_cputype.3 \
+ pmc.3 pmc_name_of_disposition.3 \
+ pmc.3 pmc_name_of_event.3 \
+ pmc.3 pmc_name_of_mode.3 \
+ pmc.3 pmc_name_of_state.3 \
+ pmc.3 pmc_ncpu.3 \
+ pmc.3 pmc_npmc.3 \
+ pmc.3 pmc_pmcinfo.3 \
+ pmc.3 pmc_read.3 \
+ pmc.3 pmc_release.3 \
+ pmc.3 pmc_rw.3 \
+ pmc.3 pmc_set.3 \
+ pmc.3 pmc_start.3 \
+ pmc.3 pmc_stop.3 \
+ pmc.3 pmc_write.3 \
+ pmc.3 pmc_x86_get_msr.3
+
+.include <bsd.lib.mk>
diff --git a/lib/libpmc/libpmc.c b/lib/libpmc/libpmc.c
new file mode 100644
index 0000000..925e3f9
--- /dev/null
+++ b/lib/libpmc/libpmc.c
@@ -0,0 +1,2136 @@
+/*-
+ * Copyright (c) 2003,2004 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/module.h>
+#include <sys/pmc.h>
+#include <sys/syscall.h>
+
+#include <machine/pmc_mdep.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pmc.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+/* Function prototypes */
+#if __i386__
+static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+#elif __amd64__
+static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+#endif
+
+#define PMC_CALL(cmd, params) \
+ syscall(pmc_syscall, PMC_OP_##cmd, (params))
+
+/*
+ * Event aliases provide a way for the user to ask for generic events
+ * like "cache-misses" or "instructions-retired". These aliases are
+ * mapped to the appropriate canonical event descriptions using a
+ * lookup table.
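+ *
+ * For example, on an AMD K7 CPU the generic alias "branches" maps to
+ * the canonical specifier "k7-retired-branches" (see the k7_aliases
+ * table below).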
+ */
+
+struct pmc_event_alias {
+ const char *pm_alias;
+ const char *pm_spec;
+};
+
+static const struct pmc_event_alias *pmc_mdep_event_aliases;
+
+/*
+ * The pmc_event_descr table maps symbolic names known to the user
+ * to integer codes used by the PMC KLD.
+ */
+
+struct pmc_event_descr {
+ const char *pm_ev_name;
+ enum pmc_event pm_ev_code;
+ enum pmc_class pm_ev_class;
+};
+
+static const struct pmc_event_descr
+pmc_event_table[] =
+{
+#undef __PMC_EV
+#define __PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
+ __PMC_EVENTS()
+};
+
+/*
+ * Mapping tables, mapping enumeration values to human readable
+ * strings.
+ */
+
+static const char * pmc_capability_names[] = {
+#undef __PMC_CAP
+#define __PMC_CAP(N,V,D) #N ,
+ __PMC_CAPS()
+};
+
+static const char * pmc_class_names[] = {
+#undef __PMC_CLASS
+#define __PMC_CLASS(C) #C ,
+ __PMC_CLASSES()
+};
+
+static const char * pmc_cputype_names[] = {
+#undef __PMC_CPU
+#define __PMC_CPU(S, D) #S ,
+ __PMC_CPUS()
+};
+
+static const char * pmc_disposition_names[] = {
+#undef __PMC_DISP
+#define __PMC_DISP(D) #D ,
+ __PMC_DISPOSITIONS()
+};
+
+static const char * pmc_mode_names[] = {
+#undef __PMC_MODE
+#define __PMC_MODE(M,N) #M ,
+ __PMC_MODES()
+};
+
+static const char * pmc_state_names[] = {
+#undef __PMC_STATE
+#define __PMC_STATE(S) #S ,
+ __PMC_STATES()
+};
+
+static int pmc_syscall = -1; /* filled in by pmc_init() */
+
+struct pmc_op_getcpuinfo cpu_info; /* filled in by pmc_init() */
+
+/* Architecture dependent event parsing */
+static int (*pmc_mdep_allocate_pmc)(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+
+/* Event masks for events */
+struct pmc_masks {
+ const char *pm_name;
+ const uint32_t pm_value;
+};
+#define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) }
+#define NULLMASK PMCMASK(NULL,0)
+
+static int
+pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
+{
+ const struct pmc_masks *pm;
+ char *q, *r;
+ int c;
+
+ if (pmask == NULL) /* no mask keywords */
+ return -1;
+ q = strchr(p, '='); /* skip '=' */
+ if (*++q == '\0') /* no more data */
+ return -1;
+ c = 0; /* count of mask keywords seen */
+ while ((r = strsep(&q, "+")) != NULL) {
+ for (pm = pmask; pm->pm_name && strcmp(r, pm->pm_name); pm++)
+ ;
+ if (pm->pm_name == NULL) /* not found */
+ return -1;
+ *evmask |= pm->pm_value;
+ c++;
+ }
+ return c;
+}
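+
+/*
+ * For example (illustrative only): given the p4_mask_fee table
+ * defined below, parsing the specifier fragment "mask=nbogus+bogus"
+ * would OR the values for the 'nbogus' and 'bogus' bits into
+ * *evmask and return 2, the number of mask keywords seen.
+ */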
+
+#define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0)
+#define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
+#define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S }
+
+#if __i386__
+
+/*
+ * AMD K7 (Athlon) CPUs.
+ */
+
+static struct pmc_event_alias k7_aliases[] = {
+ EV_ALIAS("branches", "k7-retired-branches"),
+ EV_ALIAS("branch-mispredicts", "k7-retired-branches-mispredicted"),
+ EV_ALIAS("cycles", "tsc"),
+ EV_ALIAS("dc-misses", "k7-dc-misses"),
+ EV_ALIAS("ic-misses", "k7-ic-misses"),
+ EV_ALIAS("instructions", "k7-retired-instructions"),
+ EV_ALIAS("interrupts", "k7-hardware-interrupts"),
+ EV_ALIAS(NULL, NULL)
+};
+
+#define K7_KW_COUNT "count"
+#define K7_KW_EDGE "edge"
+#define K7_KW_INV "inv"
+#define K7_KW_OS "os"
+#define K7_KW_UNITMASK "unitmask"
+#define K7_KW_USR "usr"
+
+static int
+k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ char *e, *p, *q;
+ int c, has_unitmask;
+ uint32_t count, unitmask;
+
+ pmc_config->pm_amd_config = 0;
+ pmc_config->pm_caps |= PMC_CAP_READ;
+
+ if (pe == PMC_EV_TSC_TSC) {
+ /* TSC events must be unqualified. */
+ if (ctrspec && *ctrspec != '\0')
+ return -1;
+ return 0;
+ }
+
+ if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
+ pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
+ pe == PMC_EV_K7_DC_WRITEBACKS) {
+ has_unitmask = 1;
+ unitmask = K7_PMC_UNITMASK_MOESI;
+ } else
+ unitmask = has_unitmask = 0;
+
+ pmc_config->pm_caps |= PMC_CAP_WRITE;
+
+ while ((p = strsep(&ctrspec, ",")) != NULL) {
+ if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+
+ count = strtol(q, &e, 0);
+ if (e == q || *e != '\0')
+ return -1;
+
+ pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
+ pmc_config->pm_amd_config |= K7_PMC_TO_COUNTER(count);
+
+ } else if (KWMATCH(p, K7_KW_EDGE)) {
+ pmc_config->pm_caps |= PMC_CAP_EDGE;
+ } else if (KWMATCH(p, K7_KW_INV)) {
+ pmc_config->pm_caps |= PMC_CAP_INVERT;
+ } else if (KWMATCH(p, K7_KW_OS)) {
+ pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+ } else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
+ if (has_unitmask == 0)
+ return -1;
+ unitmask = 0;
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+
+ while ((c = tolower(*q++)) != 0)
+ if (c == 'm')
+ unitmask |= K7_PMC_UNITMASK_M;
+ else if (c == 'o')
+ unitmask |= K7_PMC_UNITMASK_O;
+ else if (c == 'e')
+ unitmask |= K7_PMC_UNITMASK_E;
+ else if (c == 's')
+ unitmask |= K7_PMC_UNITMASK_S;
+ else if (c == 'i')
+ unitmask |= K7_PMC_UNITMASK_I;
+ else if (c == '+')
+ continue;
+ else
+ return -1;
+
+ if (unitmask == 0)
+ return -1;
+
+ } else if (KWMATCH(p, K7_KW_USR)) {
+ pmc_config->pm_caps |= PMC_CAP_USER;
+ } else
+ return -1;
+ }
+
+ if (has_unitmask) {
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ pmc_config->pm_amd_config |=
+ K7_PMC_TO_UNITMASK(unitmask);
+ }
+
+ return 0;
+
+}
+
+/*
+ * Intel P4 PMCs
+ */
+
+static struct pmc_event_alias p4_aliases[] = {
+ EV_ALIAS("cycles", "tsc"),
+ EV_ALIAS(NULL, NULL)
+};
+
+#define P4_KW_ACTIVE "active"
+#define P4_KW_ACTIVE_ANY "any"
+#define P4_KW_ACTIVE_BOTH "both"
+#define P4_KW_ACTIVE_NONE "none"
+#define P4_KW_ACTIVE_SINGLE "single"
+#define P4_KW_BUSREQTYPE "busreqtype"
+#define P4_KW_CASCADE "cascade"
+#define P4_KW_EDGE "edge"
+#define P4_KW_INV "complement"
+#define P4_KW_OS "os"
+#define P4_KW_MASK "mask"
+#define P4_KW_PRECISE "precise"
+#define P4_KW_TAG "tag"
+#define P4_KW_THRESHOLD "threshold"
+#define P4_KW_USR "usr"
+
+#define __P4MASK(N,V) PMCMASK(N, (1 << (V)))
+
+static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
+ __P4MASK(dd, 0),
+ __P4MASK(db, 1),
+ __P4MASK(di, 2),
+ __P4MASK(bd, 3),
+ __P4MASK(bb, 4),
+ __P4MASK(bi, 5),
+ __P4MASK(id, 6),
+ __P4MASK(ib, 7),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
+ __P4MASK(tcmiss, 0),
+	NULLMASK
+};
+
+static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
+ __P4MASK(hit, 0),
+ __P4MASK(miss, 1),
+ __P4MASK(hit-uc, 2),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
+ __P4MASK(st-rb-full, 2),
+ __P4MASK(64k-conf, 3),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
+ __P4MASK(lsc, 0),
+ __P4MASK(ssc, 1),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
+ __P4MASK(split-ld, 1),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
+ __P4MASK(split-st, 1),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
+ __P4MASK(no-sta, 1),
+ __P4MASK(no-std, 3),
+ __P4MASK(partial-data, 4),
+ __P4MASK(unalgn-addr, 5),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
+ __P4MASK(dtmiss, 0),
+ __P4MASK(itmiss, 1),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
+ __P4MASK(rd-2ndl-hits, 0),
+ __P4MASK(rd-2ndl-hite, 1),
+ __P4MASK(rd-2ndl-hitm, 2),
+ __P4MASK(rd-3rdl-hits, 3),
+ __P4MASK(rd-3rdl-hite, 4),
+ __P4MASK(rd-3rdl-hitm, 5),
+ __P4MASK(rd-2ndl-miss, 8),
+ __P4MASK(rd-3rdl-miss, 9),
+ __P4MASK(wr-2ndl-miss, 10),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
+ __P4MASK(all-read, 5),
+ __P4MASK(all-write, 6),
+ __P4MASK(mem-uc, 7),
+ __P4MASK(mem-wc, 8),
+ __P4MASK(mem-wt, 9),
+ __P4MASK(mem-wp, 10),
+ __P4MASK(mem-wb, 11),
+ __P4MASK(own, 13),
+ __P4MASK(other, 14),
+ __P4MASK(prefetch, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
+ __P4MASK(all-read, 5),
+ __P4MASK(all-write, 6),
+ __P4MASK(mem-uc, 7),
+ __P4MASK(mem-wc, 8),
+ __P4MASK(mem-wt, 9),
+ __P4MASK(mem-wp, 10),
+ __P4MASK(mem-wb, 11),
+ __P4MASK(own, 13),
+ __P4MASK(other, 14),
+ __P4MASK(prefetch, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
+ __P4MASK(drdy-drv, 0),
+ __P4MASK(drdy-own, 1),
+ __P4MASK(drdy-other, 2),
+ __P4MASK(dbsy-drv, 3),
+ __P4MASK(dbsy-own, 4),
+ __P4MASK(dbsy-other, 5),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
+ __P4MASK(req-type0, 0),
+ __P4MASK(req-type1, 1),
+ __P4MASK(req-len0, 2),
+ __P4MASK(req-len1, 3),
+ __P4MASK(req-io-type, 5),
+ __P4MASK(req-lock-type, 6),
+ __P4MASK(req-cache-type, 7),
+ __P4MASK(req-split-type, 8),
+ __P4MASK(req-dem-type, 9),
+ __P4MASK(req-ord-type, 10),
+ __P4MASK(mem-type0, 11),
+ __P4MASK(mem-type1, 12),
+ __P4MASK(mem-type2, 13),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
+ __P4MASK(all, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
+ __P4MASK(all, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
+ __P4MASK(all, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
+ __P4MASK(all, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
+ __P4MASK(all, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
+ __P4MASK(all, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
+ __P4MASK(all, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
+ __P4MASK(all, 15),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
+ __P4MASK(allp0, 3),
+ __P4MASK(allp2, 4),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
+ __P4MASK(running, 0),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
+ __P4MASK(cisc, 0),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
+ __P4MASK(from-tc-build, 0),
+ __P4MASK(from-tc-deliver, 1),
+ __P4MASK(from-rom, 2),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_rmbt[] = { /* retired mispred branch type */
+ __P4MASK(conditional, 1),
+ __P4MASK(call, 2),
+ __P4MASK(return, 3),
+ __P4MASK(indirect, 4),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
+ __P4MASK(conditional, 1),
+ __P4MASK(call, 2),
+ __P4MASK(retired, 3),
+ __P4MASK(indirect, 4),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
+ __P4MASK(sbfull, 5),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
+ __P4MASK(wcb-evicts, 0),
+ __P4MASK(wcb-full-evict, 1),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_fee[] = { /* front end event */
+ __P4MASK(nbogus, 0),
+ __P4MASK(bogus, 1),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_ee[] = { /* execution event */
+ __P4MASK(nbogus0, 0),
+ __P4MASK(nbogus1, 1),
+ __P4MASK(nbogus2, 2),
+ __P4MASK(nbogus3, 3),
+ __P4MASK(bogus0, 4),
+ __P4MASK(bogus1, 5),
+ __P4MASK(bogus2, 6),
+ __P4MASK(bogus3, 7),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_re[] = { /* replay event */
+ __P4MASK(nbogus, 0),
+ __P4MASK(bogus, 1),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
+ __P4MASK(nbogusntag, 0),
+ __P4MASK(nbogustag, 1),
+ __P4MASK(bogusntag, 2),
+ __P4MASK(bogustag, 3),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
+ __P4MASK(nbogus, 0),
+ __P4MASK(bogus, 1),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_ut[] = { /* uop type */
+ __P4MASK(tagloads, 1),
+ __P4MASK(tagstores, 2),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_br[] = { /* branch retired */
+ __P4MASK(mmnp, 0),
+ __P4MASK(mmnm, 1),
+ __P4MASK(mmtp, 2),
+ __P4MASK(mmtm, 3),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
+ __P4MASK(nbogus, 0),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
+ __P4MASK(fpsu, 0),
+ __P4MASK(fpso, 1),
+ __P4MASK(poao, 2),
+ __P4MASK(poau, 3),
+ __P4MASK(prea, 4),
+ NULLMASK
+};
+
+static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
+ __P4MASK(clear, 0),
+ __P4MASK(moclear, 2),
+ __P4MASK(smclear, 3),
+ NULLMASK
+};
+
+/* P4 event parser */
+static int
+p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+
+ char *e, *p, *q;
+ int count, has_tag, has_busreqtype, n;
+ uint32_t evmask, cccractivemask;
+ const struct pmc_masks *pm, *pmask;
+
+ pmc_config->pm_caps |= PMC_CAP_READ;
+ pmc_config->pm_p4_cccrconfig = pmc_config->pm_p4_escrconfig = 0;
+
+ if (pe == PMC_EV_TSC_TSC) {
+ /* TSC must not be further qualified */
+ if (ctrspec && *ctrspec != '\0')
+ return -1;
+ return 0;
+ }
+
+ pmask = NULL;
+ evmask = 0;
+ cccractivemask = 0x3;
+ has_tag = has_busreqtype = 0;
+ pmc_config->pm_caps |= PMC_CAP_WRITE;
+
+#define __P4SETMASK(M) do { \
+ pmask = p4_mask_##M; \
+} while (0)
+
+ switch (pe) {
+ case PMC_EV_P4_TC_DELIVER_MODE:
+ __P4SETMASK(tcdm);
+ break;
+ case PMC_EV_P4_BPU_FETCH_REQUEST:
+ __P4SETMASK(bfr);
+ break;
+ case PMC_EV_P4_ITLB_REFERENCE:
+ __P4SETMASK(ir);
+ break;
+ case PMC_EV_P4_MEMORY_CANCEL:
+ __P4SETMASK(memcan);
+ break;
+ case PMC_EV_P4_MEMORY_COMPLETE:
+ __P4SETMASK(memcomp);
+ break;
+ case PMC_EV_P4_LOAD_PORT_REPLAY:
+ __P4SETMASK(lpr);
+ break;
+ case PMC_EV_P4_STORE_PORT_REPLAY:
+ __P4SETMASK(spr);
+ break;
+ case PMC_EV_P4_MOB_LOAD_REPLAY:
+ __P4SETMASK(mlr);
+ break;
+ case PMC_EV_P4_PAGE_WALK_TYPE:
+ __P4SETMASK(pwt);
+ break;
+ case PMC_EV_P4_BSQ_CACHE_REFERENCE:
+ __P4SETMASK(bcr);
+ break;
+ case PMC_EV_P4_IOQ_ALLOCATION:
+ __P4SETMASK(ia);
+ has_busreqtype = 1;
+ break;
+ case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
+ __P4SETMASK(iae);
+ has_busreqtype = 1;
+ break;
+ case PMC_EV_P4_FSB_DATA_ACTIVITY:
+ __P4SETMASK(fda);
+ break;
+ case PMC_EV_P4_BSQ_ALLOCATION:
+ __P4SETMASK(ba);
+ break;
+ case PMC_EV_P4_SSE_INPUT_ASSIST:
+ __P4SETMASK(sia);
+ break;
+ case PMC_EV_P4_PACKED_SP_UOP:
+ __P4SETMASK(psu);
+ break;
+ case PMC_EV_P4_PACKED_DP_UOP:
+ __P4SETMASK(pdu);
+ break;
+ case PMC_EV_P4_SCALAR_SP_UOP:
+ __P4SETMASK(ssu);
+ break;
+ case PMC_EV_P4_SCALAR_DP_UOP:
+ __P4SETMASK(sdu);
+ break;
+ case PMC_EV_P4_64BIT_MMX_UOP:
+ __P4SETMASK(64bmu);
+ break;
+ case PMC_EV_P4_128BIT_MMX_UOP:
+ __P4SETMASK(128bmu);
+ break;
+ case PMC_EV_P4_X87_FP_UOP:
+ __P4SETMASK(xfu);
+ break;
+ case PMC_EV_P4_X87_SIMD_MOVES_UOP:
+ __P4SETMASK(xsmu);
+ break;
+ case PMC_EV_P4_GLOBAL_POWER_EVENTS:
+ __P4SETMASK(gpe);
+ break;
+ case PMC_EV_P4_TC_MS_XFER:
+ __P4SETMASK(tmx);
+ break;
+ case PMC_EV_P4_UOP_QUEUE_WRITES:
+ __P4SETMASK(uqw);
+ break;
+ case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
+ __P4SETMASK(rmbt);
+ break;
+ case PMC_EV_P4_RETIRED_BRANCH_TYPE:
+ __P4SETMASK(rbt);
+ break;
+ case PMC_EV_P4_RESOURCE_STALL:
+ __P4SETMASK(rs);
+ break;
+ case PMC_EV_P4_WC_BUFFER:
+ __P4SETMASK(wb);
+ break;
+ case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
+ case PMC_EV_P4_B2B_CYCLES:
+ case PMC_EV_P4_BNR:
+ case PMC_EV_P4_SNOOP:
+ case PMC_EV_P4_RESPONSE:
+ break;
+ case PMC_EV_P4_FRONT_END_EVENT:
+ __P4SETMASK(fee);
+ break;
+ case PMC_EV_P4_EXECUTION_EVENT:
+ __P4SETMASK(ee);
+ break;
+ case PMC_EV_P4_REPLAY_EVENT:
+ __P4SETMASK(re);
+ break;
+ case PMC_EV_P4_INSTR_RETIRED:
+ __P4SETMASK(insret);
+ break;
+ case PMC_EV_P4_UOPS_RETIRED:
+ __P4SETMASK(ur);
+ break;
+ case PMC_EV_P4_UOP_TYPE:
+ __P4SETMASK(ut);
+ break;
+ case PMC_EV_P4_BRANCH_RETIRED:
+ __P4SETMASK(br);
+ break;
+ case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
+ __P4SETMASK(mbr);
+ break;
+ case PMC_EV_P4_X87_ASSIST:
+ __P4SETMASK(xa);
+ break;
+ case PMC_EV_P4_MACHINE_CLEAR:
+ __P4SETMASK(machclr);
+ break;
+ default:
+ return -1;
+ }
+
+ /* process additional flags */
+ while ((p = strsep(&ctrspec, ",")) != NULL) {
+ if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+
+ if (strcmp(q, P4_KW_ACTIVE_NONE) == 0)
+ cccractivemask = 0x0;
+ else if (strcmp(q, P4_KW_ACTIVE_SINGLE) == 0)
+ cccractivemask = 0x1;
+ else if (strcmp(q, P4_KW_ACTIVE_BOTH) == 0)
+ cccractivemask = 0x2;
+ else if (strcmp(q, P4_KW_ACTIVE_ANY) == 0)
+ cccractivemask = 0x3;
+ else
+ return -1;
+
+ } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
+ if (has_busreqtype == 0)
+ return -1;
+
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+
+ count = strtol(q, &e, 0);
+ if (e == q || *e != '\0')
+ return -1;
+ evmask = (evmask & ~0x1F) | (count & 0x1F);
+ } else if (KWMATCH(p, P4_KW_CASCADE))
+ pmc_config->pm_caps |= PMC_CAP_CASCADE;
+ else if (KWMATCH(p, P4_KW_EDGE))
+ pmc_config->pm_caps |= PMC_CAP_EDGE;
+ else if (KWMATCH(p, P4_KW_INV))
+ pmc_config->pm_caps |= PMC_CAP_INVERT;
+ else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
+ if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
+ return -1;
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ } else if (KWMATCH(p, P4_KW_OS))
+ pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+ else if (KWMATCH(p, P4_KW_PRECISE))
+ pmc_config->pm_caps |= PMC_CAP_PRECISE;
+ else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
+ if (has_tag == 0)
+ return -1;
+
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+
+ count = strtol(q, &e, 0);
+ if (e == q || *e != '\0')
+ return -1;
+
+ pmc_config->pm_caps |= PMC_CAP_TAGGING;
+ pmc_config->pm_p4_escrconfig |=
+ P4_ESCR_TO_TAG_VALUE(count);
+ } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+
+ count = strtol(q, &e, 0);
+ if (e == q || *e != '\0')
+ return -1;
+
+ pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
+ pmc_config->pm_p4_cccrconfig &= ~P4_CCCR_THRESHOLD_MASK;
+ pmc_config->pm_p4_cccrconfig |= P4_CCCR_TO_THRESHOLD(count);
+ } else if (KWMATCH(p, P4_KW_USR))
+ pmc_config->pm_caps |= PMC_CAP_USER;
+ else
+ return -1;
+ }
+
+ /* other post processing */
+ if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
+ pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
+ pe == PMC_EV_P4_BSQ_ALLOCATION)
+ pmc_config->pm_caps |= PMC_CAP_EDGE;
+
+ /* fill in thread activity mask */
+ pmc_config->pm_p4_cccrconfig |=
+ P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
+
+ if (evmask)
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+
+ switch (pe) {
+ case PMC_EV_P4_FSB_DATA_ACTIVITY:
+ if ((evmask & 0x06) == 0x06 ||
+ (evmask & 0x18) == 0x18)
+ return -1; /* can't have own+other bits together */
+ if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
+ evmask = 0x1D;
+ break;
+ case PMC_EV_P4_MACHINE_CLEAR:
+ /* only one bit is allowed to be set */
+ if ((evmask & (evmask - 1)) != 0)
+ return -1;
+ if (evmask == 0) {
+ evmask = 0x1; /* 'CLEAR' */
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ }
+ break;
+ default:
+ if (evmask == 0 && pmask) {
+ for (pm = pmask; pm->pm_name; pm++)
+ evmask |= pm->pm_value;
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ }
+ }
+
+ pmc_config->pm_p4_escrconfig = P4_ESCR_TO_EVENT_MASK(evmask);
+
+ return 0;
+}
+
+/*
+ * Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III,
+ * and Pentium M CPUs.
+ */
+
+static struct pmc_event_alias p6_aliases[] = {
+ EV_ALIAS("branches", "p6-br-inst-retired"),
+ EV_ALIAS("branch-mispredicts", "p6-br-miss-pred-retired"),
+ EV_ALIAS("cycles", "tsc"),
+ EV_ALIAS("instructions", "p6-inst-retired"),
+ EV_ALIAS("interrupts", "p6-hw-int-rx"),
+ EV_ALIAS(NULL, NULL)
+};
+
+#define P6_KW_CMASK "cmask"
+#define P6_KW_EDGE "edge"
+#define P6_KW_INV "inv"
+#define P6_KW_OS "os"
+#define P6_KW_UMASK "umask"
+#define P6_KW_USR "usr"
+
+static struct pmc_masks p6_mask_mesi[] = {
+ PMCMASK(m, 0x01),
+ PMCMASK(e, 0x02),
+ PMCMASK(s, 0x04),
+ PMCMASK(i, 0x08),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_mesihw[] = {
+ PMCMASK(m, 0x01),
+ PMCMASK(e, 0x02),
+ PMCMASK(s, 0x04),
+ PMCMASK(i, 0x08),
+ PMCMASK(nonhw, 0x00),
+ PMCMASK(hw, 0x10),
+ PMCMASK(both, 0x30),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_hw[] = {
+ PMCMASK(nonhw, 0x00),
+ PMCMASK(hw, 0x10),
+ PMCMASK(both, 0x30),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_any[] = {
+ PMCMASK(self, 0x00),
+ PMCMASK(any, 0x20),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_ekp[] = {
+ PMCMASK(nta, 0x00),
+ PMCMASK(t1, 0x01),
+ PMCMASK(t2, 0x02),
+ PMCMASK(wos, 0x03),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_pps[] = {
+ PMCMASK(packed-and-scalar, 0x00),
+ PMCMASK(scalar, 0x01),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_mite[] = {
+ PMCMASK(packed-multiply, 0x01),
+ PMCMASK(packed-shift, 0x02),
+ PMCMASK(pack, 0x04),
+ PMCMASK(unpack, 0x08),
+ PMCMASK(packed-logical, 0x10),
+ PMCMASK(packed-arithmetic, 0x20),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_fmt[] = {
+ PMCMASK(mmxtofp, 0x00),
+ PMCMASK(fptommx, 0x01),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_sr[] = {
+ PMCMASK(es, 0x01),
+ PMCMASK(ds, 0x02),
+ PMCMASK(fs, 0x04),
+ PMCMASK(gs, 0x08),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_eet[] = {
+ PMCMASK(all, 0x00),
+ PMCMASK(freq, 0x02),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_efur[] = {
+ PMCMASK(all, 0x00),
+ PMCMASK(loadop, 0x01),
+ PMCMASK(stdsta, 0x02),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_essir[] = {
+ PMCMASK(sse-packed-single, 0x00),
+ PMCMASK(sse-packed-single-scalar-single, 0x01),
+ PMCMASK(sse2-packed-double, 0x02),
+ PMCMASK(sse2-scalar-double, 0x03),
+ NULLMASK
+};
+
+static struct pmc_masks p6_mask_esscir[] = {
+ PMCMASK(sse-packed-single, 0x00),
+ PMCMASK(sse-scalar-single, 0x01),
+ PMCMASK(sse2-packed-double, 0x02),
+ PMCMASK(sse2-scalar-double, 0x03),
+ NULLMASK
+};
+
+/* P6 event parser */
+static int
+p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ char *e, *p, *q;
+ uint32_t evmask;
+ int count, n;
+ const struct pmc_masks *pm, *pmask;
+
+ pmc_config->pm_caps |= PMC_CAP_READ;
+ pmc_config->pm_p6_config = 0;
+
+ if (pe == PMC_EV_TSC_TSC) {
+ if (ctrspec && *ctrspec != '\0')
+ return -1;
+ return 0;
+ }
+
+ pmc_config->pm_caps |= PMC_CAP_WRITE;
+ evmask = 0;
+
+#define P6MASKSET(M) pmask = p6_mask_ ## M
+
+ switch(pe) {
+ case PMC_EV_P6_L2_IFETCH: P6MASKSET(mesi); break;
+ case PMC_EV_P6_L2_LD: P6MASKSET(mesi); break;
+ case PMC_EV_P6_L2_ST: P6MASKSET(mesi); break;
+ case PMC_EV_P6_L2_RQSTS: P6MASKSET(mesi); break;
+ case PMC_EV_P6_BUS_DRDY_CLOCKS:
+ case PMC_EV_P6_BUS_LOCK_CLOCKS:
+ case PMC_EV_P6_BUS_TRAN_BRD:
+ case PMC_EV_P6_BUS_TRAN_RFO:
+ case PMC_EV_P6_BUS_TRANS_WB:
+ case PMC_EV_P6_BUS_TRAN_IFETCH:
+ case PMC_EV_P6_BUS_TRAN_INVAL:
+ case PMC_EV_P6_BUS_TRAN_PWR:
+ case PMC_EV_P6_BUS_TRANS_P:
+ case PMC_EV_P6_BUS_TRANS_IO:
+ case PMC_EV_P6_BUS_TRAN_DEF:
+ case PMC_EV_P6_BUS_TRAN_BURST:
+ case PMC_EV_P6_BUS_TRAN_ANY:
+ case PMC_EV_P6_BUS_TRAN_MEM:
+ P6MASKSET(any); break;
+ case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
+ case PMC_EV_P6_EMON_KNI_PREF_MISS:
+ P6MASKSET(ekp); break;
+ case PMC_EV_P6_EMON_KNI_INST_RETIRED:
+ case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
+ P6MASKSET(pps); break;
+ case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
+ P6MASKSET(mite); break;
+ case PMC_EV_P6_FP_MMX_TRANS:
+ P6MASKSET(fmt); break;
+ case PMC_EV_P6_SEG_RENAME_STALLS:
+ case PMC_EV_P6_SEG_REG_RENAMES:
+ P6MASKSET(sr); break;
+ case PMC_EV_P6_EMON_EST_TRANS:
+ P6MASKSET(eet); break;
+ case PMC_EV_P6_EMON_FUSED_UOPS_RET:
+ P6MASKSET(efur); break;
+ case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
+ P6MASKSET(essir); break;
+ case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
+ P6MASKSET(esscir); break;
+ default:
+ pmask = NULL;
+ break;
+ }
+
+ /* Pentium M PMCs have a few events with different semantics */
+ if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
+ if (pe == PMC_EV_P6_L2_LD ||
+ pe == PMC_EV_P6_L2_LINES_IN ||
+ pe == PMC_EV_P6_L2_LINES_OUT)
+ P6MASKSET(mesihw);
+ else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
+ P6MASKSET(hw);
+ }
+
+ /* Parse additional modifiers if present */
+ while ((p = strsep(&ctrspec, ",")) != NULL) {
+ if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+ count = strtol(q, &e, 0);
+ if (e == q || *e != '\0')
+ return -1;
+ pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
+ pmc_config->pm_p6_config |= P6_EVSEL_TO_CMASK(count);
+ } else if (KWMATCH(p, P6_KW_EDGE)) {
+ pmc_config->pm_caps |= PMC_CAP_EDGE;
+ } else if (KWMATCH(p, P6_KW_INV)) {
+ pmc_config->pm_caps |= PMC_CAP_INVERT;
+ } else if (KWMATCH(p, P6_KW_OS)) {
+ pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+ } else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
+ evmask = 0;
+ if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
+ return -1;
+ if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
+ pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
+ pe == PMC_EV_P6_BUS_TRAN_BRD ||
+ pe == PMC_EV_P6_BUS_TRAN_RFO ||
+ pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
+ pe == PMC_EV_P6_BUS_TRAN_INVAL ||
+ pe == PMC_EV_P6_BUS_TRAN_PWR ||
+ pe == PMC_EV_P6_BUS_TRAN_DEF ||
+ pe == PMC_EV_P6_BUS_TRAN_BURST ||
+ pe == PMC_EV_P6_BUS_TRAN_ANY ||
+ pe == PMC_EV_P6_BUS_TRAN_MEM ||
+ pe == PMC_EV_P6_BUS_TRANS_IO ||
+ pe == PMC_EV_P6_BUS_TRANS_P ||
+ pe == PMC_EV_P6_BUS_TRANS_WB ||
+ pe == PMC_EV_P6_EMON_EST_TRANS ||
+ pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
+ pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
+ pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
+ pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
+ pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
+ pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
+ pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
+ pe == PMC_EV_P6_FP_MMX_TRANS)
+ && (n > 1))
+ return -1; /* only one mask keyword allowed */
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ } else if (KWMATCH(p, P6_KW_USR)) {
+ pmc_config->pm_caps |= PMC_CAP_USER;
+ } else
+ return -1;
+ }
+
+ /* post processing */
+ switch (pe) {
+
+ /*
+ * The following events default to an evmask of 0
+ */
+
+ /* default => 'self' */
+ case PMC_EV_P6_BUS_DRDY_CLOCKS:
+ case PMC_EV_P6_BUS_LOCK_CLOCKS:
+ case PMC_EV_P6_BUS_TRAN_BRD:
+ case PMC_EV_P6_BUS_TRAN_RFO:
+ case PMC_EV_P6_BUS_TRANS_WB:
+ case PMC_EV_P6_BUS_TRAN_IFETCH:
+ case PMC_EV_P6_BUS_TRAN_INVAL:
+ case PMC_EV_P6_BUS_TRAN_PWR:
+ case PMC_EV_P6_BUS_TRANS_P:
+ case PMC_EV_P6_BUS_TRANS_IO:
+ case PMC_EV_P6_BUS_TRAN_DEF:
+ case PMC_EV_P6_BUS_TRAN_BURST:
+ case PMC_EV_P6_BUS_TRAN_ANY:
+ case PMC_EV_P6_BUS_TRAN_MEM:
+
+ /* default => 'nta' */
+ case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
+ case PMC_EV_P6_EMON_KNI_PREF_MISS:
+
+ /* default => 'packed and scalar' */
+ case PMC_EV_P6_EMON_KNI_INST_RETIRED:
+ case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
+
+ /* default => 'mmx to fp transitions' */
+ case PMC_EV_P6_FP_MMX_TRANS:
+
+ /* default => 'SSE Packed Single' */
+ case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
+ case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
+
+ /* default => 'all fused micro-ops' */
+ case PMC_EV_P6_EMON_FUSED_UOPS_RET:
+
+ /* default => 'all transitions' */
+ case PMC_EV_P6_EMON_EST_TRANS:
+ break;
+
+ case PMC_EV_P6_MMX_UOPS_EXEC:
+ evmask = 0x0F; /* only value allowed */
+ break;
+
+ default:
+
+ /*
+ * For all other events, set the default event mask
+ * to a logical OR of all the allowed event mask bits.
+ */
+
+ if (evmask == 0 && pmask) {
+ for (pm = pmask; pm->pm_name; pm++)
+ evmask |= pm->pm_value;
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ }
+
+ break;
+ }
+
+ if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
+ pmc_config->pm_p6_config |= P6_EVSEL_TO_UMASK(evmask);
+
+ return 0;
+}
+
+/*
+ * Pentium style PMCs
+ */
+
+static struct pmc_event_alias p5_aliases[] = {
+ EV_ALIAS("cycles", "tsc"),
+ EV_ALIAS(NULL, NULL)
+};
+
+static int
+p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ return -1 || pe || ctrspec || pmc_config; /* shut up gcc */
+}
+
+#elif __amd64__
+
+/*
+ * AMD K8 PMCs.
+ *
+ * These are very similar to AMD K7 PMCs, but support more kinds of
+ * events.
+ */
+
+static struct pmc_event_alias k8_aliases[] = {
+ EV_ALIAS("cycles", "tsc"),
+ EV_ALIAS(NULL, NULL)
+};
+
+#define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
+
+/*
+ * Parsing tables
+ */
+
+/* fp dispatched fpu ops */
+static const struct pmc_masks k8_mask_fdfo[] = {
+ __K8MASK(add-pipe-excluding-junk-ops, 0),
+ __K8MASK(multiply-pipe-excluding-junk-ops, 1),
+ __K8MASK(store-pipe-excluding-junk-ops, 2),
+ __K8MASK(add-pipe-junk-ops, 3),
+ __K8MASK(multiply-pipe-junk-ops, 4),
+ __K8MASK(store-pipe-junk-ops, 5),
+ NULLMASK
+};
+
+/* ls segment register loads */
+static const struct pmc_masks k8_mask_lsrl[] = {
+ __K8MASK(es, 0),
+ __K8MASK(cs, 1),
+ __K8MASK(ss, 2),
+ __K8MASK(ds, 3),
+ __K8MASK(fs, 4),
+ __K8MASK(gs, 5),
+ __K8MASK(hs, 6),
+ NULLMASK
+};
+
+/* ls locked operation */
+static const struct pmc_masks k8_mask_llo[] = {
+ __K8MASK(locked-instructions, 0),
+ __K8MASK(cycles-in-request, 1),
+ __K8MASK(cycles-to-complete, 2),
+ NULLMASK
+};
+
+/* dc refill from {l2,system} and dc copyback */
+static const struct pmc_masks k8_mask_dc[] = {
+ __K8MASK(invalid, 0),
+ __K8MASK(shared, 1),
+ __K8MASK(exclusive, 2),
+ __K8MASK(owner, 3),
+ __K8MASK(modified, 4),
+ NULLMASK
+};
+
+/* dc one bit ecc error */
+static const struct pmc_masks k8_mask_dobee[] = {
+ __K8MASK(scrubber, 0),
+ __K8MASK(piggyback, 1),
+ NULLMASK
+};
+
+/* dc dispatched prefetch instructions */
+static const struct pmc_masks k8_mask_ddpi[] = {
+ __K8MASK(load, 0),
+ __K8MASK(store, 1),
+ __K8MASK(nta, 2),
+ NULLMASK
+};
+
+/* dc dcache accesses by locks */
+static const struct pmc_masks k8_mask_dabl[] = {
+ __K8MASK(accesses, 0),
+ __K8MASK(misses, 1),
+ NULLMASK
+};
+
+/* bu internal l2 request */
+static const struct pmc_masks k8_mask_bilr[] = {
+ __K8MASK(ic-fill, 0),
+ __K8MASK(dc-fill, 1),
+ __K8MASK(tlb-reload, 2),
+ __K8MASK(tag-snoop, 3),
+ __K8MASK(cancelled, 4),
+ NULLMASK
+};
+
+/* bu fill request l2 miss */
+static const struct pmc_masks k8_mask_bfrlm[] = {
+ __K8MASK(ic-fill, 0),
+ __K8MASK(dc-fill, 1),
+ __K8MASK(tlb-reload, 2),
+ NULLMASK
+};
+
+/* bu fill into l2 */
+static const struct pmc_masks k8_mask_bfil[] = {
+ __K8MASK(dirty-l2-victim, 0),
+ __K8MASK(victim-from-l2, 1),
+ NULLMASK
+};
+
+/* fr retired fpu instructions */
+static const struct pmc_masks k8_mask_frfi[] = {
+ __K8MASK(x87, 0),
+ __K8MASK(mmx-3dnow, 1),
+ __K8MASK(packed-sse-sse2, 2),
+ __K8MASK(scalar-sse-sse2, 3),
+ NULLMASK
+};
+
+/* fr retired fastpath double op instructions */
+static const struct pmc_masks k8_mask_frfdoi[] = {
+ __K8MASK(low-op-pos-0, 0),
+ __K8MASK(low-op-pos-1, 1),
+ __K8MASK(low-op-pos-2, 2),
+ NULLMASK
+};
+
+/* fr fpu exceptions */
+static const struct pmc_masks k8_mask_ffe[] = {
+ __K8MASK(x87-reclass-microfaults, 0),
+ __K8MASK(sse-retype-microfaults, 1),
+ __K8MASK(sse-reclass-microfaults, 2),
+ __K8MASK(sse-and-x87-microtraps, 3),
+ NULLMASK
+};
+
+/* nb memory controller page access event */
+static const struct pmc_masks k8_mask_nmcpae[] = {
+ __K8MASK(page-hit, 0),
+ __K8MASK(page-miss, 1),
+ __K8MASK(page-conflict, 2),
+ NULLMASK
+};
+
+/* nb memory controller turnaround */
+static const struct pmc_masks k8_mask_nmct[] = {
+ __K8MASK(dimm-turnaround, 0),
+ __K8MASK(read-to-write-turnaround, 1),
+ __K8MASK(write-to-read-turnaround, 2),
+ NULLMASK
+};
+
+/* nb memory controller bypass saturation */
+static const struct pmc_masks k8_mask_nmcbs[] = {
+ __K8MASK(memory-controller-hi-pri-bypass, 0),
+ __K8MASK(memory-controller-lo-pri-bypass, 1),
+ __K8MASK(dram-controller-interface-bypass, 2),
+ __K8MASK(dram-controller-queue-bypass, 3),
+ NULLMASK
+};
+
+/* nb sized commands */
+static const struct pmc_masks k8_mask_nsc[] = {
+ __K8MASK(nonpostwrszbyte, 0),
+ __K8MASK(nonpostwrszdword, 1),
+ __K8MASK(postwrszbyte, 2),
+ __K8MASK(postwrszdword, 3),
+ __K8MASK(rdszbyte, 4),
+ __K8MASK(rdszdword, 5),
+ __K8MASK(rdmodwr, 6),
+ NULLMASK
+};
+
+/* nb probe result */
+static const struct pmc_masks k8_mask_npr[] = {
+ __K8MASK(probe-miss, 0),
+ __K8MASK(probe-hit, 1),
+ __K8MASK(probe-hit-dirty-no-memory-cancel, 2),
+ __K8MASK(probe-hit-dirty-with-memory-cancel, 3),
+ NULLMASK
+};
+
+/* nb hypertransport bus bandwidth */
+static const struct pmc_masks k8_mask_nhbb[] = {
+ __K8MASK(command, 0),
+ __K8MASK(data, 1),
+ __K8MASK(buffer-release, 2),
+ __K8MASK(nop, 3),
+ NULLMASK
+};
+
+#undef __K8MASK
+
+#define K8_KW_COUNT "count"
+#define K8_KW_EDGE "edge"
+#define K8_KW_INV "inv"
+#define K8_KW_MASK "mask"
+#define K8_KW_OS "os"
+#define K8_KW_USR "usr"
+
+static int
+k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ char *e, *p, *q;
+ int n;
+ uint32_t count, evmask;
+ const struct pmc_masks *pm, *pmask;
+
+ pmc_config->pm_caps |= PMC_CAP_READ;
+ pmc_config->pm_amd_config = 0;
+
+ if (pe == PMC_EV_TSC_TSC) {
+ /* TSC events must be unqualified. */
+ if (ctrspec && *ctrspec != '\0')
+ return -1;
+ return 0;
+ }
+
+ pmask = NULL;
+ evmask = 0;
+
+#define __K8SETMASK(M) pmask = k8_mask_##M
+
+ /* setup parsing tables */
+ switch (pe) {
+ case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
+ __K8SETMASK(fdfo);
+ break;
+ case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
+ __K8SETMASK(lsrl);
+ break;
+ case PMC_EV_K8_LS_LOCKED_OPERATION:
+ __K8SETMASK(llo);
+ break;
+ case PMC_EV_K8_DC_REFILL_FROM_L2:
+ case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
+ case PMC_EV_K8_DC_COPYBACK:
+ __K8SETMASK(dc);
+ break;
+ case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
+ __K8SETMASK(dobee);
+ break;
+ case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
+ __K8SETMASK(ddpi);
+ break;
+ case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
+ __K8SETMASK(dabl);
+ break;
+ case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
+ __K8SETMASK(bilr);
+ break;
+ case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
+ __K8SETMASK(bfrlm);
+ break;
+ case PMC_EV_K8_BU_FILL_INTO_L2:
+ __K8SETMASK(bfil);
+ break;
+ case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
+ __K8SETMASK(frfi);
+ break;
+ case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
+ __K8SETMASK(frfdoi);
+ break;
+ case PMC_EV_K8_FR_FPU_EXCEPTIONS:
+ __K8SETMASK(ffe);
+ break;
+ case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
+ __K8SETMASK(nmcpae);
+ break;
+ case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
+ __K8SETMASK(nmct);
+ break;
+ case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
+ __K8SETMASK(nmcbs);
+ break;
+ case PMC_EV_K8_NB_SIZED_COMMANDS:
+ __K8SETMASK(nsc);
+ break;
+ case PMC_EV_K8_NB_PROBE_RESULT:
+ __K8SETMASK(npr);
+ break;
+ case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
+ case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
+ case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
+ __K8SETMASK(nhbb);
+ break;
+
+ default:
+ break; /* no options defined */
+ }
+
+ pmc_config->pm_caps |= PMC_CAP_WRITE;
+
+ while ((p = strsep(&ctrspec, ",")) != NULL) {
+ if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+
+ count = strtol(q, &e, 0);
+ if (e == q || *e != '\0')
+ return -1;
+
+ pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
+ pmc_config->pm_amd_config |= K8_PMC_TO_COUNTER(count);
+
+ } else if (KWMATCH(p, K8_KW_EDGE)) {
+ pmc_config->pm_caps |= PMC_CAP_EDGE;
+ } else if (KWMATCH(p, K8_KW_INV)) {
+ pmc_config->pm_caps |= PMC_CAP_INVERT;
+ } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
+ if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
+ return -1;
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ } else if (KWMATCH(p, K8_KW_OS)) {
+ pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+ } else if (KWMATCH(p, K8_KW_USR)) {
+ pmc_config->pm_caps |= PMC_CAP_USER;
+ } else
+ return -1;
+ }
+
+ /* other post processing */
+
+ switch (pe) {
+ case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
+ case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
+ case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
+ case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
+ case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
+ case PMC_EV_K8_FR_FPU_EXCEPTIONS:
+ /* XXX only available in rev B and later */
+ break;
+ case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
+ /* XXX only available in rev C and later */
+ break;
+ case PMC_EV_K8_LS_LOCKED_OPERATION:
+ /* XXX CPU Rev A,B evmask is to be zero */
+ if (evmask & (evmask - 1)) /* > 1 bit set */
+ return -1;
+ if (evmask == 0) {
+ evmask = 0x01; /* Rev C and later: #instrs */
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ }
+ break;
+ default:
+ if (evmask == 0 && pmask != NULL) {
+ for (pm = pmask; pm->pm_name; pm++)
+ evmask |= pm->pm_value;
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ }
+ }
+
+ if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
+ pmc_config->pm_amd_config |= K8_PMC_TO_UNITMASK(evmask);
+
+ return 0;
+}
+#endif
+
+/*
+ * API entry points
+ */
+
+int
+pmc_init(void)
+{
+ int error, pmc_mod_id;
+ uint32_t abi_version;
+ struct module_stat pmc_modstat;
+
+ if (pmc_syscall != -1) /* already inited */
+ return 0;
+
+ /* retrieve the system call number from the KLD */
+ if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
+ return -1;
+
+ pmc_modstat.version = sizeof(struct module_stat);
+ if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
+ return -1;
+
+ pmc_syscall = pmc_modstat.data.intval;
+
+ /* check ABI version against compiled-in version */
+ if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
+ return (pmc_syscall = -1);
+
+ /* ignore patch numbers for the comparison */
+ if ((abi_version & 0xFFFF0000) != (PMC_VERSION & 0xFFFF0000)) {
+ errno = EPROGMISMATCH;
+ return (pmc_syscall = -1);
+ }
+
+ if (PMC_CALL(GETCPUINFO, &cpu_info) < 0)
+ return (pmc_syscall = -1);
+
+ /* set parser pointer */
+ switch (cpu_info.pm_cputype) {
+#if __i386__
+ case PMC_CPU_AMD_K7:
+ pmc_mdep_event_aliases = k7_aliases;
+ pmc_mdep_allocate_pmc = k7_allocate_pmc;
+ break;
+ case PMC_CPU_INTEL_P5:
+ pmc_mdep_event_aliases = p5_aliases;
+ pmc_mdep_allocate_pmc = p5_allocate_pmc;
+ break;
+ case PMC_CPU_INTEL_P6: /* P6 ... Pentium M CPUs have */
+ case PMC_CPU_INTEL_PII: /* similar PMCs. */
+ case PMC_CPU_INTEL_PIII:
+ case PMC_CPU_INTEL_PM:
+ pmc_mdep_event_aliases = p6_aliases;
+ pmc_mdep_allocate_pmc = p6_allocate_pmc;
+ break;
+ case PMC_CPU_INTEL_PIV:
+ pmc_mdep_event_aliases = p4_aliases;
+ pmc_mdep_allocate_pmc = p4_allocate_pmc;
+ break;
+#elif __amd64__
+ case PMC_CPU_AMD_K8:
+ pmc_mdep_event_aliases = k8_aliases;
+ pmc_mdep_allocate_pmc = k8_allocate_pmc;
+ break;
+#endif
+
+ default:
+ /*
+ * Some kind of CPU this version of the library knows nothing
+ * about. This shouldn't happen since the ABI version check
+ * should have caught this.
+ */
+ errno = ENXIO;
+ return (pmc_syscall = -1);
+ }
+
+ return 0;
+}
+
+int
+pmc_allocate(const char *ctrspec, enum pmc_mode mode,
+ uint32_t flags, int cpu, pmc_id_t *pmcid)
+{
+ int retval;
+ enum pmc_event pe;
+ char *r, *spec_copy;
+ const char *ctrname;
+ const struct pmc_event_alias *p;
+ struct pmc_op_pmcallocate pmc_config;
+
+ spec_copy = NULL;
+ retval = -1;
+
+ if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
+ mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
+ errno = EINVAL;
+ goto out;
+ }
+
+ /* replace an event alias with the canonical event specifier */
+ if (pmc_mdep_event_aliases)
+ for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
+ if (!strcmp(ctrspec, p->pm_alias)) {
+ spec_copy = strdup(p->pm_spec);
+ break;
+ }
+
+ if (spec_copy == NULL)
+ spec_copy = strdup(ctrspec);
+
+ r = spec_copy;
+ ctrname = strsep(&r, ",");
+
+ /* look for the given counter name */
+
+ for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
+ if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
+ break;
+
+ if (pe > PMC_EVENT_LAST) {
+ errno = EINVAL;
+ goto out;
+ }
+
+ bzero(&pmc_config, sizeof(pmc_config));
+ pmc_config.pm_ev = pmc_event_table[pe].pm_ev_code;
+ pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
+ pmc_config.pm_cpu = cpu;
+ pmc_config.pm_mode = mode;
+ pmc_config.pm_flags = flags;
+
+ if (PMC_IS_SAMPLING_MODE(mode))
+ pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
+
+ if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
+ errno = EINVAL;
+ goto out;
+ }
+
+ if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
+ goto out;
+
+ *pmcid = pmc_config.pm_pmcid;
+
+ retval = 0;
+
+ out:
+ if (spec_copy)
+ free(spec_copy);
+
+ return retval;
+}
+
+int
+pmc_attach(pmc_id_t pmc, pid_t pid)
+{
+ struct pmc_op_pmcattach pmc_attach_args;
+
+ pmc_attach_args.pm_pmc = pmc;
+ pmc_attach_args.pm_pid = pid;
+
+ return PMC_CALL(PMCATTACH, &pmc_attach_args);
+}
+
+int
+pmc_detach(pmc_id_t pmc, pid_t pid)
+{
+ struct pmc_op_pmcattach pmc_detach_args;
+
+ pmc_detach_args.pm_pmc = pmc;
+ pmc_detach_args.pm_pid = pid;
+
+ return PMC_CALL(PMCDETACH, &pmc_detach_args);
+}
+
+int
+pmc_release(pmc_id_t pmc)
+{
+ struct pmc_op_simple pmc_release_args;
+
+ pmc_release_args.pm_pmcid = pmc;
+
+ return PMC_CALL(PMCRELEASE, &pmc_release_args);
+}
+
+int
+pmc_start(pmc_id_t pmc)
+{
+ struct pmc_op_simple pmc_start_args;
+
+ pmc_start_args.pm_pmcid = pmc;
+ return PMC_CALL(PMCSTART, &pmc_start_args);
+}
+
+int
+pmc_stop(pmc_id_t pmc)
+{
+ struct pmc_op_simple pmc_stop_args;
+
+ pmc_stop_args.pm_pmcid = pmc;
+ return PMC_CALL(PMCSTOP, &pmc_stop_args);
+}
+
+int
+pmc_read(pmc_id_t pmc, pmc_value_t *value)
+{
+ struct pmc_op_pmcrw pmc_read_op;
+
+ pmc_read_op.pm_pmcid = pmc;
+ pmc_read_op.pm_flags = PMC_F_OLDVALUE;
+ pmc_read_op.pm_value = -1;
+
+ if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
+ return -1;
+
+ *value = pmc_read_op.pm_value;
+
+ return 0;
+}
+
+int
+pmc_write(pmc_id_t pmc, pmc_value_t value)
+{
+ struct pmc_op_pmcrw pmc_write_op;
+
+ pmc_write_op.pm_pmcid = pmc;
+ pmc_write_op.pm_flags = PMC_F_NEWVALUE;
+ pmc_write_op.pm_value = value;
+
+ return PMC_CALL(PMCRW, &pmc_write_op);
+}
+
+int
+pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
+{
+ struct pmc_op_pmcrw pmc_rw_op;
+
+ pmc_rw_op.pm_pmcid = pmc;
+ pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
+ pmc_rw_op.pm_value = newvalue;
+
+ if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
+ return -1;
+
+ *oldvaluep = pmc_rw_op.pm_value;
+
+ return 0;
+}
+
+int
+pmc_set(pmc_id_t pmc, pmc_value_t value)
+{
+ struct pmc_op_pmcsetcount sc;
+
+ sc.pm_pmcid = pmc;
+ sc.pm_count = value;
+
+ if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
+ return -1;
+
+ return 0;
+
+}
+
+int
+pmc_configure_logfile(int fd)
+{
+ struct pmc_op_configurelog cla;
+
+ cla.pm_logfd = fd;
+ if (PMC_CALL(CONFIGURELOG, &cla) < 0)
+ return -1;
+
+ return 0;
+}
+
+int
+pmc_get_driver_stats(struct pmc_op_getdriverstats *gms)
+{
+ return PMC_CALL(GETDRIVERSTATS, gms);
+}
+
+int
+pmc_ncpu(void)
+{
+ if (pmc_syscall == -1) {
+ errno = ENXIO;
+ return -1;
+ }
+
+ return cpu_info.pm_ncpu;
+}
+
+int
+pmc_npmc(int cpu)
+{
+ if (pmc_syscall == -1) {
+ errno = ENXIO;
+ return -1;
+ }
+
+ if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ return cpu_info.pm_npmc;
+}
+
+int
+pmc_enable(int cpu, int pmc)
+{
+ struct pmc_op_pmcadmin ssa;
+
+ ssa.pm_cpu = cpu;
+ ssa.pm_pmc = pmc;
+ ssa.pm_state = PMC_STATE_FREE;
+ return PMC_CALL(PMCADMIN, &ssa);
+}
+
+int
+pmc_disable(int cpu, int pmc)
+{
+ struct pmc_op_pmcadmin ssa;
+
+ ssa.pm_cpu = cpu;
+ ssa.pm_pmc = pmc;
+ ssa.pm_state = PMC_STATE_DISABLED;
+ return PMC_CALL(PMCADMIN, &ssa);
+}
+
+int
+pmc_pmcinfo(int cpu, struct pmc_op_getpmcinfo **ppmci)
+{
+ int nbytes, npmc, saved_errno;
+ struct pmc_op_getpmcinfo *pmci;
+
+ if ((npmc = pmc_npmc(cpu)) < 0)
+ return -1;
+
+ nbytes = sizeof(struct pmc_op_getpmcinfo) +
+ npmc * sizeof(struct pmc_info);
+
+ if ((pmci = calloc(1, nbytes)) == NULL)
+ return -1;
+
+ pmci->pm_cpu = cpu;
+
+ if (PMC_CALL(GETPMCINFO, pmci) < 0) {
+ saved_errno = errno;
+ free(pmci);
+ errno = saved_errno;
+ return -1;
+ }
+
+ *ppmci = pmci;
+ return 0;
+}
+
+int
+pmc_cpuinfo(const struct pmc_op_getcpuinfo **pci)
+{
+ if (pmc_syscall == -1) {
+ errno = ENXIO;
+ return -1;
+ }
+
+ *pci = &cpu_info;
+ return 0;
+}
+
+const char *
+pmc_name_of_cputype(enum pmc_cputype cp)
+{
+ if ((int) cp >= PMC_CPU_FIRST &&
+ cp <= PMC_CPU_LAST)
+ return pmc_cputype_names[cp];
+ errno = EINVAL;
+ return NULL;
+}
+
+const char *
+pmc_name_of_class(enum pmc_class pc)
+{
+ if ((int) pc >= PMC_CLASS_FIRST &&
+ pc <= PMC_CLASS_LAST)
+ return pmc_class_names[pc];
+
+ errno = EINVAL;
+ return NULL;
+}
+
+const char *
+pmc_name_of_mode(enum pmc_mode pm)
+{
+ if ((int) pm >= PMC_MODE_FIRST &&
+ pm <= PMC_MODE_LAST)
+ return pmc_mode_names[pm];
+
+ errno = EINVAL;
+ return NULL;
+}
+
+const char *
+pmc_name_of_event(enum pmc_event pe)
+{
+ if ((int) pe >= PMC_EVENT_FIRST &&
+ pe <= PMC_EVENT_LAST)
+ return pmc_event_table[pe].pm_ev_name;
+
+ errno = EINVAL;
+ return NULL;
+}
+
+const char *
+pmc_name_of_state(enum pmc_state ps)
+{
+ if ((int) ps >= PMC_STATE_FIRST &&
+ ps <= PMC_STATE_LAST)
+ return pmc_state_names[ps];
+
+ errno = EINVAL;
+ return NULL;
+}
+
+const char *
+pmc_name_of_disposition(enum pmc_disp pd)
+{
+ if ((int) pd >= PMC_DISP_FIRST &&
+ pd <= PMC_DISP_LAST)
+ return pmc_disposition_names[pd];
+
+ errno = EINVAL;
+ return NULL;
+}
+
+const char *
+pmc_name_of_capability(enum pmc_caps cap)
+{
+ int i;
+
+ /*
+ * 'cap' should have a single bit set and should be in
+ * range.
+ */
+
+ if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
+ cap > PMC_CAP_LAST) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ i = ffs(cap);
+
+ return pmc_capability_names[i - 1];
+}
+
+/*
+ * Return a list of events known to a given PMC class. 'cl' is the
+ * PMC class identifier, 'eventnames' is the returned list of 'const
+ * char *' pointers pointing to the names of the events. 'nevents' is
+ * the number of event name pointers returned.
+ *
+ * The space for 'eventnames' is allocated using malloc(3). The caller
+ * is responsible for freeing this space when done.
+ */
+
+int
+pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
+ int *nevents)
+{
+ int count;
+ const char **names;
+ const struct pmc_event_descr *ev;
+
+ switch (cl)
+ {
+ case PMC_CLASS_TSC:
+ ev = &pmc_event_table[PMC_EV_TSC_TSC];
+ count = 1;
+ break;
+ case PMC_CLASS_K7:
+ ev = &pmc_event_table[PMC_EV_K7_FIRST];
+ count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
+ break;
+ case PMC_CLASS_K8:
+ ev = &pmc_event_table[PMC_EV_K8_FIRST];
+ count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
+ break;
+ case PMC_CLASS_P5:
+ ev = &pmc_event_table[PMC_EV_P5_FIRST];
+ count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
+ break;
+ case PMC_CLASS_P6:
+ ev = &pmc_event_table[PMC_EV_P6_FIRST];
+ count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
+ break;
+ case PMC_CLASS_P4:
+ ev = &pmc_event_table[PMC_EV_P4_FIRST];
+ count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
+ break;
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+
+ if ((names = malloc(count * sizeof(const char *))) == NULL)
+ return -1;
+
+ *eventnames = names;
+ *nevents = count;
+
+ for (;count--; ev++, names++)
+ *names = ev->pm_ev_name;
+ return 0;
+}
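+
+/*
+ * A minimal usage sketch for pmc_event_names_of_class()
+ * (illustrative only): list the events of the TSC class and
+ * release the malloc'ed name list when done.
+ *
+ *	const char **names;
+ *	int i, nevents;
+ *
+ *	if (pmc_event_names_of_class(PMC_CLASS_TSC, &names,
+ *	    &nevents) == 0) {
+ *		for (i = 0; i < nevents; i++)
+ *			printf("%s\n", names[i]);
+ *		free(names);
+ *	}
+ */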
+
+/*
+ * Architecture specific APIs
+ */
+
+#if __i386__ || __amd64__
+
+int
+pmc_x86_get_msr(pmc_id_t pmc, uint32_t *msr)
+{
+ struct pmc_op_x86_getmsr gm;
+
+ gm.pm_pmcid = pmc;
+ if (PMC_CALL(PMCX86GETMSR, &gm) < 0)
+ return -1;
+ *msr = gm.pm_msr;
+ return 0;
+}
+
+#endif
diff --git a/lib/libpmc/pmc.3 b/lib/libpmc/pmc.3
new file mode 100644
index 0000000..2fce168
--- /dev/null
+++ b/lib/libpmc/pmc.3
@@ -0,0 +1,3090 @@
+.\" Copyright (c) 2003 Joseph Koshy. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" This software is provided by Joseph Koshy ``as is'' and
+.\" any express or implied warranties, including, but not limited to, the
+.\" implied warranties of merchantability and fitness for a particular purpose
+.\" are disclaimed. in no event shall Joseph Koshy be liable
+.\" for any direct, indirect, incidental, special, exemplary, or consequential
+.\" damages (including, but not limited to, procurement of substitute goods
+.\" or services; loss of use, data, or profits; or business interruption)
+.\" however caused and on any theory of liability, whether in contract, strict
+.\" liability, or tort (including negligence or otherwise) arising in any way
+.\" out of the use of this software, even if advised of the possibility of
+.\" such damage.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd April 15, 2005
+.Dt PMC 3
+.Os
+.Sh NAME
+.Nm pmc_allocate ,
+.Nm pmc_attach ,
+.Nm pmc_configure_logfile ,
+.Nm pmc_cpuinfo ,
+.Nm pmc_detach ,
+.Nm pmc_disable ,
+.Nm pmc_enable ,
+.Nm pmc_event_names_of_class ,
+.Nm pmc_get_driver_stats ,
+.Nm pmc_init ,
+.Nm pmc_name_of_capability ,
+.Nm pmc_name_of_class ,
+.Nm pmc_name_of_cputype ,
+.Nm pmc_name_of_disposition ,
+.Nm pmc_name_of_event ,
+.Nm pmc_name_of_mode ,
+.Nm pmc_name_of_state ,
+.Nm pmc_ncpu ,
+.Nm pmc_npmc ,
+.Nm pmc_pmcinfo ,
+.Nm pmc_read ,
+.Nm pmc_release ,
+.Nm pmc_rw ,
+.Nm pmc_set ,
+.Nm pmc_start ,
+.Nm pmc_stop ,
+.Nm pmc_write ,
+.Nm pmc_x86_get_msr
+.Nd programming API for using hardware performance monitoring counters
+.Sh LIBRARY
+.Lb libpmc
+.Sh SYNOPSIS
+.In pmc.h
+.Ft int
+.Fo pmc_allocate
+.Fa "const char *eventspecifier"
+.Fa "enum pmc_mode mode"
+.Fa "uint32_t flags"
+.Fa "int cpu"
+.Fa "pmc_id_t *pmcid"
+.Fc
+.Ft int
+.Fo pmc_attach
+.Fa "pmc_id_t pmcid"
+.Fa "pid_t pid"
+.Fc
+.Ft int
+.Fn pmc_configure_logfile "int fd"
+.Ft int
+.Fn pmc_cpuinfo "const struct pmc_op_getcpuinfo **cpu_info"
+.Ft int
+.Fo pmc_detach
+.Fa "pmc_id_t pmcid"
+.Fa "pid_t pid"
+.Fc
+.Ft int
+.Fn pmc_disable "uint32_t cpu" "int pmc"
+.Ft int
+.Fn pmc_enable "uint32_t cpu" "int pmc"
+.Ft int
+.Fo pmc_event_names_of_class
+.Fa "enum pmc_class cl"
+.Fa "const char ***eventnames"
+.Fa "int *nevents"
+.Fc
+.Ft int
+.Fn pmc_get_driver_stats "struct pmc_op_getdriverstats *gms"
+.Ft int
+.Fn pmc_init "void"
+.Ft "const char *"
+.Fn pmc_name_of_capability "enum pmc_caps pc"
+.Ft "const char *"
+.Fn pmc_name_of_class "enum pmc_class pc"
+.Ft "const char *"
+.Fn pmc_name_of_cputype "enum pmc_cputype ct"
+.Ft "const char *"
+.Fn pmc_name_of_disposition "enum pmc_disp pd"
+.Ft "const char *"
+.Fn pmc_name_of_event "enum pmc_event pe"
+.Ft "const char *"
+.Fn pmc_name_of_mode "enum pmc_mode pm"
+.Ft "const char *"
+.Fn pmc_name_of_state "enum pmc_state ps"
+.Ft int
+.Fn pmc_ncpu "void"
+.Ft int
+.Fn pmc_npmc "uint32_t cpu"
+.Ft int
+.Fn pmc_pmcinfo "uint32_t cpu" "struct pmc_op_getpmcinfo **pmc_info"
+.Ft int
+.Fn pmc_read "pmc_id_t pmc" "pmc_value_t *value"
+.Ft int
+.Fn pmc_release "pmc_id_t pmc"
+.Ft int
+.Fn pmc_rw "pmc_id_t pmc" "pmc_value_t newvalue" "pmc_value_t *oldvaluep"
+.Ft int
+.Fn pmc_set "pmc_id_t pmc" "pmc_value_t value"
+.Ft int
+.Fn pmc_start "pmc_id_t pmc"
+.Ft int
+.Fn pmc_stop "pmc_id_t pmc"
+.Ft int
+.Fn pmc_write "pmc_id_t pmc" "pmc_value_t value"
+.Ft int
+.Fn pmc_x86_get_msr "pmc_id_t pmc" "uint32_t *msr"
+.Sh DESCRIPTION
+These functions implement a high-level library for using the
+system's hardware performance counters.
+.Pp
+PMCs are allocated using
+.Fn pmc_allocate ,
+released using
+.Fn pmc_release
+and read using
+.Fn pmc_read .
+Allocated PMCs may be started or stopped at any time using
+.Fn pmc_start
+and
+.Fn pmc_stop
+respectively.
+An allocated PMC may be of
+.Qq global
+scope, meaning that the PMC measures system-wide events, or
+.Qq process-private
+scope, meaning that the PMC only counts hardware events when
+the allocating process (or, optionally, its children)
+are active.
+.Pp
+PMCs may further be in
+.Qq "counting mode" ,
+or in
+.Qq "sampling mode" .
+Sampling mode PMCs deliver an interrupt to the CPU after
+a configured number of hardware events have been seen.
+A process-private sampling mode PMC will cause its owner
+process to get periodic
+.Sy SIGPROF
+interrupts, while a global sampling mode PMC is used to
+do system-wide statistical sampling (see
+.Xr hwpmc 4 ) .
+The desired sampling rate for a sampling-mode PMC is set using
+.Fn pmc_set .
+Counting mode PMCs do not interrupt the CPU; their values
+can be read using
+.Fn pmc_read .
+.Pp
+System-wide statistical sampling is configured by allocating
+at least one sampling mode PMC with
+global scope, and by configuring a log file using
+.Fn pmc_configure_logfile .
+The
+.Xr hwpmc 4
+driver manages system-wide statistical sampling; for more
+information please see
+.Xr hwpmc 4 .
+.Ss APPLICATION PROGRAMMING INTERFACE
+.Fn pmc_init
+initializes the
+.Xr pmc 3
+library.
+This function must be called first, before any of the other
+functions in the library.
+.Pp
+.Fn pmc_allocate
+allocates a counter that counts the events named by
+.Fa eventspecifier ,
+and writes the allocated counter id to
+.Fa *pmcid .
+Argument
+.Fa eventspecifier
+comprises a PMC event name followed by an optional comma-separated
+list of keywords and qualifiers.
+The allowed syntax for
+.Fa eventspecifier
+is processor architecture specific and is listed in section
+.Sx "EVENT SPECIFIERS"
+below.
+The desired PMC mode is specified by
+.Fa mode ,
+and any mode specific modifiers are specified using
+.Fa flags .
+The
+.Fa cpu
+argument is the value
+.Li PMC_CPU_ANY ,
+or names the CPU that the allocation is to be on.
+Requesting a specific CPU only makes sense for global PMCs;
+process-private PMC allocations should always specify
+.Li PMC_CPU_ANY .
+.Pp
+By default a PMC configured in process-virtual counting mode is set
+up to profile its owner process.
+The function
+.Fn pmc_attach
+may be used to attach the PMC to a different process.
+.Fn pmc_attach
+needs to be called before the counter is first started
+with
+.Fn pmc_start .
+The function
+.Fn pmc_detach
+may be used to detach a PMC from a process it was attached to
+using a prior call to
+.Fn pmc_attach .
+.Pp
+.Fn pmc_release
+releases a PMC previously allocated with
+.Fn pmc_allocate .
+This function call implicitly detaches the PMC from all its target
+processes.
+.Pp
+An allocated PMC may be started and stopped using
+.Fn pmc_start
+and
+.Fn pmc_stop
+respectively.
+.Pp
+The current value of a PMC may be read with
+.Fn pmc_read
+and written using
+.Fn pmc_write ,
+provided the underlying hardware supports these operations on
+the allocated PMC.
+The read and write operation may be combined using
+.Fn pmc_rw .
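+.Pp
+The following sketch (with all error checking elided) shows one way
+these functions could be combined to count the instructions retired
+by a section of code; the
+.Dq Li instructions
+event alias is described in section
+.Sx "EVENT SPECIFIERS"
+below, and the process-virtual counting mode
+.Li PMC_MODE_TC
+is assumed to be declared in
+.In pmc.h :
+.Bd -literal -offset indent
+pmc_id_t pmcid;
+pmc_value_t count;
+
+pmc_init();                     /* initialize the library */
+pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY, &pmcid);
+pmc_start(pmcid);               /* start counting */
+code_to_be_measured();          /* hypothetical workload */
+pmc_stop(pmcid);                /* stop counting */
+pmc_read(pmcid, &count);        /* retrieve the count */
+pmc_release(pmcid);
+.Ed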
+.Pp
+The
+.Fn pmc_configure_logfile
+function causes the
+.Xr hwpmc 4
+driver to log system-wide performance data to the file associated
+with the process's file descriptor
+.Fa fd .
+.Pp
+.Fn pmc_set
+configures a sampling PMC
+.Fa pmc
+to interrupt every
+.Fa value
+events.
+For counting PMCs,
+.Fn pmc_set
+sets the initial value of the PMC to
+.Fa value .
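+.Pp
+For example, a process that wishes to receive a
+.Sy SIGPROF
+signal for every 10000 branches it retires could use a
+process-private sampling PMC in the following manner (a sketch,
+with error checking elided and the process-virtual sampling mode
+.Li PMC_MODE_TS
+assumed to be declared in
+.In pmc.h ) :
+.Bd -literal -offset indent
+pmc_id_t pmcid;
+
+pmc_allocate("branches", PMC_MODE_TS, 0, PMC_CPU_ANY, &pmcid);
+pmc_set(pmcid, 10000);          /* one SIGPROF every 10000 events */
+pmc_start(pmcid);
+.Ed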
+.Pp
+.Fn pmc_get_driver_stats
+copies a snapshot of the usage statistics maintained by
+.Xr hwpmc 4
+into the memory area pointed to by argument
+.Fa gms .
+.Ss SIGNAL HANDLING REQUIREMENTS
+Applications using PMCs are required to handle the following signals:
+.Bl -tag -width indent
+.It SIGBUS
+When the
+.Xr hwpmc 4
+module is unloaded using
+.Xr kldunload 8 ,
+processes that have PMCs allocated to them will be sent a
+SIGBUS signal.
+.It SIGIO
+Attempting to read a PMC that is not currently attached to a running
+process will cause a SIGIO signal to be sent to the reader.
+.El
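+.Pp
+A minimal sketch of how an application might satisfy these
+requirements by installing handlers for both signals (the handler
+shown merely exits; a real application may want to clean up first):
+.Bd -literal -offset indent
+#include <signal.h>
+#include <unistd.h>
+
+static void
+pmc_signal_handler(int sig)
+{
+        _exit(1);       /* e.g., hwpmc(4) was unloaded */
+}
+
+/* in main(): */
+signal(SIGBUS, pmc_signal_handler);
+signal(SIGIO, pmc_signal_handler);
+.Ed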
+.Ss CONVENIENCE FUNCTIONS
+.Fn pmc_ncpu
+returns the number of CPUs present in the system.
+.Pp
+.Fn pmc_npmc
+returns the number of PMCs supported on CPU
+.Fa cpu .
+.Pp
+.Fn pmc_cpuinfo
+sets argument
+.Fa cpu_info
+to point to a structure with information about the system's CPUs.
+.Pp
+.Fn pmc_pmcinfo
+returns information about the current state of CPU
+.Fa cpu Ap s
+PMCs.
+.Pp
+The functions
+.Fn pmc_name_of_capability ,
+.Fn pmc_name_of_class ,
+.Fn pmc_name_of_cputype ,
+.Fn pmc_name_of_disposition ,
+.Fn pmc_name_of_event ,
+.Fn pmc_name_of_mode
+and
+.Fn pmc_name_of_state
+are useful for code wanting to print error messages.
+They return
+.Ft "const char *"
+pointers to human-readable representations of their arguments.
+These return values should not be freed using
+.Xr free 3 .
+.Pp
+.Fn pmc_event_names_of_class
+returns a list of event names supported by a given PMC class
+.Fa cl .
+On successful return, an array of
+.Ft "const char *"
+pointers to the names of valid events supported by class
+.Fa cl
+is allocated by the library using
+.Xr malloc 3 ,
+and a pointer to this array is returned in the location pointed to by
+.Fa eventnames .
+The number of pointers allocated is returned in the location pointed
+to by
+.Fa nevents .
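+This array should be freed using
+.Xr free 3
+when it is no longer needed.
+A sketch that prints the event names supported by the AMD K7 class
+(using the class constant
+.Li PMC_CLASS_K7
+from
+.In pmc.h )
+could look like:
+.Bd -literal -offset indent
+const char **names;
+int i, nevents;
+
+if (pmc_event_names_of_class(PMC_CLASS_K7, &names, &nevents) == 0) {
+        for (i = 0; i < nevents; i++)
+                printf("%s\en", names[i]);
+        free(names);
+}
+.Ed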
+.Ss ADMINISTRATION
+Individual PMCs may be enabled or disabled on a given CPU using
+.Fn pmc_enable
+and
+.Fn pmc_disable
+respectively.
+For these functions,
+.Fa cpu
+is the CPU number, and
+.Fa pmc
+is the index of the PMC to be operated on.
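+For example, the call
+.Ql pmc_disable(0, 1)
+would disable the PMC at index 1 on CPU 0.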
+Only the super-user is allowed to enable and disable PMCs.
+.Ss X86 ARCHITECTURE SPECIFIC API
+The
+.Fn pmc_x86_get_msr
+function returns the processor's model-specific register (MSR)
+number associated with
+.Fa pmc .
+Applications may use the x86
+.Sy RDPMC
+instruction to directly read the contents of the PMC.
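+.Pp
+A sketch of such a direct read, using GCC-style inline assembly and
+assuming that
+.Va pmcid
+names a previously allocated and started PMC:
+.Bd -literal -offset indent
+uint32_t hi, lo, reg;
+
+pmc_x86_get_msr(pmcid, &reg);
+__asm __volatile("rdpmc"                /* counter number in %ecx */
+    : "=a" (lo), "=d" (hi)              /* result in %edx:%eax */
+    : "c" (reg));
+.Ed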
+.Sh EVENT SPECIFIERS
+Event specifiers are strings comprising an event name, followed by
+optional parameters modifying the semantics of the hardware event
+being probed.
+Event names are PMC architecture dependent, but the
+.Xr pmc 3
+library defines machine independent aliases for commonly used
+events.
+.Ss Event Name Aliases
+Event name aliases are CPU architecture independent names for commonly
+used events.
+The following aliases are known to this version of the
+.Xr pmc 3
+library:
+.Bl -tag -width indent
+.It Li branches
+Measure the number of branches retired.
+.It Li branch-mispredicts
+Measure the number of retired branches that were mispredicted.
+.It Li cycles
+Measure processor cycles.
+This event is implemented using the processor's Time Stamp Counter
+register.
+.It Li dc-misses
+Measure the number of data cache misses.
+.It Li ic-misses
+Measure the number of instruction cache misses.
+.It Li instructions
+Measure the number of instructions retired.
+.It Li interrupts
+Measure the number of interrupts seen.
+.El
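+.Pp
+These aliases may be used wherever an event specifier is expected.
+For example, a process-virtual counting PMC measuring data cache
+misses could be allocated using (a sketch; the mode name
+.Li PMC_MODE_TC
+is assumed to be declared in
+.In pmc.h ) :
+.Bd -literal -offset indent
+pmc_id_t pmcid;
+
+pmc_allocate("dc-misses", PMC_MODE_TC, 0, PMC_CPU_ANY, &pmcid);
+.Ed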
+.Ss Time Stamp Counter (TSC)
+The timestamp counter is a monotonically non-decreasing counter that
+counts processor cycles.
+.Pp
+In the i386 architecture this counter may
+be selected by requesting an event with eventspecifier
+.Ic tsc .
+The
+.Ic tsc
+event does not support any further qualifiers.
+It can only be allocated in system-wide counting mode,
+and is a read-only counter.
+Multiple processes are allowed to allocate the TSC.
+Once allocated, it may be read using the
+.Fn pmc_read
+function, or by using the RDTSC instruction.
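+For example, a sketch that reads the cycle count on CPU 0
+(with the system-wide counting mode
+.Li PMC_MODE_SC
+assumed to be declared in
+.In pmc.h ) :
+.Bd -literal -offset indent
+pmc_id_t tsc;
+pmc_value_t cycles;
+
+pmc_allocate("tsc", PMC_MODE_SC, 0, 0, &tsc);
+pmc_read(tsc, &cycles);
+.Ed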
+.Ss AMD (K7) PMCs
+These PMCs are present in the
+.Tn "AMD Athlon"
+series of CPUs and are documented in:
+.Rs
+.%B "AMD Athlon Processor x86 Code Optimization Guide"
+.%N "Publication No. 22007"
+.%D "February 2002"
+.%Q "Advanced Micronic Devices, Inc."
+.Re
+.Pp
+Event specifiers for AMD K7 PMCs can have the following optional
+qualifiers:
+.Bl -tag -width indent
+.It Li count= Ns Ar value
+Configure the counter to increment only if the number of configured
+events measured in a cycle is greater than or equal to
+.Ar value .
+.It Li edge
+Configure the counter to only count negated-to-asserted transitions
+of the conditions expressed by the other qualifiers.
+In other words, the counter will increment only once whenever a given
+condition becomes true, irrespective of the number of clocks during
+which the condition remains true.
+.It Li inv
+Invert the sense of comparison when the
+.Li count
+qualifier is present, making the counter increment when the
+number of events per cycle is less than the value specified by
+the
+.Li count
+qualifier.
+.It Li os
+Configure the PMC to count events happening at privilege level 0.
+.It Li unitmask= Ns Ar mask
+This qualifier is used to further qualify a select few events,
+.Li k7-dc-refills-from-l2 ,
+.Li k7-dc-refills-from-system
+and
+.Li k7-dc-writebacks .
+Here
+.Ar mask
+is a string of the following characters optionally separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li m
+Count operations for lines in the
+.Dq Modified
+state.
+.It Li o
+Count operations for lines in the
+.Dq Owner
+state.
+.It Li e
+Count operations for lines in the
+.Dq Exclusive
+state.
+.It Li s
+Count operations for lines in the
+.Dq Shared
+state.
+.It Li i
+Count operations for lines in the
+.Dq Invalid
+state.
+.El
+If no
+.Ar unitmask
+qualifier is specified, the default is to count events for cache
+lines in any of the above states.
+.It Li usr
+Configure the PMC to count events occurring at privilege levels 1, 2
+or 3.
+.El
+If neither of the
+.Li os
+or
+.Li usr
+qualifiers is specified, the default is to enable both.
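+.Pp
+For example, the event specifier
+.Dq Li k7-dc-refills-from-l2,unitmask=m+e,usr
+would count data cache refills from the L2 cache for lines in the
+.Dq Modified
+or
+.Dq Exclusive
+states, counting only events that occur at privilege levels 1, 2 or 3.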
+.Pp
+The event specifiers supported on AMD K7 PMCs are:
+.Bl -tag -width indent
+.It Li k7-dc-accesses
+Count data cache accesses.
+.It Li k7-dc-misses
+Count data cache misses.
+.It Li k7-dc-refills-from-l2 Op Li ,unitmask= Ns Ar mask
+Count data cache refills from L2 cache.
+This event may be further qualified using the
+.Li unitmask
+qualifier.
+.It Li k7-dc-refills-from-system Op Li ,unitmask= Ns Ar mask
+Count data cache refills from system memory.
+This event may be further qualified using the
+.Li unitmask
+qualifier.
+.It Li k7-dc-writebacks Op Li ,unitmask= Ns Ar mask
+Count data cache writebacks.
+This event may be further qualified using the
+.Li unitmask
+qualifier.
+.It Li k7-l1-dtlb-miss-and-l2-dtlb-hits
+Count L1 DTLB misses and L2 DTLB hits.
+.It Li k7-l1-and-l2-dtlb-misses
+Count L1 and L2 DTLB misses.
+.It Li k7-misaligned-references
+Count misaligned data references.
+.It Li k7-ic-fetches
+Count instruction cache fetches.
+.It Li k7-ic-misses
+Count instruction cache misses.
+.It Li k7-l1-itlb-misses
+Count L1 ITLB misses that are L2 ITLB hits.
+.It Li k7-l1-l2-itlb-misses
+Count L1 (and L2) ITLB misses.
+.It Li k7-retired-instructions
+Count all retired instructions.
+.It Li k7-retired-ops
+Count retired ops.
+.It Li k7-retired-branches
+Count all retired branches (conditional, unconditional, exceptions
+and interrupts).
+.It Li k7-retired-branches-mispredicted
+Count all mispredicted retired branches.
+.It Li k7-retired-taken-branches
+Count retired taken branches.
+.It Li k7-retired-taken-branches-mispredicted
+Count mispredicted taken branches that were retired.
+.It Li k7-retired-far-control-transfers
+Count retired far control transfers.
+.It Li k7-retired-resync-branches
+Count retired resync branches (non-control-transfer branches).
+.It Li k7-interrupts-masked-cycles
+Count the number of cycles when the processor's
+.Li IF
+flag was zero.
+.It Li k7-interrupts-masked-while-pending-cycles
+Count the number of cycles interrupts were masked while pending due
+to the processor's
+.Li IF
+flag being zero.
+.It Li k7-hardware-interrupts
+Count the number of taken hardware interrupts.
+.El
+.Ss AMD (K8) PMCs
+These PMCs are present in the
+.Tn "AMD Athlon64"
+and
+.Tn "AMD Opteron"
+series of CPUs.
+They are documented in:
+.Rs
+.%B "BIOS and Kernel Developer's Guide for the AMD Athlon(tm) 64 and AMD Opteron Processors"
+.%N "Publication No. 26094"
+.%D "April 2004"
+.%Q "Advanced Micronic Devices, Inc."
+.Re
+.Pp
+Event specifiers for AMD K8 PMCs can have the following optional
+qualifiers:
+.Bl -tag -width indent
+.It Li count= Ns Ar value
+Configure the counter to increment only if the number of configured
+events measured in a cycle is greater than or equal to
+.Ar value .
+.It Li edge
+Configure the counter to only count negated-to-asserted transitions
+of the conditions expressed by the other fields.
+In other words, the counter will increment only once whenever a given
+condition becomes true, irrespective of the number of clocks during
+which the condition remains true.
+.It Li inv
+Invert the sense of comparison when the
+.Li count
+qualifier is present, making the counter increment when the
+number of events per cycle is less than the value specified by
+the
+.Li count
+qualifier.
+.It Li mask= Ns Ar qualifier
+Many event specifiers for AMD K8 PMCs need to be additionally
+qualified using a mask qualifier.
+These additional qualifiers are event-specific and are documented
+along with their associated event specifiers below.
+.It Li os
+Configure the PMC to count events happening at privilege level 0.
+.It Li usr
+Configure the PMC to count events occurring at privilege levels 1, 2
+or 3.
+.El
+If neither of the
+.Li os
+or
+.Li usr
+qualifiers is specified, the default is to enable both.
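+.Pp
+For example, the event specifier
+.Dq Li k8-dc-refill-from-system,mask=modified+shared,os
+would count data cache refills from system memory for lines in the
+.Dq modified
+or
+.Dq shared
+states, counting only events that occur at privilege level 0.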
+.Pp
+The event specifiers supported on AMD K8 PMCs are:
+.Bl -tag -width indent
+.It Li k8-bu-cpu-clk-unhalted
+Count the number of clock cycles when the CPU is not in the HLT or
+STPCLK states.
+.It Li k8-bu-fill-request-l2-miss Op Li ,mask= Ns Ar qualifier
+Count fill requests that missed in the L2 cache.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li + Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It Li dc-fill
+Count data cache fill requests.
+.It Li ic-fill
+Count instruction cache fill requests.
+.It Li tlb-reload
+Count TLB reloads.
+.El
+The default is to count all types of requests.
+.It Li k8-bu-internal-l2-request Op Li ,mask= Ns Ar qualifier
+Count internally generated requests to the L2 cache.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It Li cancelled
+Count cancelled requests.
+.It Li dc-fill
+Count data cache fill requests.
+.It Li ic-fill
+Count instruction cache fill requests.
+.It Li tag-snoop
+Count tag snoop requests.
+.It Li tlb-reload
+Count TLB reloads.
+.El
+The default is to count all types of requests.
+.It Li k8-dc-access
+Count data cache accesses including microcode scratchpad accesses.
+.It Li k8-dc-copyback Op Li ,mask= Ns Ar qualifier
+Count data cache copyback operations.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "exclusive" -compact
+.It Li exclusive
+Count operations for lines in the
+.Dq exclusive
+state.
+.It Li invalid
+Count operations for lines in the
+.Dq invalid
+state.
+.It Li modified
+Count operations for lines in the
+.Dq modified
+state.
+.It Li owner
+Count operations for lines in the
+.Dq owner
+state.
+.It Li shared
+Count operations for lines in the
+.Dq shared
+state.
+.El
+The default is to count operations for lines in all the
+above states.
+.It Li k8-dc-dcache-accesses-by-locks Op Li ,mask= Ns Ar qualifier
+Count data cache accesses by lock instructions.
+This event is only available on processors of revision C or later
+vintage.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "exclusive" -compact
+.It Li accesses
+Count data cache accesses by lock instructions.
+.It Li misses
+Count data cache misses by lock instructions.
+.El
+The default is to count all accesses.
+.It Li k8-dc-dispatched-prefetch-instructions Op Li ,mask= Ns Ar qualifier
+Count the number of dispatched prefetch instructions.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "exclusive" -compact
+.It Li load
+Count load operations.
+.It Li nta
+Count non-temporal operations.
+.It Li store
+Count store operations.
+.El
+The default is to count all operations.
+.It Li k8-dc-l1-dtlb-miss-and-l2-dtlb-hit
+Count L1 DTLB misses that are L2 DTLB hits.
+.It Li k8-dc-l1-dtlb-miss-and-l2-dtlb-miss
+Count L1 DTLB misses that are also misses in the L2 DTLB.
+.It Li k8-dc-microarchitectural-early-cancel-of-an-access
+Count microarchitectural early cancels of data cache accesses.
+.It Li k8-dc-microarchitectural-late-cancel-of-an-access
+Count microarchitectural late cancels of data cache accesses.
+.It Li k8-dc-misaligned-data-reference
+Count misaligned data references.
+.It Li k8-dc-miss
+Count data cache misses.
+.It Li k8-dc-one-bit-ecc-error Op Li ,mask= Ns Ar qualifier
+Count one bit ECC errors found by the scrubber.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "piggyback" -compact
+.It Li scrubber
+Count scrubber detected errors.
+.It Li piggyback
+Count piggyback scrubber errors.
+.El
+The default is to count both kinds of errors.
+.It Li k8-dc-refill-from-l2 Op Li ,mask= Ns Ar qualifier
+Count data cache refills from L2 cache.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "exclusive" -compact
+.It Li exclusive
+Count operations for lines in the
+.Dq exclusive
+state.
+.It Li invalid
+Count operations for lines in the
+.Dq invalid
+state.
+.It Li modified
+Count operations for lines in the
+.Dq modified
+state.
+.It Li owner
+Count operations for lines in the
+.Dq owner
+state.
+.It Li shared
+Count operations for lines in the
+.Dq shared
+state.
+.El
+The default is to count operations for lines in all the
+above states.
+.It Li k8-dc-refill-from-system Op Li ,mask= Ns Ar qualifier
+Count data cache refills from system memory.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "exclusive" -compact
+.It Li exclusive
+Count operations for lines in the
+.Dq exclusive
+state.
+.It Li invalid
+Count operations for lines in the
+.Dq invalid
+state.
+.It Li modified
+Count operations for lines in the
+.Dq modified
+state.
+.It Li owner
+Count operations for lines in the
+.Dq owner
+state.
+.It Li shared
+Count operations for lines in the
+.Dq shared
+state.
+.El
+The default is to count operations for lines in all the
+above states.
+.It Li k8-fp-dispatched-fpu-ops Op Li ,mask= Ns Ar qualifier
+Count the number of dispatched FPU ops.
+This event is supported in revision B and later CPUs.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It Li add-pipe-excluding-junk-ops
+Count add pipe ops excluding junk ops.
+.It Li add-pipe-junk-ops
+Count junk ops in the add pipe.
+.It Li multiply-pipe-excluding-junk-ops
+Count multiply pipe ops excluding junk ops.
+.It Li multiply-pipe-junk-ops
+Count junk ops in the multiply pipe.
+.It Li store-pipe-excluding-junk-ops
+Count store pipe ops excluding junk ops.
+.It Li store-pipe-junk-ops
+Count junk ops in the store pipe.
+.El
+The default is to count all types of ops.
+.It Li k8-fp-cycles-with-no-fpu-ops-retired
+Count cycles when no FPU ops were retired.
+This event is supported in revision B and later CPUs.
+.It Li k8-fp-dispatched-fpu-fast-flag-ops
+Count dispatched FPU ops that use the fast flag interface.
+This event is supported in revision B and later CPUs.
+.It Li k8-fr-decoder-empty
+Count cycles when there was nothing to dispatch (i.e., the decoder
+was empty).
+.It Li k8-fr-dispatch-stalls
+Count all dispatch stalls.
+.It Li k8-fr-dispatch-stall-for-segment-load
+Count dispatch stalls for segment loads.
+.It Li k8-fr-dispatch-stall-for-serialization
+Count dispatch stalls for serialization.
+.It Li k8-fr-dispatch-stall-from-branch-abort-to-retire
+Count dispatch stalls from branch abort to retire.
+.It Li k8-fr-dispatch-stall-when-fpu-is-full
+Count dispatch stalls when the FPU is full.
+.It Li k8-fr-dispatch-stall-when-ls-is-full
+Count dispatch stalls when the load/store unit is full.
+.It Li k8-fr-dispatch-stall-when-reorder-buffer-is-full
+Count dispatch stalls when the reorder buffer is full.
+.It Li k8-fr-dispatch-stall-when-reservation-stations-are-full
+Count dispatch stalls when reservation stations are full.
+.It Li k8-fr-dispatch-stall-when-waiting-for-all-to-be-quiet
+Count dispatch stalls when waiting for all to be quiet.
+.\" XXX What does "waiting for all to be quiet" mean?
+.It Li k8-fr-dispatch-stall-when-waiting-far-xfer-or-resync-branch-pending
+Count dispatch stalls when a far control transfer or a resync branch
+is pending.
+.It Li k8-fr-fpu-exceptions Op Li ,mask= Ns Ar qualifier
+Count FPU exceptions.
+This event is supported in revision B and later CPUs.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It Li sse-and-x87-microtraps
+Count SSE and x87 microtraps.
+.It Li sse-reclass-microfaults
+Count SSE reclass microfaults.
+.It Li sse-retype-microfaults
+Count SSE retype microfaults.
+.It Li x87-reclass-microfaults
+Count x87 reclass microfaults.
+.El
+The default is to count all types of exceptions.
+.It Li k8-fr-interrupts-masked-cycles
+Count cycles when interrupts were masked (i.e., when the CPU
+RFLAGS IF field was zero).
+.It Li k8-fr-interrupts-masked-while-pending-cycles
+Count cycles when interrupts were masked while pending (i.e., cycles
+when INTR was asserted while the CPU RFLAGS IF field was zero).
+.It Li k8-fr-number-of-breakpoints-for-dr0
+Count the number of breakpoints for DR0.
+.It Li k8-fr-number-of-breakpoints-for-dr1
+Count the number of breakpoints for DR1.
+.It Li k8-fr-number-of-breakpoints-for-dr2
+Count the number of breakpoints for DR2.
+.It Li k8-fr-number-of-breakpoints-for-dr3
+Count the number of breakpoints for DR3.
+.It Li k8-fr-retired-branches
+Count retired branches including exceptions and interrupts.
+.It Li k8-fr-retired-branches-mispredicted
+Count mispredicted retired branches.
+.It Li k8-fr-retired-far-control-transfers
+Count retired far control transfers (which are always mispredicted).
+.It Li k8-fr-retired-fastpath-double-op-instructions Op Li ,mask= Ns Ar qualifier
+Count retired fastpath double op instructions.
+This event is supported in revision B and later CPUs.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXXXX" -compact
+.It Li low-op-pos-0
+Count instructions with the low op in position 0.
+.It Li low-op-pos-1
+Count instructions with the low op in position 1.
+.It Li low-op-pos-2
+Count instructions with the low op in position 2.
+.El
+The default is to count all types of instructions.
+.It Li k8-fr-retired-fpu-instructions Op Li ,mask= Ns Ar qualifier
+Count retired FPU instructions.
+This event is supported in revision B and later CPUs.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It Li mmx-3dnow
+Count MMX and 3DNow! instructions.
+.It Li packed-sse-sse2
+Count packed SSE and SSE2 instructions.
+.It Li scalar-sse-sse2
+Count scalar SSE and SSE2 instructions.
+.It Li x87
+Count x87 instructions.
+.El
+The default is to count all types of instructions.
+.It Li k8-fr-retired-near-returns
+Count retired near returns.
+.It Li k8-fr-retired-near-returns-mispredicted
+Count mispredicted near returns.
+.It Li k8-fr-retired-resyncs
+Count retired resyncs (non-control-transfer branches).
+.It Li k8-fr-retired-taken-hardware-interrupts
+Count retired taken hardware interrupts.
+.It Li k8-fr-retired-taken-branches
+Count retired taken branches.
+.It Li k8-fr-retired-taken-branches-mispredicted
+Count retired taken branches that were mispredicted.
+.It Li k8-fr-retired-taken-branches-mispredicted-by-addr-miscompare
+Count retired taken branches that were mispredicted only due to an
+address miscompare.
+.It Li k8-fr-retired-uops
+Count retired uops.
+.It Li k8-fr-retired-x86-instructions
+Count retired x86 instructions including exceptions and interrupts.
+.It Li k8-ic-fetch
+Count instruction cache fetches.
+.It Li k8-ic-instruction-fetch-stall
+Count cycles in stalls due to instruction fetch.
+.It Li k8-ic-l1-itlb-miss-and-l2-itlb-hit
+Count L1 ITLB misses that are L2 ITLB hits.
+.It Li k8-ic-l1-itlb-miss-and-l2-itlb-miss
+Count ITLB misses that miss in both L1 and L2 ITLBs.
+.It Li k8-ic-microarchitectural-resync-by-snoop
+Count microarchitectural resyncs caused by snoops.
+.It Li k8-ic-miss
+Count instruction cache misses.
+.It Li k8-ic-refill-from-l2
+Count instruction cache refills from L2 cache.
+.It Li k8-ic-refill-from-system
+Count instruction cache refills from system memory.
+.It Li k8-ic-return-stack-hits
+Count hits to the return stack.
+.It Li k8-ic-return-stack-overflow
+Count overflows of the return stack.
+.It Li k8-ls-buffer2-full
+Count load/store buffer2 full events.
+.It Li k8-ls-locked-operation Op Li ,mask= Ns Ar qualifier
+Count locked operations.
+For revision C and later CPUs, the following qualifiers are supported:
+.Bl -tag -width "XXXXXXXXXXXXX" -compact
+.It Li cycles-in-request
+Count the number of cycles in the lock request/grant stage.
+.It Li cycles-to-complete
+Count the number of cycles a lock takes to complete once it is
+non-speculative and is the older load/store operation.
+.It Li locked-instructions
+Count the number of lock instructions executed.
+.El
+The default is to count the number of lock instructions executed.
+.It Li k8-ls-microarchitectural-late-cancel
+Count microarchitectural late cancels of operations in the load/store
+unit.
+.It Li k8-ls-microarchitectural-resync-by-self-modifying-code
+Count microarchitectural resyncs caused by self-modifying code.
+.It Li k8-ls-microarchitectural-resync-by-snoop
+Count microarchitectural resyncs caused by snoops.
+.It Li k8-ls-retired-cflush-instructions
+Count retired CFLUSH instructions.
+.It Li k8-ls-retired-cpuid-instructions
+Count retired CPUID instructions.
+.It Li k8-ls-segment-register-load Op Li ,mask= Ns Ar qualifier
+Count segment register loads.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XX" -compact
+.It Li cs
+Count CS register loads.
+.It Li ds
+Count DS register loads.
+.It Li es
+Count ES register loads.
+.It Li fs
+Count FS register loads.
+.It Li gs
+Count GS register loads.
+.\" .It Ic hs
+.\" Count HS register loads.
+.\" XXX "HS" register?
+.It Li ss
+Count SS register loads.
+.El
+The default is to count all types of loads.
+.It Li k8-nb-memory-controller-bypass-saturation Op Li ,mask= Ns Ar qualifier
+Count memory controller bypass counter saturation events.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It Li dram-controller-interface-bypass
+Count DRAM controller interface bypass.
+.It Li dram-controller-queue-bypass
+Count DRAM controller queue bypass.
+.It Li memory-controller-hi-pri-bypass
+Count memory controller high priority bypasses.
+.It Li memory-controller-lo-pri-bypass
+Count memory controller low priority bypasses.
+.El
+.It Li k8-nb-memory-controller-dram-slots-missed
+Count memory controller DRAM command slots missed (in MemClks).
+.It Li k8-nb-memory-controller-page-access-event Op Li ,mask= Ns Ar qualifier
+Count memory controller page access events.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It Li page-conflict
+Count page conflicts.
+.It Li page-hit
+Count page hits.
+.It Li page-miss
+Count page misses.
+.El
+The default is to count all types of events.
+.It Li k8-nb-memory-controller-page-table-overflow
+Count memory controller page table overflow events.
+.It Li k8-nb-probe-result Op Li ,mask= Ns Ar qualifier
+Count probe events.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "exclusive" -compact
+.It Li probe-hit
+Count all probe hits.
+.It Li probe-hit-dirty-no-memory-cancel
+Count probe hits without memory cancels.
+.It Li probe-hit-dirty-with-memory-cancel
+Count probe hits with memory cancels.
+.It Li probe-miss
+Count probe misses.
+.El
+.It Li k8-nb-sized-commands Op Li ,mask= Ns Ar qualifier
+Count sized commands issued.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "exclusive" -compact
+.It Li nonpostwrszbyte
+.It Li nonpostwrszdword
+.It Li postwrszbyte
+.It Li postwrszdword
+.It Li rdszbyte
+.It Li rdszdword
+.It Li rdmodwr
+.El
+The default is to count all types of commands.
+.It Li k8-nb-memory-controller-turnaround Op Li ,mask= Ns Ar qualifier
+Count memory controller turnaround events.
+This event may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.\" XXX doc is unclear whether these are cycle counts or event counts
+.It Li dimm-turnaround
+Count DIMM turnarounds.
+.It Li read-to-write-turnaround
+Count read to write turnarounds.
+.It Li write-to-read-turnaround
+Count write to read turnarounds.
+.El
+The default is to count all types of events.
+.It Li k8-nb-ht-bus0-bandwidth Op Li ,mask= Ns Ar qualifier
+.It Li k8-nb-ht-bus1-bandwidth Op Li ,mask= Ns Ar qualifier
+.It Li k8-nb-ht-bus2-bandwidth Op Li ,mask= Ns Ar qualifier
+Count events on the HyperTransport(tm) buses.
+These events may be further qualified using
+.Ar qualifier ,
+which is a
+.Li "+" Ns - Ns
+separated set of the following keywords:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It Li buffer-release
+Count buffer release messages sent.
+.It Li command
+Count command messages sent.
+.It Li data
+Count data messages sent.
+.It Li nop
+Count nop messages sent.
+.El
+The default is to count all types of messages.
+.El
+.Ss Intel P6 PMCs
+Intel P6 PMCs are present in Intel
+.Tn "Pentium Pro" ,
+.Tn "Pentium II" ,
+.Tn "Celeron" ,
+.Tn "Pentium III"
+and
+.Tn "Pentium M"
+processors.
+.Pp
+These CPUs have two counters.
+Some events may only be used on specific counters and some events are
+defined only on specific processor models.
+.Pp
+These PMCs are documented in
+.Rs
+.%B "IA-32 Intel(R) Architecture Software Developer's Manual"
+.%T "Volume 3: System Programming Guide"
+.%N "Order Number 245472-012"
+.%D 2003
+.%Q "Intel Corporation"
+.Re
+.Pp
+Event specifiers for Intel P6 PMCs can have the following common
+qualifiers:
+.Bl -tag -width indent
+.It Li cmask= Ns Ar value
+Configure the PMC to increment only if the number of configured
+events measured in a cycle is greater than or equal to
+.Ar value .
+.It Li edge
+Configure the PMC to count the number of deasserted to asserted
+transitions of the conditions expressed by the other qualifiers.
+If specified, the counter will increment only once whenever a
+condition becomes true, irrespective of the number of clocks during
+which the condition remains true.
+.It Li inv
+Invert the sense of comparison when the
+.Ar cmask
+qualifier is present, making the counter increment when the number of
+events per cycle is less than the value specified by the
+.Ar cmask
+qualifier.
+.It Li os
+Configure the PMC to count events happening at processor privilege
+level 0.
+.It Li umask= Ns Ar value
+This qualifier is used to further qualify the event selected (see
+below).
+.It Li usr
+Configure the PMC to count events occurring at privilege levels 1, 2
+or 3.
+.El
+If neither of the
+.Li os
+or
+.Li usr
+qualifiers is specified, the default is to enable both.
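+.Pp
+For example, the event specifier
+.Dq Li p6-l2-ifetch,umask=e+m,os
+would count L2 instruction fetches affecting lines in the E
+(exclusive) or M (modified) states, counting only events occurring
+at privilege level 0.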
+.Pp
+The event specifiers supported by Intel P6 PMCs are:
+.Bl -tag -width indent
+.It Li p6-baclears
+Count the number of times a static branch prediction was made by the
+branch decoder because the BTB did not have a prediction.
+.It Li p6-br-bac-missp-exec
+.Pq Tn "Pentium M"
+Count the number of branch instructions executed that were
+mispredicted at the Front End (BAC).
+.It Li p6-br-bogus
+Count the number of bogus branches.
+.It Li p6-br-call-exec
+.Pq Tn "Pentium M"
+Count the number of call instructions executed.
+.It Li p6-br-call-missp-exec
+.Pq Tn "Pentium M"
+Count the number of call instructions executed that were mispredicted.
+.It Li p6-br-cnd-exec
+.Pq Tn "Pentium M"
+Count the number of conditional branch instructions executed.
+.It Li p6-br-cnd-missp-exec
+.Pq Tn "Pentium M"
+Count the number of conditional branch instructions executed that were
+mispredicted.
+.It Li p6-br-ind-call-exec
+.Pq Tn "Pentium M"
+Count the number of indirect call instructions executed.
+.It Li p6-br-ind-exec
+.Pq Tn "Pentium M"
+Count the number of indirect branch instructions executed.
+.It Li p6-br-ind-missp-exec
+.Pq Tn "Pentium M"
+Count the number of indirect branch instructions executed that were
+mispredicted.
+.It Li p6-br-inst-decoded
+Count the number of branch instructions decoded.
+.It Li p6-br-inst-exec
+.Pq Tn "Pentium M"
+Count the number of branch instructions executed but not
+necessarily retired.
+.It Li p6-br-inst-retired
+Count the number of branch instructions retired.
+.It Li p6-br-miss-pred-retired
+Count the number of mispredicted branch instructions retired.
+.It Li p6-br-miss-pred-taken-ret
+Count the number of taken mispredicted branches retired.
+.It Li p6-br-missp-exec
+.Pq Tn "Pentium M"
+Count the number of branch instructions executed that were
+mispredicted at execution.
+.It Li p6-br-ret-bac-missp-exec
+.Pq Tn "Pentium M"
+Count the number of return instructions executed that were
+mispredicted at the Front End (BAC).
+.It Li p6-br-ret-exec
+.Pq Tn "Pentium M"
+Count the number of return instructions executed.
+.It Li p6-br-ret-missp-exec
+.Pq Tn "Pentium M"
+Count the number of return instructions executed that were
+mispredicted at execution.
+.It Li p6-br-taken-retired
+Count the number of taken branches retired.
+.It Li p6-btb-misses
+Count the number of branches for which the BTB did not produce a
+prediction.
+.It Li p6-bus-bnr-drv
+Count the number of bus clock cycles during which this processor is
+driving the BNR# pin.
+.It Li p6-bus-data-rcv
+Count the number of bus clock cycles during which this processor is
+receiving data.
+.It Li p6-bus-drdy-clocks Op Li ,umask= Ns Ar qualifier
+Count the number of clocks during which DRDY# is asserted.
+An additional qualifier may be specified, and comprises one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-hit-drv
+Count the number of bus clock cycles during which this processor is
+driving the HIT# pin.
+.It Li p6-bus-hitm-drv
+Count the number of bus clock cycles during which this processor is
+driving the HITM# pin.
+.It Li p6-bus-lock-clocks Op Li ,umask= Ns Ar qualifier
+Count the number of clocks during which LOCK# is asserted on the
+external system bus.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-req-outstanding
+Count the number of bus requests outstanding in any given cycle.
+.It Li p6-bus-snoop-stall
+Count the number of clock cycles during which the bus is snoop stalled.
+.It Li p6-bus-tran-any Op Li ,umask= Ns Ar qualifier
+Count the number of completed bus transactions of any kind.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-tran-brd Op Li ,umask= Ns Ar qualifier
+Count the number of burst read transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-tran-burst Op Li ,umask= Ns Ar qualifier
+Count the number of completed burst transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-tran-def Op Li ,umask= Ns Ar qualifier
+Count the number of completed deferred transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-tran-ifetch Op Li ,umask= Ns Ar qualifier
+Count the number of completed instruction fetch transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-tran-inval Op Li ,umask= Ns Ar qualifier
+Count the number of completed invalidate transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-tran-mem Op Li ,umask= Ns Ar qualifier
+Count the number of completed memory transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-tran-pwr Op Li ,umask= Ns Ar qualifier
+Count the number of completed partial write transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-tran-rfo Op Li ,umask= Ns Ar qualifier
+Count the number of completed read-for-ownership transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-trans-io Op Li ,umask= Ns Ar qualifier
+Count the number of completed I/O transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-trans-p Op Li ,umask= Ns Ar qualifier
+Count the number of completed partial transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-bus-trans-wb Op Li ,umask= Ns Ar qualifier
+Count the number of completed write-back transactions.
+An additional qualifier may be specified and comprises one of the following
+keywords:
+.Bl -tag -width indent -compact
+.It Li any
+Count transactions generated by any agent on the bus.
+.It Li self
+Count transactions generated by this processor.
+.El
+The default is to count operations generated by this processor.
+.It Li p6-cpu-clk-unhalted
+Count the number of cycles during which the processor was not halted.
+.Pp
+.Pq Tn "Pentium M"
+Count the number of cycles during which the processor was not halted
+and not in a thermal trip.
+.It Li p6-cycles-div-busy
+Count the number of cycles during which the divider is busy and cannot
+accept new divides.
+This event is only allocated on counter 0.
+.It Li p6-cycles-in-pending-and-masked
+Count the number of processor cycles for which interrupts were
+disabled and interrupts were pending.
+.It Li p6-cycles-int-masked
+Count the number of processor cycles for which interrupts were
+disabled.
+.It Li p6-data-mem-refs
+Count all loads and all stores using any memory type, including
+internal retries.
+Each part of a split store is counted separately.
+.It Li p6-dcu-lines-in
+Count the total lines allocated in the data cache unit.
+.It Li p6-dcu-m-lines-in
+Count the number of M state lines allocated in the data cache unit.
+.It Li p6-dcu-m-lines-out
+Count the number of M state lines evicted from the data cache unit.
+.It Li p6-dcu-miss-outstanding
+Count the weighted number of cycles while a data cache unit miss is
+outstanding, incremented by the number of outstanding cache misses at
+any time.
+.It Li p6-div
+Count the number of floating point divides.
+This event is only allocated on counter 1.
+.It Li p6-emon-esp-uops
+.Pq Tn "Pentium M"
+Count the total number of micro-ops.
+.It Li p6-emon-est-trans Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium M"
+Count the number of
+.Tn "Enhanced Intel SpeedStep"
+transitions.
+An additional qualifier may be specified, and can be one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li all
+Count all transitions.
+.It Li freq
+Count only frequency transitions.
+.El
+The default is to count all transitions.
+.It Li p6-emon-fused-uops-ret Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium M"
+Count the number of retired fused micro-ops.
+An additional qualifier may be specified, and may be one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li all
+Count all fused micro-ops.
+.It Li loadop
+Count only load and op micro-ops.
+.It Li stdsta
+Count only STD/STA micro-ops.
+.El
+The default is to count all fused micro-ops.
+.It Li p6-emon-kni-comp-inst-ret Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium III"
+Count the number of SSE computational instructions retired.
+An additional qualifier may be specified, and comprises one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li packed-and-scalar
+Count packed and scalar operations.
+.It Li scalar
+Count scalar operations only.
+.El
+The default is to count packed and scalar operations.
+.It Li p6-emon-kni-inst-retired Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium III"
+Count the number of SSE instructions retired.
+An additional qualifier may be specified, and comprises one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li packed-and-scalar
+Count packed and scalar operations.
+.It Li scalar
+Count scalar operations only.
+.El
+The default is to count packed and scalar operations.
+.It Li p6-emon-kni-pref-dispatched Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium III"
+Count the number of SSE prefetch or weakly ordered instructions
+dispatched (including speculative prefetches).
+An additional qualifier may be specified, and comprises one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li nta
+Count non-temporal prefetches.
+.It Li t1
+Count prefetches to L1.
+.It Li t2
+Count prefetches to L2.
+.It Li wos
+Count weakly ordered stores.
+.El
+The default is to count non-temporal prefetches.
+.It Li p6-emon-kni-pref-miss Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium III"
+Count the number of prefetch or weakly ordered instructions that miss
+all caches.
+An additional qualifier may be specified, and comprises one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li nta
+Count non-temporal prefetches.
+.It Li t1
+Count prefetches to L1.
+.It Li t2
+Count prefetches to L2.
+.It Li wos
+Count weakly ordered stores.
+.El
+The default is to count non-temporal prefetches.
+.It Li p6-emon-pref-rqsts-dn
+.Pq Tn "Pentium M"
+Count the number of downward prefetches issued.
+.It Li p6-emon-pref-rqsts-up
+.Pq Tn "Pentium M"
+Count the number of upward prefetches issued.
+.It Li p6-emon-simd-instr-retired
+.Pq Tn "Pentium M"
+Count the number of retired
+.Tn MMX
+instructions.
+.It Li p6-emon-sse-sse2-comp-inst-retired Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium M"
+Count the number of computational SSE instructions retired.
+An additional qualifier may be specified and can be one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li sse-packed-single
+Count SSE packed-single instructions.
+.It Li sse-scalar-single
+Count SSE scalar-single instructions.
+.It Li sse2-packed-double
+Count SSE2 packed-double instructions.
+.It Li sse2-scalar-double
+Count SSE2 scalar-double instructions.
+.El
+The default is to count SSE packed-single instructions.
+.It Li p6-emon-sse-sse2-inst-retired Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium M"
+Count the number of SSE instructions retired.
+An additional qualifier can be specified, and can be one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li sse-packed-single
+Count SSE packed-single instructions.
+.It Li sse-packed-single-scalar-single
+Count SSE packed-single and scalar-single instructions.
+.It Li sse2-packed-double
+Count SSE2 packed-double instructions.
+.It Li sse2-scalar-double
+Count SSE2 scalar-double instructions.
+.El
+The default is to count SSE packed-single instructions.
+.It Li p6-emon-synch-uops
+.Pq Tn "Pentium M"
+Count the number of sync micro-ops.
+.It Li p6-emon-thermal-trip
+.Pq Tn "Pentium M"
+Count the duration or occurrences of thermal trips.
+Use the
+.Ar edge
+qualifier to count occurrences of thermal trips.
+.It Li p6-emon-unfusion
+.Pq Tn "Pentium M"
+Count the number of unfusion events in the reorder buffer.
+.It Li p6-flops
+Count the number of computational floating point operations retired.
+This event is only allocated on counter 0.
+.It Li p6-fp-assist
+Count the number of floating point exceptions handled by microcode.
+This event is only allocated on counter 1.
+.It Li p6-fp-comps-ops-exe
+Count the number of computational floating point operations executed.
+This event is only allocated on counter 0.
+.It Li p6-fp-mmx-trans Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium II" , Tn "Pentium III"
+Count the number of transitions between MMX and floating-point
+instructions.
+An additional qualifier may be specified, and comprises one of the
+following keywords:
+.Bl -tag -width indent -compact
+.It Li mmxtofp
+Count transitions from MMX instructions to floating-point instructions.
+.It Li fptommx
+Count transitions from floating-point instructions to MMX instructions.
+.El
+The default is to count MMX to floating-point transitions.
+.It Li p6-hw-int-rx
+Count the number of hardware interrupts received.
+.It Li p6-ifu-fetch
+Count the number of instruction fetches, both cacheable and non-cacheable.
+.It Li p6-ifu-fetch-miss
+Count the number of instruction fetch misses (i.e., those that produce
+memory accesses).
+.It Li p6-ifu-mem-stall
+Count the number of cycles instruction fetch is stalled for any reason.
+.It Li p6-ild-stall
+Count the number of cycles the instruction length decoder is stalled.
+.It Li p6-inst-decoded
+Count the number of instructions decoded.
+.It Li p6-inst-retired
+Count the number of instructions retired.
+.It Li p6-itlb-miss
+Count the number of instruction TLB misses.
+.It Li p6-l2-ads
+Count the number of L2 address strobes.
+.It Li p6-l2-dbus-busy
+Count the number of cycles during which the L2 cache data bus was busy.
+.It Li p6-l2-dbus-busy-rd
+Count the number of cycles during which the L2 cache data bus was busy
+transferring read data from L2 to the processor.
+.It Li p6-l2-ifetch Op Li ,umask= Ns Ar qualifier
+Count the number of L2 instruction fetches.
+An additional qualifier may be specified and comprises a list of the following
+keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li e
+Count operations affecting E (exclusive) state lines.
+.It Li i
+Count operations affecting I (invalid) state lines.
+.It Li m
+Count operations affecting M (modified) state lines.
+.It Li s
+Count operations affecting S (shared) state lines.
+.El
+The default is to count operations affecting all (MESI) state lines.
+.It Li p6-l2-ld Op Li ,umask= Ns Ar qualifier
+Count the number of L2 data loads.
+An additional qualifier may be specified and comprises a list of the following
+keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li both
+.Pq Tn "Pentium M"
+Count both hardware-prefetched lines and non-hardware-prefetched lines.
+.It Li e
+Count operations affecting E (exclusive) state lines.
+.It Li hw
+.Pq Tn "Pentium M"
+Count hardware-prefetched lines only.
+.It Li i
+Count operations affecting I (invalid) state lines.
+.It Li m
+Count operations affecting M (modified) state lines.
+.It Li nonhw
+.Pq Tn "Pentium M"
+Exclude hardware-prefetched lines.
+.It Li s
+Count operations affecting S (shared) state lines.
+.El
+The default on processors other than
+.Tn "Pentium M"
+processors is to count operations affecting all (MESI) state lines.
+The default on
+.Tn "Pentium M"
+processors is to count both hardware-prefetched and
+non-hardware-prefetch operations on all (MESI) state lines.
+.It Li p6-l2-lines-in Op Li ,umask= Ns Ar qualifier
+Count the number of L2 lines allocated.
+An additional qualifier may be specified and comprises a list of the following
+keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li both
+.Pq Tn "Pentium M"
+Count both hardware-prefetched lines and non-hardware-prefetched lines.
+.It Li e
+Count operations affecting E (exclusive) state lines.
+.It Li hw
+.Pq Tn "Pentium M"
+Count hardware-prefetched lines only.
+.It Li i
+Count operations affecting I (invalid) state lines.
+.It Li m
+Count operations affecting M (modified) state lines.
+.It Li nonhw
+.Pq Tn "Pentium M"
+Exclude hardware-prefetched lines.
+.It Li s
+Count operations affecting S (shared) state lines.
+.El
+The default on processors other than
+.Tn "Pentium M"
+processors is to count operations affecting all (MESI) state lines.
+The default on
+.Tn "Pentium M"
+processors is to count both hardware-prefetched and
+non-hardware-prefetch operations on all (MESI) state lines.
+.It Li p6-l2-lines-out Op Li ,umask= Ns Ar qualifier
+Count the number of L2 lines evicted.
+An additional qualifier may be specified and comprises a list of the following
+keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li both
+.Pq Tn "Pentium M"
+Count both hardware-prefetched lines and non-hardware-prefetched lines.
+.It Li e
+Count operations affecting E (exclusive) state lines.
+.It Li hw
+.Pq Tn "Pentium M"
+Count hardware-prefetched lines only.
+.It Li i
+Count operations affecting I (invalid) state lines.
+.It Li m
+Count operations affecting M (modified) state lines.
+.It Li nonhw
+.Pq Tn "Pentium M" only
+Exclude hardware-prefetched lines.
+.It Li s
+Count operations affecting S (shared) state lines.
+.El
+The default on processors other than
+.Tn "Pentium M"
+processors is to count operations affecting all (MESI) state lines.
+The default on
+.Tn "Pentium M"
+processors is to count both hardware-prefetched and
+non-hardware-prefetch operations on all (MESI) state lines.
+.It Li p6-l2-m-lines-inm
+Count the number of modified lines allocated in L2 cache.
+.It Li p6-l2-m-lines-outm Op Li ,umask= Ns Ar qualifier
+Count the number of L2 M-state lines evicted.
+.Pp
+.Pq Tn "Pentium M"
+On these processors an additional qualifier may be specified and
+comprises a list of the following keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li both
+Count both hardware-prefetched lines and non-hardware-prefetched lines.
+.It Li hw
+Count hardware-prefetched lines only.
+.It Li nonhw
+Exclude hardware-prefetched lines.
+.El
+The default is to count both hardware-prefetched and
+non-hardware-prefetch operations.
+.It Li p6-l2-rqsts Op Li ,umask= Ns Ar qualifier
+Count the total number of L2 requests.
+An additional qualifier may be specified and comprises a list of the following
+keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li e
+Count operations affecting E (exclusive) state lines.
+.It Li i
+Count operations affecting I (invalid) state lines.
+.It Li m
+Count operations affecting M (modified) state lines.
+.It Li s
+Count operations affecting S (shared) state lines.
+.El
+The default is to count operations affecting all (MESI) state lines.
+.It Li p6-l2-st
+Count the number of L2 data stores.
+An additional qualifier may be specified and comprises a list of the following
+keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li e
+Count operations affecting E (exclusive) state lines.
+.It Li i
+Count operations affecting I (invalid) state lines.
+.It Li m
+Count operations affecting M (modified) state lines.
+.It Li s
+Count operations affecting S (shared) state lines.
+.El
+The default is to count operations affecting all (MESI) state lines.
+.It Li p6-ld-blocks
+Count the number of load operations delayed due to store buffer blocks.
+.It Li p6-misalign-mem-ref
+Count the number of misaligned data memory references (crossing a 64
+bit boundary).
+.It Li p6-mmx-assist
+.Pq Tn "Pentium II" , Tn "Pentium III"
+Count the number of MMX assists executed.
+.It Li p6-mmx-instr-exec
+.Pq Tn "Celeron" , Tn "Pentium II"
+Count the number of MMX instructions executed, except MOVQ and MOVD
+stores from register to memory.
+.It Li p6-mmx-instr-ret
+.Pq Tn "Pentium II"
+Count the number of MMX instructions retired.
+.It Li p6-mmx-instr-type-exec Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium II" , Tn "Pentium III"
+Count the number of MMX instructions executed.
+An additional qualifier may be specified and comprises a list of
+the following keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li pack
+Count MMX pack operation instructions.
+.It Li packed-arithmetic
+Count MMX packed arithmetic instructions.
+.It Li packed-logical
+Count MMX packed logical instructions.
+.It Li packed-multiply
+Count MMX packed multiply instructions.
+.It Li packed-shift
+Count MMX packed shift instructions.
+.It Li unpack
+Count MMX unpack operation instructions.
+.El
+The default is to count all operations.
+.It Li p6-mmx-sat-instr-exec
+.Pq Tn "Pentium II" , Tn "Pentium III"
+Count the number of MMX saturating instructions executed.
+.It Li p6-mmx-uops-exec
+.Pq Tn "Pentium II" , Tn "Pentium III"
+Count the number of MMX micro-ops executed.
+.It Li p6-mul
+Count the number of floating point multiplies.
+This event is only allocated on counter 1.
+.It Li p6-partial-rat-stalls
+Count the number of cycles or events for partial stalls.
+.It Li p6-resource-stalls
+Count the number of cycles there was a resource related stall of any kind.
+.It Li p6-ret-seg-renames
+.Pq Tn "Pentium II" , Tn "Pentium III"
+Count the number of segment register rename events retired.
+.It Li p6-sb-drains
+Count the number of cycles the store buffer is draining.
+.It Li p6-seg-reg-renames Op Li ,umask= Ns Ar qualifier
+.Pq Tn "Pentium II" , Tn "Pentium III"
+Count the number of segment register renames.
+An additional qualifier may be specified, and comprises a list of the
+following keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li ds
+Count renames for segment register DS.
+.It Li es
+Count renames for segment register ES.
+.It Li fs
+Count renames for segment register FS.
+.It Li gs
+Count renames for segment register GS.
+.El
+The default is to count operations affecting all segment registers.
+.It Li p6-seg-rename-stalls
+.Pq Tn "Pentium II" , Tn "Pentium III"
+Count the number of segment register renaming stalls.
+An additional qualifier may be specified, and comprises a list of the
+following keywords separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li ds
+Count stalls for segment register DS.
+.It Li es
+Count stalls for segment register ES.
+.It Li fs
+Count stalls for segment register FS.
+.It Li gs
+Count stalls for segment register GS.
+.El
+The default is to count operations affecting all the segment registers.
+.It Li p6-segment-reg-loads
+Count the number of segment register loads.
+.It Li p6-uops-retired
+Count the number of micro-ops retired.
+.El
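+.Pp
+As an illustrative sketch (not part of the interface specification),
+a process-private counting PMC using one of these events could be
+allocated with
+.Fn pmc_allocate ,
+combining the event name with a
+.Li umask
+qualifier as described above.
+The
+.Li PMC_MODE_TC
+and
+.Li PMC_CPU_ANY
+constants are assumed to come from
+.In sys/pmc.h :
+.Bd -literal -offset indent
+pmc_id_t pmcid;
+
+/* count L2 requests that affect M or E state lines */
+if (pmc_allocate("p6-l2-rqsts,umask=m+e", PMC_MODE_TC, 0,
+    PMC_CPU_ANY, &pmcid) < 0)
+	err(EX_OSERR, "cannot allocate PMC");
+.Ed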
+.Ss Intel P4 PMCs
+Intel P4 PMCs are present in Intel
+.Tn "Pentium 4"
+and
+.Tn Xeon
+processors.
+These PMCs are documented in
+.Rs
+.%B "IA-32 Intel(R) Architecture Software Developer's Manual"
+.%T "Volume 3: System Programming Guide"
+.%N "Order Number 245472-012"
+.%D 2003
+.%Q "Intel Corporation"
+.Re
+Further information about using these PMCs may be found in
+.Rs
+.%B "IA-32 Intel(R) Architecture Optimization Guide"
+.%D 2003
+.%N "Order Number 248966-009"
+.%Q "Intel Corporation"
+.Re
+.Pp
+Event specifiers for Intel P4 PMCs can have the following common
+qualifiers:
+.Bl -tag -width indent
+.It Li active= Ns Ar choice
+(On P4 HTT CPUs) Filter event counting based on which logical
+processors are active.
+The allowed values of
+.Ar choice
+are:
+.Bl -tag -width indent -compact
+.It Li any
+Count when either logical processor is active.
+.It Li both
+Count when both logical processors are active.
+.It Li none
+Count only when neither logical processor is active.
+.It Li single
+Count only when one logical processor is active.
+.El
+The default is
+.Li both .
+.It Li cascade
+Configure the PMC to cascade onto its partner.
+The PMC for the partner must already have been allocated by the
+current process.
+See
+.Sx "Cascading P4 PMCs"
+below for more information.
+.It Li edge
+Configure the counter to count false to true transitions of the threshold
+comparison output.
+This qualifier only takes effect if a threshold qualifier has also been
+specified.
+.It Li complement
+Configure the counter to increment only when the event count seen is
+less than the threshold qualifier value specified.
+.It Li mask= Ns Ar qualifier
+Many event specifiers for Intel P4 PMCs need to be additionally
+qualified using a mask qualifier.
+The allowed syntax for these qualifiers is event specific and is
+described along with the events.
+.It Li os
+Configure the PMC to count when the CPL of the processor is 0.
+.It Li precise
+Select precise event based sampling.
+Precise sampling is supported by the hardware for a limited set of
+events.
+.It Li tag= Ns Ar value
+Configure the PMC to tag the internal uop selected by the other
+fields in this event specifier with value
+.Ar value .
+This feature is used when cascading PMCs.
+.It Li threshold= Ns Ar value
+Configure the PMC to increment only when the event counts seen are
+greater than the specified threshold value
+.Ar value .
+.It Li usr
+Configure the PMC to count when the CPL of the processor is 1, 2 or 3.
+.El
+If neither of the
+.Li os
+or
+.Li usr
+qualifiers is specified, the default is to enable both.
+.Pp
+On Intel Pentium 4 processors with HTT, events are
+divided into two classes:
+.Bl -tag -width "XXXXXXXXXX" -compact
+.It "TS Events"
+are those where hardware can differentiate between events
+generated on one logical processor from those generated on the
+other.
+.It "TI Events"
+are those where hardware cannot differentiate between events
+generated by multiple logical processors in a package.
+.El
+Only TS events are allowed for use with process-mode PMCs on
+Pentium-4/HTT CPUs.
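+.Pp
+By way of illustration, the following sketches combine P4 event names
+with these common qualifiers:
+.Bd -literal -offset indent
+p4-global-power-events,usr
+p4-instr-retired,mask=nbogusntag+nbogustag,active=any
+.Ed
+The first specifier counts non-halted cycles at a processor CPL of 1,
+2 or 3, while the second counts retired non-bogus instructions
+irrespective of which logical processor is active.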
+.Pp
+The event specifiers supported by Intel P4 PMCs are:
+.Bl -tag -width indent
+.It Li p4-128bit-mmx-uop Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count integer SIMD SSE2 instructions that operate on 128 bit SIMD
+operands.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li all
+Count all uops operating on 128 bit SIMD integer operands in memory or
+XMM register.
+.El
+If an instruction contains more than one 128 bit MMX uop, then each
+uop will be counted.
+.It Li p4-64bit-mmx-uop Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count MMX instructions that operate on 64 bit SIMD operands.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li all
+Count all uops operating on 64 bit SIMD integer operands in memory or
+in MMX registers.
+.El
+If an instruction contains more than one 64 bit MMX uop, then each
+uop will be counted.
+.It Li p4-b2b-cycles
+.Pq "TI event"
+Count back-to-back bus cycles.
+Further documentation for this event is unavailable.
+.It Li p4-bnr
+.Pq "TI event"
+Count bus-not-ready conditions.
+Further documentation for this event is unavailable.
+.It Li p4-bpu-fetch-request Op Li ,mask= Ns Ar qualifier
+.Pq "TS event"
+Count instruction fetch requests qualified by additional
+flags specified in
+.Ar qualifier .
+At this point only one flag is supported:
+.Bl -tag -width indent -compact
+.It Li tcmiss
+Count trace cache lookup misses.
+.El
+The default qualifier is also
+.Ar mask=tcmiss .
+.It Li p4-branch-retired Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Counts retired branches.
+Qualifier
+.Ar flags
+is a list of the following
+.Li +
+separated strings:
+.Bl -tag -width indent -compact
+.It Li mmnp
+Count branches not-taken and predicted.
+.It Li mmnm
+Count branches not-taken and mis-predicted.
+.It Li mmtp
+Count branches taken and predicted.
+.It Li mmtm
+Count branches taken and mis-predicted.
+.El
+The default qualifier counts all four kinds of branches.
+.It Li p4-bsq-active-entries Op Li ,mask= Ns Ar qualifier
+.Pq "TS event"
+Count the number of entries (clipped at 15) currently active in the
+BSQ.
+Qualifier
+.Ar qualifier
+is a
+.Li +
+separated set of the following flags:
+.Bl -tag -width indent -compact
+.It Li req-type0 , Li req-type1
+Forms a 2-bit number used to select the request type encoding:
+.Bl -tag -width indent -compact
+.It Li 0
+reads excluding read invalidate
+.It Li 1
+read invalidates
+.It Li 2
+writes other than writebacks
+.It Li 3
+writebacks
+.El
+Bit
+.Li req-type1
+is the MSB for this two-bit number.
+.It Li req-len0 , Li req-len1
+Forms a two-bit number that specifies the request length encoding:
+.Bl -tag -width indent -compact
+.It Li 0
+0 chunks
+.It Li 1
+1 chunk
+.It Li 3
+8 chunks
+.El
+Bit
+.Li req-len1
+is the MSB for this two-bit number.
+.It Li req-io-type
+Count requests that are input or output requests.
+.It Li req-lock-type
+Count requests that lock the bus.
+.It Li req-lock-cache
+Count requests that lock the cache.
+.It Li req-split-type
+Count requests for a bus 8-byte chunk that is split across an
+8-byte boundary.
+.It Li req-dem-type
+Count requests that are demand (not prefetches) if set.
+Count requests that are prefetches if not set.
+.It Li req-ord-type
+Count requests that are ordered.
+.It Li mem-type0 , Li mem-type1 , Li mem-type2
+Forms a 3-bit number that specifies a memory type encoding:
+.Bl -tag -width indent -compact
+.It Li 0
+UC
+.It Li 1
+USWC
+.It Li 4
+WT
+.It Li 5
+WP
+.It Li 6
+WB
+.El
+Bit
+.Li mem-type2
+is the MSB of this 3-bit number.
+.El
+The default qualifier has all the above bits set.
+.Pp
+Edge triggering using the
+.Li edge
+qualifier should not be used with this event when counting cycles.
+.It Li p4-bsq-allocation Op Li ,mask= Ns Ar qualifier
+.Pq "TS event"
+Count allocations in the bus sequence unit according to the flags
+specified in
+.Ar qualifier ,
+which is a
+.Li +
+separated set of the following flags:
+.Bl -tag -width indent -compact
+.It Li req-type0 , Li req-type1
+Forms a 2-bit number used to select the request type encoding:
+.Bl -tag -width indent -compact
+.It Li 0
+reads excluding read invalidate
+.It Li 1
+read invalidates
+.It Li 2
+writes other than writebacks
+.It Li 3
+writebacks
+.El
+Bit
+.Li req-type1
+is the MSB for this two-bit number.
+.It Li req-len0 , Li req-len1
+Forms a two-bit number that specifies the request length encoding:
+.Bl -tag -width indent -compact
+.It Li 0
+0 chunks
+.It Li 1
+1 chunk
+.It Li 3
+8 chunks
+.El
+Bit
+.Li req-len1
+is the MSB for this two-bit number.
+.It Li req-io-type
+Count requests that are input or output requests.
+.It Li req-lock-type
+Count requests that lock the bus.
+.It Li req-lock-cache
+Count requests that lock the cache.
+.It Li req-split-type
+Count requests for a bus 8-byte chunk that is split across an
+8-byte boundary.
+.It Li req-dem-type
+Count requests that are demand (not prefetches) if set.
+Count requests that are prefetches if not set.
+.It Li req-ord-type
+Count requests that are ordered.
+.It Li mem-type0 , Li mem-type1 , Li mem-type2
+Forms a 3-bit number that specifies a memory type encoding:
+.Bl -tag -width indent -compact
+.It Li 0
+UC
+.It Li 1
+USWC
+.It Li 4
+WT
+.It Li 5
+WP
+.It Li 6
+WB
+.El
+Bit
+.Li mem-type2
+is the MSB of this 3-bit number.
+.El
+The default qualifier has all the above bits set.
+.Pp
+This event is usually used along with the
+.Li edge
+qualifier to avoid multiple counting.
+.It Li p4-bsq-cache-reference Op Li ,mask= Ns Ar qualifier
+.Pq "TS event"
+Count cache references as seen by the bus unit (2nd or 3rd level
+cache references).
+Qualifier
+.Ar qualifier
+is a
+.Li +
+separated list of the following keywords:
+.Bl -tag -width indent -compact
+.It Li rd-2ndl-hits
+Count 2nd level cache hits in the shared state.
+.It Li rd-2ndl-hite
+Count 2nd level cache hits in the exclusive state.
+.It Li rd-2ndl-hitm
+Count 2nd level cache hits in the modified state.
+.It Li rd-3rdl-hits
+Count 3rd level cache hits in the shared state.
+.It Li rd-3rdl-hite
+Count 3rd level cache hits in the exclusive state.
+.It Li rd-3rdl-hitm
+Count 3rd level cache hits in the modified state.
+.It Li rd-2ndl-miss
+Count 2nd level cache misses.
+.It Li rd-3rdl-miss
+Count 3rd level cache misses.
+.It Li wr-2ndl-miss
+Count write-back lookups from the data access cache that miss the 2nd
+level cache.
+.El
+The default is to count all the above events.
+.It Li p4-execution-event Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count the retirement of tagged uops selected through the execution
+tagging mechanism.
+Qualifier
+.Ar flags
+can contain the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li nbogus0 , Li nbogus1 , Li nbogus2 , Li nbogus3
+The marked uops are not bogus.
+.It Li bogus0 , Li bogus1 , Li bogus2 , Li bogus3
+The marked uops are bogus.
+.El
+This event requires additional (upstream) events to be allocated to
+perform the desired uop tagging.
+The default is to set all the above flags.
+This event can be used for precise event based sampling.
+.It Li p4-front-end-event Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count the retirement of tagged uops selected through the front-end
+tagging mechanism.
+Qualifier
+.Ar flags
+can contain the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li nbogus
+The marked uops are not bogus.
+.It Li bogus
+The marked uops are bogus.
+.El
+This event requires additional (upstream) events to be allocated to
+perform the desired uop tagging.
+The default is to select both kinds of events.
+This event can be used for precise event based sampling.
+.It Li p4-fsb-data-activity Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count each DBSY or DRDY event selected by qualifier
+.Ar flags .
+Qualifier
+.Ar flags
+is a
+.Li +
+separated set of the following flags:
+.Bl -tag -width indent -compact
+.It Li drdy-drv
+Count when this processor is driving data onto the bus.
+.It Li drdy-own
+Count when this processor is reading data from the bus.
+.It Li drdy-other
+Count when data is on the bus but not being sampled by this processor.
+.It Li dbsy-drv
+Count when this processor reserves the bus for use in the next cycle
+in order to drive data.
+.It Li dbsy-own
+Count when some agent reserves the bus for use in the next bus cycle
+to drive data that this processor will sample.
+.It Li dbsy-other
+Count when some agent reserves the bus for use in the next bus cycle
+to drive data that this processor will not sample.
+.El
+Flags
+.Li drdy-own
+and
+.Li drdy-other
+are mutually exclusive.
+Flags
+.Li dbsy-own
+and
+.Li dbsy-other
+are mutually exclusive.
+The default value for
+.Ar flags
+is
+.Li drdy-drv+drdy-own+dbsy-drv+dbsy-own .
+.It Li p4-global-power-events Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count cycles during which the processor is not stopped.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li running
+Count cycles when the processor is active.
+.El
+.It Li p4-instr-retired Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count instructions retired during a clock cycle.
+Qualifier
+.Ar flags
+comprises the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li nbogusntag
+Count non-bogus instructions that are not tagged.
+.It Li nbogustag
+Count non-bogus instructions that are tagged.
+.It Li bogusntag
+Count bogus instructions that are not tagged.
+.It Li bogustag
+Count bogus instructions that are tagged.
+.El
+The default qualifier counts all the above kinds of instructions.
+.It Li p4-ioq-active-entries Xo
+.Op Li ,mask= Ns Ar qualifier
+.Op Li ,busreqtype= Ns Ar req-type
+.Xc
+.Pq "TS event"
+Count the number of entries (clipped at 15) in the IOQ that are
+active.
+The event masks are specified by qualifier
+.Ar qualifier
+and
+.Ar req-type .
+.Pp
+Qualifier
+.Ar qualifier
+is a
+.Li +
+separated set of the following flags:
+.Bl -tag -width indent -compact
+.It Li all-read
+Count read entries.
+.It Li all-write
+Count write entries.
+.It Li mem-uc
+Count entries accessing uncacheable memory.
+.It Li mem-wc
+Count entries accessing write-combining memory.
+.It Li mem-wt
+Count entries accessing write-through memory.
+.It Li mem-wp
+Count entries accessing write-protected memory.
+.It Li mem-wb
+Count entries accessing write-back memory.
+.It Li own
+Count store requests driven by the processor (i.e., not by other
+processors or by DMA).
+.It Li other
+Count store requests driven by other processors or by DMA.
+.It Li prefetch
+Include hardware and software prefetch requests in the count.
+.El
+The default value for
+.Ar qualifier
+is to enable all the above flags.
+.Pp
+The
+.Ar req-type
+qualifier is a 5-bit number that can additionally be used to select a
+specific bus request type.
+The default is 0.
+.Pp
+The
+.Li edge
+qualifier should not be used when counting cycles with this event.
+The exact behaviour of this event depends on the processor revision.
+.It Li p4-ioq-allocation Xo
+.Op Li ,mask= Ns Ar qualifier
+.Op Li ,busreqtype= Ns Ar req-type
+.Xc
+.Pq "TS event"
+Count various types of transactions on the bus matching the flags set
+in
+.Ar qualifier
+and
+.Ar req-type .
+.Pp
+Qualifier
+.Ar qualifier
+is a
+.Li +
+separated set of the following flags:
+.Bl -tag -width indent -compact
+.It Li all-read
+Count read entries.
+.It Li all-write
+Count write entries.
+.It Li mem-uc
+Count entries accessing uncacheable memory.
+.It Li mem-wc
+Count entries accessing write-combining memory.
+.It Li mem-wt
+Count entries accessing write-through memory.
+.It Li mem-wp
+Count entries accessing write-protected memory.
+.It Li mem-wb
+Count entries accessing write-back memory.
+.It Li own
+Count store requests driven by the processor (i.e., not by other
+processors or by DMA).
+.It Li other
+Count store requests driven by other processors or by DMA.
+.It Li prefetch
+Include hardware and software prefetch requests in the count.
+.El
+The default value for
+.Ar qualifier
+is to enable all the above flags.
+.Pp
+The
+.Ar req-type
+qualifier is a 5-bit number that can additionally be used to select a
+specific bus request type.
+The default is 0.
+.Pp
+The
+.Li edge
+qualifier is normally used with this event to prevent multiple
+counting.
+The exact behaviour of this event depends on the processor revision.
+.It Li p4-itlb-reference Op Li ,mask= Ns Ar qualifier
+.Pq "TS event"
+Count translations using the instruction translation look-aside
+buffer.
+The
+.Ar qualifier
+argument is a list of the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li hit
+Count ITLB hits.
+.It Li miss
+Count ITLB misses.
+.It Li hit-uc
+Count uncacheable ITLB hits.
+.El
+If no
+.Ar qualifier
+is specified the default is to count all the three kinds of ITLB
+translations.
+.It Li p4-load-port-replay Op Li ,mask= Ns Ar qualifier
+.Pq "TS event"
+Count replayed events at the load port.
+Qualifier
+.Ar qualifier
+can take on one value:
+.Bl -tag -width indent -compact
+.It Li split-ld
+Count split loads.
+.El
+The default value for
+.Ar qualifier
+is
+.Li split-ld .
+.It Li p4-mispred-branch-retired Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count mispredicted IA-32 branch instructions.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li nbogus
+Count non-bogus retired branch instructions.
+.El
+.It Li p4-machine-clear Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count the number of pipeline clears seen by the processor.
+Qualifier
+.Ar flags
+is a list of the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li clear
+Count a portion of the many cycles during which the machine is being
+cleared for any reason.
+.It Li moclear
+Count machine clears due to memory ordering issues.
+.It Li smclear
+Count machine clears due to self-modifying code.
+.El
+Use qualifier
+.Li edge
+to get a count of occurrences of machine clears.
+The default qualifier is
+.Li clear .
+.It Li p4-memory-cancel Op Li ,mask= Ns Ar event-list
+.Pq "TS event"
+Count the cancelling of various kinds of requests in the data cache
+address control unit of the CPU.
+The qualifier
+.Ar event-list
+is a list of the following strings separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li st-rb-full
+Requests cancelled because no store request buffer was available.
+.It Li 64k-conf
+Requests that conflict due to 64K aliasing.
+.El
+If
+.Ar event-list
+is not specified, then the default is to count both kinds of events.
+.It Li p4-memory-complete Op Li ,mask= Ns Ar event-list
+.Pq "TS event"
+Count the completion of load split, store split, uncacheable split and
+uncacheable load operations selected by qualifier
+.Ar event-list .
+The qualifier
+.Ar event-list
+is a
+.Li +
+separated list of the following flags:
+.Bl -tag -width indent -compact
+.It Li lsc
+Count load splits completed, excluding loads from uncacheable or
+write-combining areas.
+.It Li ssc
+Count any split stores completed.
+.El
+The default is to count both kinds of operations.
+.It Li p4-mob-load-replay Op Li ,mask= Ns Ar qualifier
+.Pq "TS event"
+Count load replays triggered by the memory order buffer.
+Qualifier
+.Ar qualifier
+can be a
+.Li +
+separated list of the following flags:
+.Bl -tag -width indent -compact
+.It Li no-sta
+Count replays because of unknown store addresses.
+.It Li no-std
+Count replays because of unknown store data.
+.It Li partial-data
+Count replays because of partially overlapped data accesses between
+load and store operations.
+.It Li unalgn-addr
+Count replays because of mismatches in the lower 4 bits of load and
+store operations.
+.El
+The default qualifier is
+.Ar no-sta+no-std+partial-data+unalgn-addr .
+.It Li p4-packed-dp-uop Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count packed double-precision uops.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li all
+Count all uops operating on packed double-precision operands.
+.El
+.It Li p4-packed-sp-uop Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count packed single-precision uops.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li all
+Count all uops operating on packed single-precision operands.
+.El
+.It Li p4-page-walk-type Op Li ,mask= Ns Ar qualifier
+.Pq "TI event"
+Count page walks performed by the page miss handler.
+Qualifier
+.Ar qualifier
+can be a
+.Li +
+separated list of the following keywords:
+.Bl -tag -width indent -compact
+.It Li dtmiss
+Count page walks for data TLB misses.
+.It Li itmiss
+Count page walks for instruction TLB misses.
+.El
+The default value for
+.Ar qualifier
+is
+.Li dtmiss+itmiss .
+.It Li p4-replay-event Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count the retirement of tagged uops selected through the replay
+tagging mechanism.
+Qualifier
+.Ar flags
+contains a
+.Li +
+separated set of the following strings:
+.Bl -tag -width indent -compact
+.It Li nbogus
+The marked uops are not bogus.
+.It Li bogus
+The marked uops are bogus.
+.El
+This event requires additional (upstream) events to be allocated to
+perform the desired uop tagging.
+The default qualifier counts both kinds of uops.
+This event can be used for precise event based sampling.
+.It Li p4-resource-stall Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count the occurrence or latency of stalls in the allocator.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li sbfull
+A stall due to the lack of store buffers.
+.El
+.It Li p4-response
+.Pq "TI event"
+Count different types of responses.
+Further documentation on this event is not available.
+.It Li p4-retired-branch-type Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count branches retired.
+Qualifier
+.Ar flags
+contains a
+.Li +
+separated list of strings:
+.Bl -tag -width indent -compact
+.It Li conditional
+Count conditional jumps.
+.It Li call
+Count direct and indirect call branches.
+.It Li return
+Count return branches.
+.It Li indirect
+Count returns, indirect calls or indirect jumps.
+.El
+The default qualifier counts all the above branch types.
+.It Li p4-retired-mispred-branch-type Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count mispredicted branches retired.
+Qualifier
+.Ar flags
+contains a
+.Li +
+separated list of strings:
+.Bl -tag -width indent -compact
+.It Li conditional
+Count conditional jumps.
+.It Li call
+Count indirect call branches.
+.It Li return
+Count return branches.
+.It Li indirect
+Count returns, indirect calls or indirect jumps.
+.El
+The default qualifier counts all the above branch types.
+.It Li p4-scalar-dp-uop Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count the number of scalar double-precision uops.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li all
+Count the number of scalar double-precision uops.
+.El
+.It Li p4-scalar-sp-uop Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count the number of scalar single-precision uops.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li all
+Count all uops operating on scalar single-precision operands.
+.El
+.It Li p4-snoop
+.Pq "TI event"
+Count snoop traffic.
+Further documentation on this event is not available.
+.It Li p4-sse-input-assist Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count the number of times an assist is required to handle problems
+with the operands for SSE and SSE2 operations.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li all
+Count assists for all SSE and SSE2 uops.
+.El
+.It Li p4-store-port-replay Op Li ,mask= Ns Ar qualifier
+.Pq "TS event"
+Count events replayed at the store port.
+Qualifier
+.Ar qualifier
+can take on one value:
+.Bl -tag -width indent -compact
+.It Li split-st
+Count split stores.
+.El
+The default value for
+.Ar qualifier
+is
+.Li split-st .
+.It Li p4-tc-deliver-mode Op Li ,mask= Ns Ar qualifier
+.Pq "TI event"
+Count the duration in cycles of operating modes of the trace cache and
+decode engine.
+The desired operating mode is selected by
+.Ar qualifier ,
+which is a list of the following strings separated by
+.Li "+"
+characters:
+.Bl -tag -width indent -compact
+.It Li DD
+Both logical processors are in deliver mode.
+.It Li DB
+Logical processor 0 is in deliver mode while logical processor 1 is in
+build mode.
+.It Li DI
+Logical processor 0 is in deliver mode while logical processor 1 is
+halted, or in machine clear, or transitioning to a long microcode
+flow.
+.It Li BD
+Logical processor 0 is in build mode while logical processor 1 is in
+deliver mode.
+.It Li BB
+Both logical processors are in build mode.
+.It Li BI
+Logical processor 0 is in build mode while logical processor 1 is
+halted, or in machine clear or transitioning to a long microcode
+flow.
+.It Li ID
+Logical processor 0 is halted, or in machine clear or transitioning to
+a long microcode flow while logical processor 1 is in deliver mode.
+.It Li IB
+Logical processor 0 is halted, or in machine clear or transitioning to
+a long microcode flow while logical processor 1 is in build mode.
+.El
+If there is only one logical processor in the processor package then
+the qualifier for logical processor 1 is ignored.
+If no qualifier is specified, the default qualifier is
+.Li DD+DB+DI+BD+BB+BI+ID+IB .
+.It Li p4-tc-ms-xfer Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count the number of times uop delivery changed from the trace cache to
+MS ROM.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li cisc
+Count TC to MS transfers.
+.El
+.It Li p4-uop-queue-writes Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count the number of valid uops written to the uop queue.
+Qualifier
+.Ar flags
+is a list of the following strings, separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li from-tc-build
+Count uops being written from the trace cache in build mode.
+.It Li from-tc-deliver
+Count uops being written from the trace cache in deliver mode.
+.It Li from-rom
+Count uops being written from microcode ROM.
+.El
+The default qualifier counts all the above kinds of uops.
+.It Li p4-uop-type Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+This event is used in conjunction with the front-end at-retirement
+mechanism to tag load and store uops.
+Qualifier
+.Ar flags
+comprises the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li tagloads
+Mark uops that are load operations.
+.It Li tagstores
+Mark uops that are store operations.
+.El
+The default qualifier counts both kinds of uops.
+.It Li p4-uops-retired Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count uops retired during a clock cycle.
+Qualifier
+.Ar flags
+comprises the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li nbogus
+Count marked uops that are not bogus.
+.It Li bogus
+Count marked uops that are bogus.
+.El
+The default qualifier counts both kinds of uops.
+.It Li p4-wc-buffer Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count write-combining buffer operations.
+Qualifier
+.Ar flags
+contains the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li wcb-evicts
+WC buffer evictions due to any cause.
+.It Li wcb-full-evict
+WC buffer evictions due to no WC buffer being available.
+.El
+The default qualifier counts both kinds of evictions.
+.It Li p4-x87-assist Op Li ,mask= Ns Ar flags
+.Pq "TS event"
+Count the retirement of x87 instructions that required special
+handling.
+Qualifier
+.Ar flags
+contains the following strings separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li fpsu
+Count instructions that saw an FP stack underflow.
+.It Li fpso
+Count instructions that saw an FP stack overflow.
+.It Li poao
+Count instructions that saw an x87 output overflow.
+.It Li poau
+Count instructions that saw an x87 output underflow.
+.It Li prea
+Count instructions that needed an x87 input assist.
+.El
+The default qualifier counts all the above types of instruction
+retirements.
+.It Li p4-x87-fp-uop Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count x87 floating-point uops.
+Qualifier
+.Ar flags
+can take the following value (which is also the default):
+.Bl -tag -width indent -compact
+.It Li all
+Count all x87 floating-point uops.
+.El
+If an instruction contains more than one x87 floating-point uop, then
+all x87 floating-point uops will be counted.
+This event does not count x87 floating-point data movement operations.
+.It Li p4-x87-simd-moves-uop Op Li ,mask= Ns Ar flags
+.Pq "TI event"
+Count each x87 FPU, MMX, SSE, or SSE2 uop that loads data, stores
+data, or performs register-to-register moves.
+This event does not count integer move uops.
+Qualifier
+.Ar flags
+may contain the following keywords separated by
+.Li +
+characters:
+.Bl -tag -width indent -compact
+.It Li allp0
+Count all x87 and SIMD store and move uops.
+.It Li allp2
+Count all x87 and SIMD load uops.
+.El
+The default is to count all uops.
+.El
+.Ss "Cascading P4 PMCs"
+To be filled in.
+.Ss "Precise Event Based Sampling"
+To be filled in.
+.Sh IMPLEMENTATION NOTES
+On the i386 architecture,
+.Fx
+has historically allowed the use of the RDTSC instruction from
+user-mode (i.e., at a processor CPL of 3) by any process.
+This behaviour is preserved by
+.Xr hwpmc 4 .
+.Sh RETURN VALUES
+The
+.Fn pmc_name_of_capability ,
+.Fn pmc_name_of_class ,
+.Fn pmc_name_of_cputype ,
+.Fn pmc_name_of_disposition ,
+.Fn pmc_name_of_event ,
+.Fn pmc_name_of_mode ,
+and
+.Fn pmc_name_of_state
+functions return a pointer to the human-readable form of their argument.
+These pointers may point to statically allocated storage and must
+not be passed to
+.Fn free .
+In case of an error, these functions return
+.Li NULL
+and set the global variable
+.Va errno .
+.Pp
+The functions
+.Fn pmc_ncpu
+and
+.Fn pmc_npmc
+return the number of CPUs and the number of PMCs configured, respectively;
+in case of an error they return the value
+.Li -1
+and set the global variable
+.Va errno .
+.Pp
+All other functions return the value
+.Li 0
+if successful; otherwise the value
+.Li -1
+is returned and the global variable
+.Va errno
+is set to indicate the error.
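+.Pp
+The following fragment is an illustrative sketch of the conventional
+way of checking these return values:
+.Bd -literal -offset indent
+int ncpu;
+
+if (pmc_init() < 0)
+	err(EX_OSERR, "cannot initialize pmc(3)");
+if ((ncpu = pmc_ncpu()) < 0)
+	err(EX_OSERR, "cannot determine the number of CPUs");
+.Ed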
+.Sh ERRORS
+A call to
+.Fn pmc_init
+may fail with the following errors in addition to those returned by
+.Xr modfind 2 ,
+.Xr modstat 2
+and
+.Xr hwpmc 4 :
+.Bl -tag -width Er
+.It Bq Er ENXIO
+An unknown CPU type was encountered during initialization.
+.It Bq Er EPROGMISMATCH
+The version number of the
+.Xr hwpmc 4
+kernel module did not match that compiled into the
+.Xr pmc 3
+library.
+.El
+.Pp
+A call to
+.Fn pmc_name_of_capability ,
+.Fn pmc_name_of_disposition ,
+.Fn pmc_name_of_state ,
+.Fn pmc_name_of_event ,
+.Fn pmc_name_of_mode
+and
+.Fn pmc_name_of_class
+may fail with the following error:
+.Bl -tag -width Er
+.It Bq Er EINVAL
+An invalid argument was passed to the function.
+.El
+.Pp
+A call to
+.Fn pmc_cpuinfo
+or
+.Fn pmc_ncpu
+may fail with the following error:
+.Bl -tag -width Er
+.It Bq Er ENXIO
+The
+.Xr pmc 3
+library has not been initialized.
+.El
+.Pp
+A call to
+.Fn pmc_npmc
+may fail with the following errors:
+.Bl -tag -width Er
+.It Bq Er EINVAL
+The argument passed in was out of range.
+.It Bq Er ENXIO
+The
+.Xr pmc 3
+library has not been initialized.
+.El
+.Pp
+A call to
+.Fn pmc_pmcinfo
+may fail with the following errors, in addition to those returned by
+.Xr hwpmc 4 :
+.Bl -tag -width Er
+.It Bq Er ENXIO
+The
+.Xr pmc 3
+library is not yet initialized.
+.El
+.Pp
+A call to
+.Fn pmc_allocate
+may fail with the following errors, in addition to those returned by
+.Xr hwpmc 4 :
+.Bl -tag -width Er
+.It Bq Er EINVAL
+The
+.Fa mode
+argument passed in had an illegal value, or the event specification
+.Fa ctrspec
+was unrecognized for this CPU type.
+.El
+.Pp
+Calls to
+.Fn pmc_attach ,
+.Fn pmc_detach ,
+.Fn pmc_release ,
+.Fn pmc_start ,
+.Fn pmc_stop ,
+.Fn pmc_read ,
+.Fn pmc_write ,
+.Fn pmc_rw ,
+.Fn pmc_set ,
+.Fn pmc_configure_logfile ,
+.Fn pmc_get_driver_stats ,
+.Fn pmc_enable ,
+.Fn pmc_disable ,
+and
+.Fn pmc_x86_get_msr
+may fail with the errors described in
+.Xr hwpmc 4 .
+.Sh SEE ALSO
+.Xr modfind 2 ,
+.Xr modstat 2 ,
+.Xr hwpmc 4 ,
+.Xr pmccontrol 8 ,
+.Xr pmcreport 8 ,
+.Xr pmcstat 8
+.Sh BUGS
+The information returned by
+.Fn pmc_cpuinfo ,
+.Fn pmc_ncpu
+and possibly
+.Fn pmc_npmc
+should really be available all the time, through a better-designed
+interface.
+.Pp
+The API for
+.Fn pmc_cpuinfo
+and
+.Fn pmc_pmcinfo
+exposes too much of the underlying
+.Xr hwpmc 4
+driver's internals to userland.
diff --git a/lib/libpmc/pmc.h b/lib/libpmc/pmc.h
new file mode 100644
index 0000000..98c4af2
--- /dev/null
+++ b/lib/libpmc/pmc.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 2003,2004 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _PMC_H_
+#define _PMC_H_
+
+#include <sys/pmc.h>
+
+/*
+ * Prototypes
+ */
+
+int pmc_allocate(const char *_ctrspec, enum pmc_mode _mode, uint32_t _flags,
+ int _cpu, pmc_id_t *_pmcid);
+int pmc_attach(pmc_id_t _pmcid, pid_t _pid);
+int pmc_configure_logfile(int _fd);
+int pmc_detach(pmc_id_t _pmcid, pid_t _pid);
+int pmc_disable(int _cpu, int _pmc);
+int pmc_enable(int _cpu, int _pmc);
+int pmc_get_driver_stats(struct pmc_op_getdriverstats *_gms);
+int pmc_init(void);
+int pmc_read(pmc_id_t _pmc, pmc_value_t *_value);
+int pmc_release(pmc_id_t _pmc);
+int pmc_rw(pmc_id_t _pmc, pmc_value_t _newvalue, pmc_value_t *_oldvalue);
+int pmc_set(pmc_id_t _pmc, pmc_value_t _value);
+int pmc_start(pmc_id_t _pmc);
+int pmc_stop(pmc_id_t _pmc);
+int pmc_write(pmc_id_t _pmc, pmc_value_t _value);
+
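+/* Queries about the system's PMC configuration */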
+int pmc_ncpu(void);
+int pmc_npmc(int _cpu);
+int pmc_cpuinfo(const struct pmc_op_getcpuinfo **_cpu_info);
+int pmc_pmcinfo(int _cpu, struct pmc_op_getpmcinfo **_pmc_info);
+
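+/* Convert enumerated quantities to human-readable strings */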
+const char *pmc_name_of_capability(uint32_t _c);
+const char *pmc_name_of_class(enum pmc_class _pc);
+const char *pmc_name_of_cputype(enum pmc_cputype _cp);
+const char *pmc_name_of_disposition(enum pmc_disp _pd);
+const char *pmc_name_of_event(enum pmc_event _pe);
+const char *pmc_name_of_mode(enum pmc_mode _pm);
+const char *pmc_name_of_state(enum pmc_state _ps);
+
+int pmc_event_names_of_class(enum pmc_class _cl, const char ***_eventnames,
+ int *_nevents);
+
+/*
+ * Architecture specific extensions
+ */
+
+#if defined(__i386__) || defined(__amd64__)
+int pmc_x86_get_msr(pmc_id_t _pmc, uint32_t *_msr);
+#endif
+
+#endif /* _PMC_H_ */
diff --git a/share/doc/papers/Makefile b/share/doc/papers/Makefile
index ecab6f4..dd8fa56 100644
--- a/share/doc/papers/Makefile
+++ b/share/doc/papers/Makefile
@@ -5,6 +5,7 @@ SUBDIR= beyond4.3 \
contents \
devfs \
diskperf \
+ hwpmc \
fsinterface \
jail \
kernmalloc \
diff --git a/share/doc/papers/hwpmc/Makefile b/share/doc/papers/hwpmc/Makefile
new file mode 100644
index 0000000..d24fe06
--- /dev/null
+++ b/share/doc/papers/hwpmc/Makefile
@@ -0,0 +1,8 @@
+# $FreeBSD$
+
+VOLUME= papers
+DOC= hwpmc
+SRCS= hwpmc.ms
+MACROS= -ms
+
+.include <bsd.doc.mk>
diff --git a/share/doc/papers/hwpmc/hwpmc.ms b/share/doc/papers/hwpmc/hwpmc.ms
new file mode 100644
index 0000000..9061bb7
--- /dev/null
+++ b/share/doc/papers/hwpmc/hwpmc.ms
@@ -0,0 +1,34 @@
+.\" Copyright (c) 2004 Joseph Koshy.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY JOSEPH KOSHY AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL JOSEPH KOSHY OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.OH '''Using Hardware Performance Monitoring Counters'
+.EH 'HWPMC'''
+.TL
+Using Hardware Performance Monitoring Counters in FreeBSD
+.sp
+\s-2FreeBSD 5.2.1\s+2
+.sp
+\fRJuly, 2004\fR
+.PP
diff --git a/share/examples/hwpmc/README b/share/examples/hwpmc/README
new file mode 100644
index 0000000..035e68d
--- /dev/null
+++ b/share/examples/hwpmc/README
@@ -0,0 +1,8 @@
+# $FreeBSD$
+
+Examples illustrating the use of the hwpmc(4) driver and pmc(3)
+library interface.
+
+While there is nothing here yet, the source code for pmccontrol(8)
+and pmcstat(8) could serve as examples.
+
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index 33929ba..ce928f3 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -92,6 +92,7 @@ MAN= aac.4 \
hifn.4 \
hme.4 \
hptmv.4 \
+ hwpmc.4 \
ichsmb.4 \
icmp.4 \
icmp6.4 \
diff --git a/share/man/man4/hwpmc.4 b/share/man/man4/hwpmc.4
new file mode 100644
index 0000000..4a6c8cf
--- /dev/null
+++ b/share/man/man4/hwpmc.4
@@ -0,0 +1,583 @@
+.\" Copyright (c) 2003-2005 Joseph Koshy
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd April 15, 2005
+.Dt HWPMC 4
+.Os
+.Sh NAME
+.Nm hwpmc
+.Nd Hardware performance monitoring counter support
+.Sh SYNOPSIS
+.Cd options PMC_HOOKS
+.br
+.Cd device hwpmc
+.Sh DESCRIPTION
+The
+.Nm
+driver virtualizes the hardware performance monitoring facilities in
+modern CPUs and provides support for using these facilities from
+user level processes.
+.Pp
+The driver supports multi-processor systems.
+.Pp
+PMCs are allocated using the
+.Ic PMC_OP_PMCALLOCATE
+request.
+A successful
+.Ic PMC_OP_PMCALLOCATE
+request will return an integer handle (typically a small integer) to
+the requesting process.
+Subsequent operations on the allocated PMC use this handle to denote
+the specific PMC.
+A process that has successfully allocated a PMC is termed an
+.Dq "owner process" .
+.Pp
+PMCs may be allocated to operate in process-private or in system-wide
+modes.
+.Bl -hang -width "XXXXXXXXXXXXXXX"
+.It Em Process-private
+In process-private mode, a PMC is active only when a thread belonging
+to a process it is attached to is scheduled on a CPU.
+.It Em System-wide
+In system-wide mode a PMC operates independently of processes and
+measures hardware events for the system as a whole.
+.El
+.Pp
+The
+.Nm
+driver supports the use of hardware PMCs for counting or for
+sampling:
+.Bl -hang -width "XXXXXXXXX"
+.It Em Counting
+In counting modes, the PMCs count hardware events.
+These counts are retrievable using the
+.Ic PMC_OP_PMCREAD
+system call on all architectures, though some architectures like the
+x86 and amd64 offer faster methods of reading these counts.
+.It Em Sampling
+In sampling modes, PMCs are configured to sample the CPU
+instruction pointer after a configurable number of hardware events
+have been observed.
+These instruction pointer samples are directed to a log file for
+subsequent analysis.
+.El
+.Pp
+These modes of operation are orthogonal; a PMC may be configured to
+operate in one of four modes:
+.Bl -tag -width indent
+.It Process-private, counting
+These PMCs count hardware events whenever a thread in their attached process is
+scheduled on a CPU.
+These PMCs normally count from zero, but the initial count may be
+set using the
+.Ic PMC_OP_SETCOUNT
+operation.
+Applications can read the value of the PMC anytime using the
+.Ic PMC_OP_PMCRW
+operation.
+.It Process-private, sampling
+These PMCs sample the target process's instruction pointer after they
+have seen the configured number of hardware events.
+The PMCs only count events when a thread belonging to their attached
+process is active.
+The desired frequency of sampling is set using the
+.Ic PMC_OP_SETCOUNT
+operation prior to starting the PMC.
+Log files are configured using the
+.Ic PMC_OP_CONFIGURELOG
+operation.
+.It System-wide, counting
+These PMCs count hardware events independently of the processes that
+are executing.
+The current count on these PMCs can be read using the
+.Ic PMC_OP_PMCRW
+request.
+These PMCs normally count from zero, but the initial count may be
+set using the
+.Ic PMC_OP_SETCOUNT
+operation.
+.It System-wide, sampling
+These PMCs will periodically sample the instruction pointer of the CPU
+they are allocated on, and will write the sample to a log for further
+processing.
+The desired frequency of sampling is set using the
+.Ic PMC_OP_SETCOUNT
+operation prior to starting the PMC.
+Log files are configured using the
+.Ic PMC_OP_CONFIGURELOG
+operation.
+.Pp
+System-wide statistical sampling can only be enabled by a process with
+super-user privileges.
+.El
+.Pp
+Processes are allowed to allocate as many PMCs as the hardware and
+current operating conditions permit.
+Processes may mix allocations of system-wide and process-private
+PMCs.
+Multiple processes may concurrently use the facilities
+of the
+.Nm
+driver.
+.Pp
+Allocated PMCs are started using the
+.Ic PMC_OP_PMCSTART
+operation, and stopped using the
+.Ic PMC_OP_PMCSTOP
+operation.
+Stopping and starting a PMC is permitted at any time the owner process
+has a valid handle to the PMC.
+.Pp
+Process-private PMCs need to be attached to a target process before
+they can be used.
+Attaching a process to a PMC is done using the
+.Ic PMC_OP_PMCATTACH
+operation.
+An already attached PMC may be detached from its target process
+using the converse
+.Ic PMC_OP_PMCDETACH
+operation.
+Issuing an
+.Ic PMC_OP_PMCSTART
+operation on an as yet unattached PMC will cause it to be attached
+to its owner process.
+The following rules determine whether a given process may attach
+a PMC to another target process:
+.Bl -bullet -compact
+.It
+A non-jailed process with super-user privileges is allowed to attach
+to any other process in the system.
+.It
+Other processes are only allowed to attach to targets that they would
+be able to attach to for debugging (as determined by
+.Xr p_candebug 9 ) .
+.El
+.Pp
+PMCs are released using
+.Ic PMC_OP_PMCRELEASE .
+After a successful
+.Ic PMC_OP_PMCRELEASE
+operation the handle to the PMC will become invalid.
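+.Pp
+The following sketch illustrates this life cycle using the
+.Xr pmc 3
+wrapper API rather than raw driver operations.
+The
+.Li k8-dc-misses
+event name and the
+.Li PMC_MODE_TC
+and
+.Li PMC_CPU_ANY
+constants are assumptions appropriate to an AMD K8 CPU; error
+handling has been elided for brevity:
+.Bd -literal -offset indent
+pmc_id_t pmc;
+pmc_value_t count;
+
+pmc_init();			/* negotiate the API version */
+pmc_allocate("k8-dc-misses", PMC_MODE_TC, 0, PMC_CPU_ANY, &pmc);
+pmc_attach(pmc, getpid());	/* attach to the current process */
+pmc_start(pmc);
+/* ... run the code being measured ... */
+pmc_stop(pmc);
+pmc_read(pmc, &count);		/* retrieve the accumulated count */
+pmc_release(pmc);		/* the handle is now invalid */
+.Ed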
+.Ss MODIFIER FLAGS
+The
+.Ic PMC_OP_PMCALLOCATE
+operation supports the following flags that modify the behavior
+of an allocated PMC:
+.Bl -tag -width indent
+.It Dv PMC_F_DESCENDANTS
+This flag is valid only for a PMC being allocated in process-private
+mode.
+It signifies that the PMC will track hardware events for its
+target process and the target's current and future descendants.
+.El
+.Ss SIGNALS
+The
+.Nm
+driver may deliver signals to processes that have allocated PMCs:
+.Bl -tag -width indent
+.It Bq SIGIO
+A
+.Ic PMC_OP_PMCRW
+operation was attempted on a process-private PMC that does not have
+attached target processes.
+.It Bq SIGBUS
+The
+.Nm
+driver is being unloaded from the kernel.
+.El
+.Sh PROGRAMMING API
+The recommended way for application programs to use the facilities of
+the
+.Nm
+driver is using the API provided by the library
+.Xr pmc 3 .
+.Pp
+The
+.Nm
+driver operates using a system call number that is dynamically
+allotted to it when it is loaded into the kernel.
+.Pp
+The
+.Nm
+driver supports the following operations:
+.Bl -tag -width indent
+.It Ic PMC_OP_CONFIGURELOG
+Configure a log file for sampling mode PMCs.
+.It Ic PMC_OP_GETCPUINFO
+Retrieve information about the number of CPUs on the system and
+the number of hardware performance monitoring counters available per-CPU.
+.It Ic PMC_OP_GETDRIVERSTATS
+Retrieve module statistics (for analyzing the behavior of
+.Nm
+itself).
+.It Ic PMC_OP_GETMODULEVERSION
+Retrieve the version number of the API.
+.It Ic PMC_OP_GETPMCINFO
+Retrieve information about the current state of the PMCs on a
+given CPU.
+.It Ic PMC_OP_PMCADMIN
+Set the administrative state (i.e., whether enabled or disabled) for
+the hardware PMCs managed by the
+.Nm
+driver.
+.It Ic PMC_OP_PMCALLOCATE
+Allocate and configure a PMC.
+On successful allocation, a handle to the PMC (a small integer)
+is returned.
+.It Ic PMC_OP_PMCATTACH
+Attach a process mode PMC to a target process.
+The PMC will be active whenever a thread in the target process is
+scheduled on a CPU.
+.Pp
+If the
+.Dv PMC_F_DESCENDANTS
+flag had been specified at PMC allocation time, then the PMC is
+attached to all current and future descendants of the target process.
+.It Ic PMC_OP_PMCDETACH
+Detach a PMC from its target process.
+.It Ic PMC_OP_PMCRELEASE
+Release a PMC.
+.It Ic PMC_OP_PMCRW
+Read and write a PMC.
+This operation is valid only for PMCs configured in counting modes.
+.It Ic PMC_OP_SETCOUNT
+Set the initial count (for counting mode PMCs) or the desired sampling
+rate (for sampling mode PMCs).
+.It Ic PMC_OP_PMCSTART
+Start a PMC.
+.It Ic PMC_OP_PMCSTOP
+Stop a PMC.
+.It Ic PMC_OP_WRITELOG
+Insert a timestamped user record into the log file.
+.El
+.Ss i386 SPECIFIC API
+Some i386 family CPUs support the RDPMC instruction which allows a
+user process to read a PMC value without needing to invoke a
+.Ic PMC_OP_PMCRW
+operation.
+On such CPUs, the machine address associated with an allocated PMC is
+retrievable using the
+.Ic PMC_OP_PMCX86GETMSR
+system call.
+.Bl -tag -width indent
+.It Ic PMC_OP_PMCX86GETMSR
+Retrieve the MSR (machine specific register) number associated with
+the given PMC handle.
+.Pp
+This operation is only valid for PMCs allocated in process-private modes.
+.El
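+.Pp
+A sketch of the intended usage pattern, given an allocated PMC handle
+.Va pmc ,
+follows; the
+.Fn rdpmc
+inline function is assumed to be available from
+.In machine/cpufunc.h :
+.Bd -literal -offset indent
+uint32_t msr;
+pmc_value_t v;
+
+if (pmc_x86_get_msr(pmc, &msr) < 0)
+	err(EX_OSERR, "cannot retrieve MSR number");
+v = rdpmc(msr);		/* read the counter without a system call */
+.Ed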
+.Ss amd64 SPECIFIC API
+AMD64 CPUs support the RDPMC instruction which allows a
+user process to read a PMC value without needing to invoke a
+.Ic PMC_OP_PMCRW
+operation.
+The machine address associated with an allocated PMC is
+retrievable using the
+.Ic PMC_OP_PMCX86GETMSR
+system call.
+.Bl -tag -width indent
+.It Ic PMC_OP_PMCX86GETMSR
+Retrieve the MSR (machine specific register) number associated with
+the given PMC handle.
+.Pp
+This operation is only valid for PMCs allocated in process-private modes.
+.El
+.Sh SYSCTL TUNABLES
+The behavior of
+.Nm
+is influenced by the following
+.Xr sysctl 8
+tunables:
+.Bl -tag -width indent
+.It Va kern.hwpmc.debugflags
+(Only available if the
+.Nm
+driver was compiled with
+.Fl DDEBUG ) .
+Control the verbosity of debug messages from the
+.Nm
+driver.
+.It Va kern.hwpmc.hashsize
+The number of rows in the hash-tables used to keep track of owner and
+target processes.
+.It Va kern.hwpmc.mtxpoolsize
+The size of the spin mutex pool used by the PMC driver.
+.It Va kern.hwpmc.pcpubuffersize
+The size of the per-CPU hash table used when performing system-wide
+statistical profiling.
+.It Va security.bsd.unprivileged_syspmcs
+If set to non-zero, allow unprivileged processes to allocate system-wide
+PMCs.
+The default value is 0.
+.It Va security.bsd.unprivileged_proc_debug
+If set to 0, the
+.Nm
+driver will only allow privileged processes to attach PMCs to other
+processes.
+.El
+.Pp
+These variables may be set in the kernel environment using
+.Xr kenv 1
+before
+.Nm
+is loaded.
+.Sh SECURITY CONSIDERATIONS
+PMCs may be used to monitor the actual hardware behaviour of the system.
+In situations where this constitutes an undesirable information leak,
+the following options are available:
+.Bl -enum
+.It
+Set the
+.Xr sysctl 8
+tunable
+.Va "security.bsd.unprivileged_syspmcs"
+to 0.
+.Pp
+This ensures that unprivileged processes cannot allocate system-wide
+PMCs and thus cannot observe the hardware behavior of the system
+as a whole.
+.Pp
+This tunable may also be set at boot time using
+.Xr loader 8 ,
+or with
+.Xr kenv 1
+prior to loading the
+.Nm
+driver into the kernel.
+.It
+Set the
+.Xr sysctl 8
+tunable
+.Va "security.bsd.unprivileged_proc_debug"
+to 0.
+.Pp
+This will ensure that an unprivileged process cannot attach a PMC
+to any process other than itself and thus cannot observe the hardware
+behavior of other processes with the same credentials.
+.El
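+.Pp
+As an illustrative sketch, a privileged program could set the first of
+these tunables programmatically using the standard
+.Xr sysctlbyname 3
+interface:
+.Bd -literal -offset indent
+int zero = 0;
+
+if (sysctlbyname("security.bsd.unprivileged_syspmcs", NULL, NULL,
+    &zero, sizeof(zero)) < 0)
+	err(EX_OSERR, "cannot set sysctl");
+.Ed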
+.Pp
+System administrators should note that on IA-32 platforms
+.Fx
+makes the content of the IA-32 TSC counter available to all processes
+via the RDTSC instruction.
+.Sh IMPLEMENTATION NOTES
+.Ss i386 TSC Handling
+Historically, on the x86 architecture,
+.Fx
+has permitted user processes running at a processor CPL of 3 to
+read the TSC using the RDTSC instruction.
+The
+.Nm
+driver preserves this semantic.
+.Pp
+TSCs are treated as shared, read-only counters and hence are
+only allowed to be allocated in system-wide counting mode.
+.Ss Intel P4/HTT Handling
+On CPUs with HTT support, Intel P4 PMCs are capable of qualifying
+only a subset of hardware events on a per-logical CPU basis.
+Consequently, if HTT is enabled on a system with Intel Pentium 4
+PMCs, then the
+.Nm
+driver will reject allocation requests for process-private PMCs that
+request counting of hardware events that cannot be counted separately
+for each logical CPU.
+.Sh ERRORS
+A command issued to the
+.Nm
+driver may fail with the following errors:
+.Bl -tag -width Er
+.It Bq Er EBUSY
+An
+.Ic OP_CONFIGURELOG
+operation was requested while an existing log was active.
+.It Bq Er EBUSY
+A
+.Ic DISABLE
+operation was requested using the
+.Ic PMC_OP_PMCADMIN
+request for a set of hardware resources currently in use for
+process-private PMCs.
+.It Bq Er EBUSY
+A
+.Ic PMC_OP_PMCADMIN
+operation was requested on an active system mode PMC.
+.It Bq Er EBUSY
+A
+.Ic PMC_OP_PMCATTACH
+operation was requested for a target process that already had another
+PMC using the same hardware resources attached to it.
+.It Bq Er EBUSY
+An
+.Ic PMC_OP_PMCRW
+request writing a new value was issued on a PMC that was active.
+.It Bq Er EBUSY
+An
+.Ic PMC_OP_PMCSETCOUNT
+request was issued on a PMC that was active.
+.It Bq Er EEXIST
+A
+.Ic PMC_OP_PMCATTACH
+request was reissued for a target process that already is the target
+of this PMC.
+.It Bq Er EFAULT
+A bad address was passed in to the driver.
+.It Bq Er EINVAL
+A process specified an invalid PMC handle.
+.It Bq Er EINVAL
+An invalid CPU number was passed in for an
+.Ic PMC_OP_GETPMCINFO
+operation.
+.It Bq Er EINVAL
+An invalid CPU number was passed in for an
+.Ic PMC_OP_PMCADMIN
+operation.
+.It Bq Er EINVAL
+An invalid operation request was passed in for an
+.Ic PMC_OP_PMCADMIN
+operation.
+.It Bq Er EINVAL
+An invalid PMC id was passed in for an
+.Ic PMC_OP_PMCADMIN
+operation.
+.It Bq Er EINVAL
+A suitable PMC matching the parameters passed in to a
+.Ic PMC_OP_PMCALLOCATE
+request could not be allocated.
+.It Bq Er EINVAL
+An invalid PMC mode was requested during a
+.Ic PMC_OP_PMCALLOCATE
+request.
+.It Bq Er EINVAL
+An invalid CPU number was specified during a
+.Ic PMC_OP_PMCALLOCATE
+request.
+.It Bq Er EINVAL
+A CPU other than
+.Li PMC_CPU_ANY
+was specified in a
+.Ic PMC_OP_PMCALLOCATE
+request for a process-private PMC.
+.It Bq Er EINVAL
+A CPU number of
+.Li PMC_CPU_ANY
+was specified in a
+.Ic PMC_OP_PMCALLOCATE
+request for a system-wide PMC.
+.It Bq Er EINVAL
+The
+.Ar pm_flags
+argument to an
+.Ic PMC_OP_PMCALLOCATE
+request contained unknown flags.
+.It Bq Er EINVAL
+A PMC allocated for system-wide operation was specified with a
+.Ic PMC_OP_PMCATTACH
+request.
+.It Bq Er EINVAL
+The
+.Ar pm_pid
+argument to a
+.Ic PMC_OP_PMCATTACH
+request specified an illegal process id.
+.It Bq Er EINVAL
+A
+.Ic PMC_OP_PMCDETACH
+request was issued for a PMC not attached to the target process.
+.It Bq Er EINVAL
+Argument
+.Ar pm_flags
+to a
+.Ic PMC_OP_PMCRW
+request contained illegal flags.
+.It Bq Er EINVAL
+A
+.Ic PMC_OP_PMCX86GETMSR
+operation was requested for a PMC not in process-virtual mode.
+.It Bq Er EINVAL
+(On Intel Pentium 4 CPUs with HTT support) An allocation request for
+a process-private PMC was issued for an event that does not support
+counting on a per-logical CPU basis.
+.It Bq Er ENOMEM
+The system was not able to allocate kernel memory.
+.It Bq Er ENOSYS
+(i386 architectures) A
+.Ic PMC_OP_PMCX86GETMSR
+operation was requested for hardware that does not support reading
+PMCs directly with the RDPMC instruction.
+.It Bq Er ENXIO
+A
+.Ic PMC_OP_GETPMCINFO
+operation was requested for a disabled CPU.
+.It Bq Er ENXIO
+A
+.Ic PMC_OP_PMCALLOCATE
+request attempted to allocate a system-wide PMC on a disabled CPU.
+.It Bq Er ENXIO
+A
+.Ic PMC_OP_PMCSTART
+or
+.Ic PMC_OP_PMCSTOP
+request was issued for a system-wide PMC that was allocated on a
+currently disabled CPU.
+.It Bq Er EPERM
+A
+.Ic PMC_OP_PMCADMIN
+request was issued by a process without super-user
+privilege or by a jailed super-user process.
+.It Bq Er EPERM
+A
+.Ic PMC_OP_PMCATTACH
+operation was issued for a target process that the current process
+does not have permission to attach to.
+.It Bq Er ESRCH
+A process issued a PMC operation request without having allocated any PMCs.
+.It Bq Er ESRCH
+A
+.Ic PMC_OP_PMCATTACH
+request specified a non-existent process id.
+.It Bq Er ESRCH
+The target process for a
+.Ic PMC_OP_PMCDETACH
+operation is not being monitored by the
+.Nm
+driver.
+.El
+.Sh BUGS
+The kernel driver requires all CPUs in an SMP system to be symmetric
+with respect to their performance monitoring counter resources.
+.Pp
+The driver samples the state of the kernel's logical processor support
+at the time of initialization (i.e., at module load time).
+On CPUs supporting logical processors, the driver could misbehave if
+logical processors are subsequently enabled or disabled while the
+driver is active.
+.Sh SEE ALSO
+.Xr kenv 1 ,
+.Xr pmc 3 ,
+.Xr kldload 8 ,
+.Xr pmccontrol 8 ,
+.Xr pmcstat 8 ,
+.Xr sysctl 8 ,
+.Xr p_candebug 9
diff --git a/share/mk/bsd.libnames.mk b/share/mk/bsd.libnames.mk
index 5186772..063f974 100644
--- a/share/mk/bsd.libnames.mk
+++ b/share/mk/bsd.libnames.mk
@@ -112,6 +112,7 @@ MINUSLPAM+= -lypclnt
LIBPANEL?= ${DESTDIR}${LIBDIR}/libpanel.a
LIBPCAP?= ${DESTDIR}${LIBDIR}/libpcap.a
+LIBPMC?= ${DESTDIR}${LIBDIR}/libpmc.a
LIBPTHREAD?= ${DESTDIR}${LIBDIR}/libpthread.a
LIBRADIUS?= ${DESTDIR}${LIBDIR}/libradius.a
LIBREADLINE?= ${DESTDIR}${LIBDIR}/libreadline.a
diff --git a/sys/amd64/include/pmc_mdep.h b/sys/amd64/include/pmc_mdep.h
new file mode 100644
index 0000000..ef89f90
--- /dev/null
+++ b/sys/amd64/include/pmc_mdep.h
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 2003, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Machine dependent interfaces */
+
+#ifndef _MACHINE_PMC_MDEP_H
+#define _MACHINE_PMC_MDEP_H 1
+
+#include <sys/pmc.h>
+
+/* AMD K8 PMCs */
+
+#define K8_NPMCS 5 /* 1 TSC + 4 PMCs */
+
+#define K8_PMC_COUNTERMASK 0xFF000000
+#define K8_PMC_TO_COUNTER(x) (((x) << 24) & K8_PMC_COUNTERMASK)
+#define K8_PMC_INVERT (1 << 23)
+#define K8_PMC_ENABLE (1 << 22)
+#define K8_PMC_INT (1 << 20)
+#define K8_PMC_PC (1 << 19)
+#define K8_PMC_EDGE (1 << 18)
+#define K8_PMC_OS (1 << 17)
+#define K8_PMC_USR (1 << 16)
+
+#define K8_PMC_UNITMASK_M 0x10
+#define K8_PMC_UNITMASK_O 0x08
+#define K8_PMC_UNITMASK_E 0x04
+#define K8_PMC_UNITMASK_S 0x02
+#define K8_PMC_UNITMASK_I 0x01
+#define K8_PMC_UNITMASK_MOESI 0x1F
+
+#define K8_PMC_UNITMASK 0xFF00
+#define K8_PMC_EVENTMASK 0x00FF
+#define K8_PMC_TO_UNITMASK(x) (((x) << 8) & K8_PMC_UNITMASK)
+#define K8_PMC_TO_EVENTMASK(x) ((x) & 0xFF)
+#define K8_VALID_BITS (K8_PMC_COUNTERMASK | K8_PMC_INVERT | \
+ K8_PMC_ENABLE | K8_PMC_INT | K8_PMC_PC | K8_PMC_EDGE | K8_PMC_OS | \
+ K8_PMC_USR | K8_PMC_UNITMASK | K8_PMC_EVENTMASK)
+
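+/*
+ * Illustrative sketch (not part of the driver): an event selector
+ * value counting event 0x76 (CPU_CLK_UNHALTED) in user mode, with an
+ * interrupt raised on counter overflow, could be composed from the
+ * macros above as
+ *
+ *	K8_PMC_TO_EVENTMASK(0x76) | K8_PMC_USR | K8_PMC_INT |
+ *	    K8_PMC_ENABLE
+ *
+ * which sets only bits covered by K8_VALID_BITS.
+ */
+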
+#ifdef _KERNEL
+
+/*
+ * Prototypes
+ */
+
+#if defined(__amd64__)
+struct pmc_mdep *pmc_amd_initialize(void);
+#endif /* defined(__amd64__) */
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_PMC_MDEP_H */
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 3f2677d..2d5ad27 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -422,6 +422,19 @@ options COMPILING_LINT
#####################################################################
+# PERFORMANCE MONITORING OPTIONS
+
+#
+# The hwpmc driver allows the use of in-CPU performance monitoring
+# counters.  The base kernel needs to be configured with the
+# 'options' line, while the hwpmc device can be either compiled in
+# or loaded as a loadable kernel module.
+#
+device hwpmc # Driver (also a loadable module)
+options HWPMC_HOOKS # Other necessary kernel hooks
+
+
+#####################################################################
# NETWORKING OPTIONS
#
diff --git a/sys/conf/files b/sys/conf/files
index ccf3ac7..a396d70 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1044,6 +1044,10 @@ gnu/ext2fs/ext2_subr.c optional ext2fs
gnu/ext2fs/ext2_vfsops.c optional ext2fs
gnu/ext2fs/ext2_vnops.c optional ext2fs
#
+# Support for hardware performance monitoring counters
+#
+hwpmc/hwpmc_mod.c optional hwpmc
+#
# isdn4bsd device drivers
#
i4b/driver/i4b_trace.c optional i4btrc
@@ -1157,6 +1161,7 @@ kern/kern_mtxpool.c standard
kern/kern_mutex.c standard
kern/kern_ntptime.c standard
kern/kern_physio.c standard
+kern/kern_pmc.c standard
kern/kern_poll.c optional device_polling
kern/kern_proc.c standard
kern/kern_prot.c standard
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 30da61b..f3c69f8 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -170,6 +170,7 @@ geom/geom_bsd.c standard
geom/geom_bsd_enc.c standard
geom/geom_mbr.c standard
geom/geom_mbr_enc.c standard
+hwpmc/hwpmc_amd.c optional hwpmc
isa/atkbd_isa.c optional atkbd
isa/atkbdc_isa.c optional atkbdc
isa/psm.c optional psm
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index 9b3ce26..d46b2c3 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -216,6 +216,8 @@ geom/geom_bsd_enc.c standard
geom/geom_mbr.c standard
geom/geom_mbr_enc.c standard
dev/acpica/acpi_if.m standard
+hwpmc/hwpmc_amd.c optional hwpmc
+hwpmc/hwpmc_intel.c optional hwpmc
i386/acpica/OsdEnvironment.c optional acpi
i386/acpica/acpi_machdep.c optional acpi
i386/acpica/acpi_wakeup.c optional acpi
diff --git a/sys/conf/options b/sys/conf/options
index aac7fad..9be76d8 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -699,3 +699,6 @@ NI4BISPPP opt_i4b.h
# VFS options
LOOKUP_SHARED opt_vfs.h
+
+# HWPMC options
+HWPMC_HOOKS opt_global.h
diff --git a/sys/dev/hwpmc/hwpmc_amd.c b/sys/dev/hwpmc/hwpmc_amd.c
new file mode 100644
index 0000000..c3bb56c
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_amd.c
@@ -0,0 +1,996 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Support for the AMD K7 and later processors */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/* AMD K7 and K8 PMCs */
+
+#define AMD_PMC_EVSEL_0 0xC0010000
+#define AMD_PMC_EVSEL_1 0xC0010001
+#define AMD_PMC_EVSEL_2 0xC0010002
+#define AMD_PMC_EVSEL_3 0xC0010003
+
+#define AMD_PMC_PERFCTR_0 0xC0010004
+#define AMD_PMC_PERFCTR_1 0xC0010005
+#define AMD_PMC_PERFCTR_2 0xC0010006
+#define AMD_PMC_PERFCTR_3 0xC0010007
+
+#define K7_VALID_EVENT_CODE(c) (((c) >= 0x40 && (c) <= 0x47) || \
+ ((c) >= 0x80 && (c) <= 0x85) || ((c) >= 0xC0 && (c) <= 0xC7) || \
+ ((c) >= 0xCD && (c) <= 0xCF))
+
+#define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \
+ PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
+
+/* reserved bits include bit 21 and the top two bits of the unit mask */
+#define K7_PMC_RESERVED ((1 << 21) | (3 << 13))
+
+#define K8_PMC_RESERVED (1 << 21)
+
+#define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)
+#define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
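+
+/*
+ * Illustrative note: sampling-mode PMCs are loaded with the two's
+ * complement of the desired count (see amd_write_pmc() below), so a
+ * freshly loaded 48-bit counter has bit 47 set.  For example, a
+ * sampling count of 1000 is written as 2^48 - 1000 = 0xFFFFFFFFFC18;
+ * after 1000 increments the counter wraps through zero, bit 47
+ * clears, and AMD_PMC_HAS_OVERFLOWED() reports true.
+ */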
+
+#if __i386__
+#define AMD_NPMCS K7_NPMCS
+#define AMD_PMC_CLASS PMC_CLASS_K7
+#define AMD_PMC_COUNTERMASK K7_PMC_COUNTERMASK
+#define AMD_PMC_TO_COUNTER(x) K7_PMC_TO_COUNTER(x)
+#define AMD_PMC_INVERT K7_PMC_INVERT
+#define AMD_PMC_ENABLE K7_PMC_ENABLE
+#define AMD_PMC_INT K7_PMC_INT
+#define AMD_PMC_PC K7_PMC_PC
+#define AMD_PMC_EDGE K7_PMC_EDGE
+#define AMD_PMC_OS K7_PMC_OS
+#define AMD_PMC_USR K7_PMC_USR
+
+#define AMD_PMC_UNITMASK_M K7_PMC_UNITMASK_M
+#define AMD_PMC_UNITMASK_O K7_PMC_UNITMASK_O
+#define AMD_PMC_UNITMASK_E K7_PMC_UNITMASK_E
+#define AMD_PMC_UNITMASK_S K7_PMC_UNITMASK_S
+#define AMD_PMC_UNITMASK_I K7_PMC_UNITMASK_I
+
+#define AMD_PMC_UNITMASK K7_PMC_UNITMASK
+#define AMD_PMC_EVENTMASK K7_PMC_EVENTMASK
+#define AMD_PMC_TO_UNITMASK(x) K7_PMC_TO_UNITMASK(x)
+#define AMD_PMC_TO_EVENTMASK(x) K7_PMC_TO_EVENTMASK(x)
+#define AMD_VALID_BITS K7_VALID_BITS
+
+#define AMD_PMC_CLASS_NAME "K7-"
+
+#elif __amd64__
+
+#define AMD_NPMCS K8_NPMCS
+#define AMD_PMC_CLASS PMC_CLASS_K8
+#define AMD_PMC_COUNTERMASK K8_PMC_COUNTERMASK
+#define AMD_PMC_TO_COUNTER(x) K8_PMC_TO_COUNTER(x)
+#define AMD_PMC_INVERT K8_PMC_INVERT
+#define AMD_PMC_ENABLE K8_PMC_ENABLE
+#define AMD_PMC_INT K8_PMC_INT
+#define AMD_PMC_PC K8_PMC_PC
+#define AMD_PMC_EDGE K8_PMC_EDGE
+#define AMD_PMC_OS K8_PMC_OS
+#define AMD_PMC_USR K8_PMC_USR
+
+#define AMD_PMC_UNITMASK_M K8_PMC_UNITMASK_M
+#define AMD_PMC_UNITMASK_O K8_PMC_UNITMASK_O
+#define AMD_PMC_UNITMASK_E K8_PMC_UNITMASK_E
+#define AMD_PMC_UNITMASK_S K8_PMC_UNITMASK_S
+#define AMD_PMC_UNITMASK_I K8_PMC_UNITMASK_I
+
+#define AMD_PMC_UNITMASK K8_PMC_UNITMASK
+#define AMD_PMC_EVENTMASK K8_PMC_EVENTMASK
+#define AMD_PMC_TO_UNITMASK(x) K8_PMC_TO_UNITMASK(x)
+#define AMD_PMC_TO_EVENTMASK(x) K8_PMC_TO_EVENTMASK(x)
+#define AMD_VALID_BITS K8_VALID_BITS
+
+#define AMD_PMC_CLASS_NAME "K8-"
+
+#else
+#error Unsupported architecture.
+#endif
+
+/* AMD K7 & K8 PMCs */
+struct amd_descr {
+ struct pmc_descr pm_descr; /* "base class" */
+ uint32_t pm_evsel; /* address of EVSEL register */
+ uint32_t pm_perfctr; /* address of PERFCTR register */
+};
+
+static const struct amd_descr amd_pmcdesc[AMD_NPMCS] =
+{
+ {
+ .pm_descr =
+ {
+ .pd_name = "TSC",
+ .pd_class = PMC_CLASS_TSC,
+ .pd_caps = PMC_CAP_READ,
+ .pd_width = 64
+ },
+ .pm_evsel = MSR_TSC,
+ .pm_perfctr = 0 /* unused */
+ },
+
+ {
+ .pm_descr =
+ {
+ .pd_name = AMD_PMC_CLASS_NAME "0",
+ .pd_class = AMD_PMC_CLASS,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_0,
+ .pm_perfctr = AMD_PMC_PERFCTR_0
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = AMD_PMC_CLASS_NAME "1",
+ .pd_class = AMD_PMC_CLASS,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_1,
+ .pm_perfctr = AMD_PMC_PERFCTR_1
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = AMD_PMC_CLASS_NAME "2",
+ .pd_class = AMD_PMC_CLASS,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_2,
+ .pm_perfctr = AMD_PMC_PERFCTR_2
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = AMD_PMC_CLASS_NAME "3",
+ .pd_class = AMD_PMC_CLASS,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_3,
+ .pm_perfctr = AMD_PMC_PERFCTR_3
+ }
+};
+
+struct amd_event_code_map {
+ enum pmc_event pe_ev; /* enum value */
+ uint8_t pe_code; /* encoded event mask */
+ uint8_t pe_mask; /* bits allowed in unit mask */
+};
+
+const struct amd_event_code_map amd_event_codes[] = {
+#if __i386__
+ { PMC_EV_K7_DC_ACCESSES, 0x40, 0 },
+ { PMC_EV_K7_DC_MISSES, 0x41, 0 },
+ { PMC_EV_K7_DC_REFILLS_FROM_L2, 0x42, K7_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_DC_REFILLS_FROM_SYSTEM, 0x43, K7_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_DC_WRITEBACKS, 0x44, K7_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
+ { PMC_EV_K7_L1_AND_L2_DTLB_MISSES, 0x46, 0 },
+ { PMC_EV_K7_MISALIGNED_REFERENCES, 0x47, 0 },
+
+ { PMC_EV_K7_IC_FETCHES, 0x80, 0 },
+ { PMC_EV_K7_IC_MISSES, 0x81, 0 },
+
+ { PMC_EV_K7_L1_ITLB_MISSES, 0x84, 0 },
+ { PMC_EV_K7_L1_L2_ITLB_MISSES, 0x85, 0 },
+
+ { PMC_EV_K7_RETIRED_INSTRUCTIONS, 0xC0, 0 },
+ { PMC_EV_K7_RETIRED_OPS, 0xC1, 0 },
+ { PMC_EV_K7_RETIRED_BRANCHES, 0xC2, 0 },
+ { PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
+ { PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 0xC4, 0 },
+ { PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
+ { PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
+ { PMC_EV_K7_RETIRED_RESYNC_BRANCHES, 0xC7, 0 },
+ { PMC_EV_K7_INTERRUPTS_MASKED_CYCLES, 0xCD, 0 },
+ { PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
+ { PMC_EV_K7_HARDWARE_INTERRUPTS, 0xCF, 0 }
+#endif
+
+#if __amd64__
+ { PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F },
+ { PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 },
+ { PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 },
+
+ { PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
+ 0x21, 0x00 },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
+ { PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 },
+ { PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 },
+ { PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 },
+ { PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 },
+
+ { PMC_EV_K8_DC_ACCESS, 0x40, 0x00 },
+ { PMC_EV_K8_DC_MISS, 0x41, 0x00 },
+ { PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F },
+ { PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F },
+ { PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F },
+ { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 },
+ { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 },
+ { PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 },
+ { PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 },
+ { PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
+ { PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 },
+ { PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
+ { PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 },
+
+ { PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 },
+ { PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F },
+ { PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 },
+ { PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 },
+
+ { PMC_EV_K8_IC_FETCH, 0x80, 0x00 },
+ { PMC_EV_K8_IC_MISS, 0x81, 0x00 },
+ { PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 },
+ { PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 },
+ { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 },
+ { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 },
+ { PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
+ { PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 },
+ { PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 },
+ { PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 },
+
+ { PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
+ 0xCA, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F },
+ { PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
+ 0xCC, 0x07 },
+ { PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 },
+ { PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
+ { PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 },
+
+ { PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
+ 0xD2, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
+ 0xD5, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
+ 0xD6, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
+ 0xD9, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
+ 0xDA, 0x00 },
+ { PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 },
+
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
+ 0xE2, 0x00 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
+ { PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F },
+ { PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
+#endif
+
+};
+
+const int amd_event_codes_size =
+ sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
+
+/*
+ * read a pmc register
+ */
+
+static int
+amd_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ enum pmc_mode mode;
+ const struct amd_descr *pd;
+ struct pmc *pm;
+ const struct pmc_hw *phw;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &amd_pmcdesc[ri];
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ mode = pm->pm_mode;
+
+ PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);
+
+ /* Reading the TSC is a special case */
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
+ KASSERT(PMC_IS_COUNTING_MODE(mode),
+ ("[amd,%d] TSC counter in non-counting mode", __LINE__));
+ *v = rdtsc();
+ PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
+ return 0;
+ }
+
+ KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+
+ tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
+ if (PMC_IS_SAMPLING_MODE(mode))
+ *v = -tmp;
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
+
+ return 0;
+}
+
+/*
+ * Write a PMC MSR.
+ */
+
+static int
+amd_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ const struct amd_descr *pd;
+ struct pmc *pm;
+ const struct pmc_hw *phw;
+ enum pmc_mode mode;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &amd_pmcdesc[ri];
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
+ cpu, ri));
+
+ mode = pm->pm_mode;
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+
+ /* use 2's complement of the count for sampling mode PMCs */
+ if (PMC_IS_SAMPLING_MODE(mode))
+ v = -v;
+
+ PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ /* write the PMC value */
+ wrmsr(pd->pm_perfctr, v);
+ return 0;
+}
+
+/*
+ * configure hardware pmc according to the configuration recorded in
+ * pmc 'pm'.
+ */
+
+static int
+amd_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[amd,%d] hwpmc not unconfigured before re-config", __LINE__));
+
+ phw->phw_pmc = pm;
+ return 0;
+}
+
+/*
+ * Machine dependent actions taken during the context switch in of a
+ * thread.
+ */
+
+static int
+amd_switch_in(struct pmc_cpu *pc)
+{
+ (void) pc;
+
+ /* enable the RDPMC instruction */
+ load_cr4(rcr4() | CR4_PCE);
+ return 0;
+}
+
+/*
+ * Machine dependent actions taken during the context switch out of a
+ * thread.
+ */
+
+static int
+amd_switch_out(struct pmc_cpu *pc)
+{
+ (void) pc;
+
+ /* disallow RDPMC instruction */
+ load_cr4(rcr4() & ~CR4_PCE);
+ return 0;
+}
+
+/*
+ * Check if a given allocation is feasible.
+ */
+
+static int
+amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ int i;
+ uint32_t allowed_unitmask, caps, config, unitmask;
+ enum pmc_event pe;
+ const struct pmc_descr *pd;
+
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row index %d", __LINE__, ri));
+
+ pd = &amd_pmcdesc[ri].pm_descr;
+
+ /* check class match */
+ if (pd->pd_class != pm->pm_class)
+ return EINVAL;
+
+ caps = pm->pm_caps;
+
+ PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);
+
+ if ((pd->pd_caps & caps) != caps)
+ return EPERM;
+ if (pd->pd_class == PMC_CLASS_TSC) {
+		/* TSCs are always allocated in system-wide counting mode */
+ if (a->pm_ev != PMC_EV_TSC_TSC ||
+ a->pm_mode != PMC_MODE_SC)
+ return EINVAL;
+ return 0;
+ }
+
+ KASSERT(pd->pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));
+
+ pe = a->pm_ev;
+
+ /* map ev to the correct event mask code */
+ config = allowed_unitmask = 0;
+ for (i = 0; i < amd_event_codes_size; i++)
+ if (amd_event_codes[i].pe_ev == pe) {
+ config =
+ AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
+ allowed_unitmask =
+ AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
+ break;
+ }
+ if (i == amd_event_codes_size)
+ return EINVAL;
+
+ unitmask = a->pm_amd_config & AMD_PMC_UNITMASK;
+ if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
+ return EINVAL;
+
+ if (unitmask && (caps & PMC_CAP_QUALIFIER))
+ config |= unitmask;
+
+ if (caps & PMC_CAP_THRESHOLD)
+ config |= a->pm_amd_config & AMD_PMC_COUNTERMASK;
+
+ /* set at least one of the 'usr' or 'os' caps */
+ if (caps & PMC_CAP_USER)
+ config |= AMD_PMC_USR;
+ if (caps & PMC_CAP_SYSTEM)
+ config |= AMD_PMC_OS;
+ if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
+ config |= (AMD_PMC_USR|AMD_PMC_OS);
+
+ if (caps & PMC_CAP_EDGE)
+ config |= AMD_PMC_EDGE;
+ if (caps & PMC_CAP_INVERT)
+ config |= AMD_PMC_INVERT;
+ if (caps & PMC_CAP_INTERRUPT)
+ config |= AMD_PMC_INT;
+
+ pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */
+
+ PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
+
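+/*
+ * Illustrative example: for PMC_EV_K8_DC_REFILL_FROM_L2 (event code
+ * 0x42) the event table above permits the five MOESI qualifier bits
+ * (0x1F) in the unit-mask field, so an allocation request with
+ * PMC_CAP_QUALIFIER may set any subset of those bits; a request
+ * setting any other unit-mask bit fails with EINVAL.
+ */
+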
+/*
+ * Release machine dependent state associated with a PMC. This is a
+ * no-op on this architecture.
+ */
+
+/* ARGSUSED0 */
+static int
+amd_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+#if DEBUG
+ const struct amd_descr *pd;
+#endif
+ struct pmc_hw *phw;
+
+ (void) pmc;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+#if DEBUG
+ pd = &amd_pmcdesc[ri];
+ if (pd->pm_descr.pd_class == AMD_PMC_CLASS)
+ KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] PMC %d released while active", __LINE__, ri));
+#endif
+
+ return 0;
+}
+
+/*
+ * start a PMC.
+ */
+
+static int
+amd_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ const struct amd_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &amd_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0; /* TSCs are always running */
+
+ KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+
+ KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
+ ri, cpu, pd->pm_descr.pd_name));
+
+ /* turn on the PMC ENABLE bit */
+ config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
+
+ PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);
+
+ wrmsr(pd->pm_evsel, config);
+ return 0;
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+amd_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ const struct amd_descr *pd;
+ uint64_t config;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &amd_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
+ cpu, ri));
+
+ /* can't stop a TSC */
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+
+ KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
+ __LINE__, ri, cpu, pd->pm_descr.pd_name));
+
+ PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);
+
+ /* turn off the PMC ENABLE bit */
+ config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
+ wrmsr(pd->pm_evsel, config);
+ return 0;
+}
+
+/*
+ * Interrupt handler. This function needs to return '1' if the
+ * interrupt was this CPU's PMCs or '0' otherwise. It is not allowed
+ * to sleep or do anything a 'fast' interrupt handler is not allowed
+ * to do.
+ */
+
+static int
+amd_intr(int cpu, uintptr_t eip)
+{
+ int i, retval;
+ enum pmc_mode mode;
+ uint32_t perfctr;
+ struct pmc *pm;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] out of range CPU %d", __LINE__, cpu));
+
+ retval = 0;
+
+ pc = pmc_pcpu[cpu];
+
+ /*
+ * look for all PMCs that have interrupted:
+ * - skip over the TSC [PMC#0]
+ * - look for a PMC with a valid 'struct pmc' association
+ * - look for a PMC in (a) sampling mode and (b) which has
+ * overflowed. If found, we update the process's
+ * histogram or send it a profiling signal by calling
+ * the appropriate helper function.
+ */
+
+ for (i = 1; i < AMD_NPMCS; i++) {
+
+ phw = pc->pc_hwpmcs[i];
+ perfctr = amd_pmcdesc[i].pm_perfctr;
+ KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));
+
+ if ((pm = phw->phw_pmc) == NULL ||
+ pm->pm_state != PMC_STATE_RUNNING) {
+ atomic_add_int(&pmc_stats.pm_intr_ignored, 1);
+ continue;
+ }
+
+ mode = pm->pm_mode;
+ if (PMC_IS_SAMPLING_MODE(mode) &&
+ AMD_PMC_HAS_OVERFLOWED(perfctr)) {
+ atomic_add_int(&pmc_stats.pm_intr_processed, 1);
+ if (PMC_IS_SYSTEM_MODE(mode))
+ pmc_update_histogram(phw, eip);
+ else if (PMC_IS_VIRTUAL_MODE(mode))
+ pmc_send_signal(pm);
+ retval = 1;
+ }
+ }
+ return retval;
+}
+
+/*
+ * describe a PMC
+ */
+static int
+amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ const struct amd_descr *pd;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &amd_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return error;
+
+ pi->pm_class = pd->pm_descr.pd_class;
+ pi->pm_caps = pd->pm_descr.pd_caps;
+ pi->pm_width = pd->pm_descr.pd_width;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * i386 specific entry points
+ */
+
+/*
+ * return the MSR address of the given PMC.
+ */
+
+static int
+amd_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = amd_pmcdesc[ri].pm_perfctr;
+ return 0;
+}
+
+/*
+ * processor dependent initialization.
+ */
+
+/*
+ * Per-processor data structure
+ *
+ * [common stuff]
+ * [5 struct pmc_hw pointers]
+ * [5 struct pmc_hw structures]
+ */
+
+struct amd_cpu {
+ struct pmc_cpu pc_common;
+ struct pmc_hw *pc_hwpmcs[AMD_NPMCS];
+ struct pmc_hw pc_amdpmcs[AMD_NPMCS];
+};
+
+
+static int
+amd_init(int cpu)
+{
+ int n;
+ struct amd_cpu *pcs;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);
+
+ MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ if (pcs == NULL)
+ return ENOMEM;
+
+ phw = &pcs->pc_amdpmcs[0];
+
+ /*
+ * Initialize the per-cpu mutex and set the content of the
+ * hardware descriptors to a known state.
+ */
+
+ for (n = 0; n < AMD_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pcs->pc_hwpmcs[n] = phw;
+ }
+
+ /* Mark the TSC as shareable */
+ pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
+
+ pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
+
+ return 0;
+}
+
+
+/*
+ * processor dependent cleanup prior to the KLD
+ * being unloaded
+ */
+
+static int
+amd_cleanup(int cpu)
+{
+ int i;
+ uint32_t evsel;
+ struct pmc_cpu *pcs;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
+
+ /*
+ * First, turn off all PMCs on this CPU.
+ */
+
+ for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
+ evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
+ evsel &= ~AMD_PMC_ENABLE;
+ wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
+ }
+
+ /*
+ * Next, free up allocated space.
+ */
+
+ pcs = pmc_pcpu[cpu];
+
+	KASSERT(pcs != NULL,
+	    ("[amd,%d] null per-cpu state pointer (cpu%d)", __LINE__, cpu));
+
+#if	DEBUG
+	/* check the TSC */
+	KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
+	    ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
+	for (i = 1; i < AMD_NPMCS; i++) {
+		KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
+		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
+		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
+		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
+	}
+#endif
+
+ pmc_pcpu[cpu] = NULL;
+ FREE(pcs, M_PMC);
+ return 0;
+}
+
+/*
+ * Initialize ourselves.
+ */
+
+struct pmc_mdep *
+pmc_amd_initialize(void)
+{
+
+ struct pmc_mdep *pmc_mdep;
+
+	/*
+	 * The presence of hardware performance counters on the AMD
+	 * Athlon, Duron or later processors is _not_ indicated by any
+	 * of the processor feature flags set by the 'CPUID'
+	 * instruction, so we only check the 'instruction family'
+	 * field returned by CPUID for instruction family >= 6.  This
+	 * test needs to be refined.
+	 */
+
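+	/*
+	 * For example, an AMD Athlon (K7, family 6) reports cpu_id
+	 * values of the form 0x6XX and passes the check below, while
+	 * the older K5/K6 parts (family 5) are rejected.
+	 */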
+ if ((cpu_id & 0xF00) < 0x600)
+ return NULL;
+
+ MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
+ M_PMC, M_WAITOK|M_ZERO);
+
+#if __i386__
+ pmc_mdep->pmd_cputype = PMC_CPU_AMD_K7;
+#elif __amd64__
+ pmc_mdep->pmd_cputype = PMC_CPU_AMD_K8;
+#else
+#error Unknown AMD CPU type.
+#endif
+
+ pmc_mdep->pmd_npmc = AMD_NPMCS;
+
+ /* this processor has two classes of usable PMCs */
+ pmc_mdep->pmd_nclass = 2;
+ pmc_mdep->pmd_classes[0] = PMC_CLASS_TSC;
+ pmc_mdep->pmd_classes[1] = AMD_PMC_CLASS;
+ pmc_mdep->pmd_nclasspmcs[0] = 1;
+ pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);
+
+ pmc_mdep->pmd_init = amd_init;
+ pmc_mdep->pmd_cleanup = amd_cleanup;
+ pmc_mdep->pmd_switch_in = amd_switch_in;
+ pmc_mdep->pmd_switch_out = amd_switch_out;
+ pmc_mdep->pmd_read_pmc = amd_read_pmc;
+ pmc_mdep->pmd_write_pmc = amd_write_pmc;
+ pmc_mdep->pmd_config_pmc = amd_config_pmc;
+ pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
+ pmc_mdep->pmd_release_pmc = amd_release_pmc;
+ pmc_mdep->pmd_start_pmc = amd_start_pmc;
+ pmc_mdep->pmd_stop_pmc = amd_stop_pmc;
+ pmc_mdep->pmd_intr = amd_intr;
+ pmc_mdep->pmd_describe = amd_describe;
+ pmc_mdep->pmd_get_msr = amd_get_msr; /* i386 */
+
+ PMCDBG(MDP,INI,0,"%s","amd-initialize");
+
+ return pmc_mdep;
+}
diff --git a/sys/dev/hwpmc/hwpmc_intel.c b/sys/dev/hwpmc/hwpmc_intel.c
new file mode 100644
index 0000000..2448b37
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_intel.c
@@ -0,0 +1,142 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+struct pmc_mdep *
+pmc_intel_initialize(void)
+{
+ struct pmc_mdep *pmc_mdep;
+ enum pmc_cputype cputype;
+ int error, model;
+
+ KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
+ ("[intel,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,0, "intel-initialize cpuid=0x%x", cpu_id);
+
+ cputype = -1;
+
+ switch (cpu_id & 0xF00) {
+ case 0x500: /* Pentium family processors */
+ cputype = PMC_CPU_INTEL_P5;
+ break;
+ case 0x600: /* Pentium Pro, Celeron, Pentium II & III */
+ switch ((cpu_id & 0xF0) >> 4) { /* model number field */
+ case 0x1:
+ cputype = PMC_CPU_INTEL_P6;
+ break;
+ case 0x3: case 0x5:
+ cputype = PMC_CPU_INTEL_PII;
+ break;
+ case 0x6:
+ cputype = PMC_CPU_INTEL_CL;
+ break;
+ case 0x7: case 0x8: case 0xA: case 0xB:
+ cputype = PMC_CPU_INTEL_PIII;
+ break;
+ case 0x9: case 0xD:
+ cputype = PMC_CPU_INTEL_PM;
+ break;
+ }
+ break;
+ case 0xF00: /* P4 */
+ model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
+ if (model >= 0 && model <= 3) /* known models */
+ cputype = PMC_CPU_INTEL_PIV;
+ break;
+ }
+
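+	/*
+	 * Worked example (illustrative): cpu_id 0x6B1 has family field
+	 * 0x600 and model field 0xB and so maps to PMC_CPU_INTEL_PIII;
+	 * for family 0xF00, extended model bits 19:16 are folded into
+	 * the high nibble of 'model' above.
+	 */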
+ if ((int) cputype == -1) {
+ printf("pmc: Unknown Intel CPU.\n");
+ return NULL;
+ }
+
+ MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
+ M_PMC, M_WAITOK|M_ZERO);
+
+ pmc_mdep->pmd_cputype = cputype;
+ pmc_mdep->pmd_nclass = 2;
+ pmc_mdep->pmd_classes[0] = PMC_CLASS_TSC;
+ pmc_mdep->pmd_nclasspmcs[0] = 1;
+
+ error = 0;
+
+ switch (cputype) {
+
+ /*
+ * Intel Pentium 4 Processors
+ */
+
+ case PMC_CPU_INTEL_PIV:
+ error = pmc_initialize_p4(pmc_mdep);
+ break;
+
+ /*
+ * P6 Family Processors
+ */
+
+ case PMC_CPU_INTEL_P6:
+ case PMC_CPU_INTEL_CL:
+ case PMC_CPU_INTEL_PII:
+ case PMC_CPU_INTEL_PIII:
+ case PMC_CPU_INTEL_PM:
+
+ error = pmc_initialize_p6(pmc_mdep);
+ break;
+
+ /*
+ * Intel Pentium PMCs.
+ */
+
+ case PMC_CPU_INTEL_P5:
+ error = pmc_initialize_p5(pmc_mdep);
+ break;
+
+ default:
+ KASSERT(0,("[intel,%d] Unknown CPU type", __LINE__));
+ }
+
+ if (error) {
+ FREE(pmc_mdep, M_PMC);
+ pmc_mdep = NULL;
+ }
+
+ return pmc_mdep;
+}
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
new file mode 100644
index 0000000..89b2954
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -0,0 +1,3671 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/eventhandler.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/sched.h>
+#include <sys/signalvar.h>
+#include <sys/smp.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/systm.h>
+
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * Types
+ */
+
+enum pmc_flags {
+ PMC_FLAG_NONE = 0x00, /* do nothing */
+ PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */
+ PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
+};
+
+/*
+ * The offset in sysent where the syscall is allocated.
+ */
+
+static int pmc_syscall_num = NO_SYSCALL;
+struct pmc_cpu **pmc_pcpu; /* per-cpu state */
+pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */
+
+#define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
+
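+/*
+ * Illustrative example: with md->pmd_npmc == 5, the saved value for
+ * row index 3 on CPU 2 is pmc_pcpu_saved[3 + 5*2]; i.e., the array
+ * holds pmd_npmc consecutive entries per CPU.
+ */
+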
+struct mtx_pool *pmc_mtxpool;
+static int *pmc_pmcdisp; /* PMC row dispositions */
+
+#define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0)
+#define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0)
+#define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0)
+
+#define PMC_MARK_ROW_FREE(R) do { \
+ pmc_pmcdisp[(R)] = 0; \
+} while (0)
+
+#define PMC_MARK_ROW_STANDALONE(R) do { \
+ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&pmc_pmcdisp[(R)], -1); \
+ KASSERT(pmc_pmcdisp[(R)] >= (-mp_ncpus), ("[pmc,%d] row " \
+ "disposition error", __LINE__)); \
+} while (0)
+
+#define PMC_UNMARK_ROW_STANDALONE(R) do { \
+ atomic_add_int(&pmc_pmcdisp[(R)], 1); \
+ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+} while (0)
+
+#define PMC_MARK_ROW_THREAD(R) do { \
+ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&pmc_pmcdisp[(R)], 1); \
+} while (0)
+
+#define PMC_UNMARK_ROW_THREAD(R) do { \
+ atomic_add_int(&pmc_pmcdisp[(R)], -1); \
+ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+} while (0)
+
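+/*
+ * Illustrative reading of the row-disposition convention:
+ * pmc_pmcdisp[R] == 0 means row R is unclaimed; each thread-mode
+ * (process-virtual) use adds 1, while each standalone (system-wide)
+ * use subtracts 1, bounded below by -mp_ncpus as the KASSERTs above
+ * check.  The two uses are therefore mutually exclusive for a row.
+ */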
+
+/* various event handlers */
+static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
+
+/* Module statistics */
+struct pmc_op_getdriverstats pmc_stats;
+
+/* Machine/processor dependent operations */
+struct pmc_mdep *md;
+
+/*
+ * Hash tables mapping owner processes and target threads to PMCs.
+ */
+
+struct mtx pmc_processhash_mtx; /* spin mutex */
+static u_long pmc_processhashmask;
+static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
+
+/*
+ * Hash table of PMC owner descriptors. This table is protected by
+ * the shared PMC "sx" lock.
+ */
+
+static u_long pmc_ownerhashmask;
+static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
+
+/*
+ * Prototypes
+ */
+
+#if DEBUG
+static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
+static int pmc_debugflags_parse(char *newstr, char *fence);
+#endif
+
+static int load(struct module *module, int cmd, void *arg);
+static int pmc_syscall_handler(struct thread *td, void *syscall_args);
+static int pmc_configure_log(struct pmc_owner *po, int logfd);
+static void pmc_log_process_exit(struct pmc *pm, struct pmc_process *pp);
+static struct pmc *pmc_allocate_pmc_descriptor(void);
+static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
+ pmc_id_t pmc);
+static void pmc_release_pmc_descriptor(struct pmc *pmc);
+static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri);
+static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
+ uint32_t mode);
+static void pmc_remove_process_descriptor(struct pmc_process *pp);
+static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
+static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
+static void pmc_remove_owner(struct pmc_owner *po);
+static void pmc_maybe_remove_owner(struct pmc_owner *po);
+static void pmc_unlink_target_process(struct pmc *pmc,
+ struct pmc_process *pp);
+static void pmc_link_target_process(struct pmc *pm,
+ struct pmc_process *pp);
+static void pmc_unlink_owner(struct pmc *pmc);
+static void pmc_cleanup(void);
+static void pmc_save_cpu_binding(struct pmc_binding *pb);
+static void pmc_restore_cpu_binding(struct pmc_binding *pb);
+static void pmc_select_cpu(int cpu);
+static void pmc_process_exit(void *arg, struct proc *p);
+static void pmc_process_fork(void *arg, struct proc *p1,
+ struct proc *p2, int n);
+static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
+static int pmc_attach_process(struct proc *p, struct pmc *pm);
+static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
+ int flags);
+static int pmc_detach_process(struct proc *p, struct pmc *pm);
+static int pmc_start(struct pmc *pm);
+static int pmc_stop(struct pmc *pm);
+static int pmc_can_attach(struct pmc *pm, struct proc *p);
+
+/*
+ * Kernel tunables and sysctl(8) interface.
+ */
+
+#define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."
+
+SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
+
+#if DEBUG
+unsigned int pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
+char pmc_debugstr[PMC_DEBUG_STRSIZE];
+TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
+ sizeof(pmc_debugstr));
+SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
+ CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
+ 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
+#endif
+
+/*
+ * kern.hwpmc.hashsize -- determines the number of rows in the hash
+ * tables used to look up target processes and owners
+ */
+
+static int pmc_hashsize = PMC_HASH_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_hashsize, 0, "rows in hash tables");
+
+/*
+ * kern.hwpmc.pcpubuffersize -- the size of each per-cpu area
+ * used for collecting PC samples.
+ */
+
+static int pmc_pcpu_buffer_size = PMC_PCPU_BUFFER_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "pcpubuffersize", &pmc_pcpu_buffer_size);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, pcpubuffersize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_pcpu_buffer_size, 0, "size of per-cpu buffer in 4K pages");
+
+/*
+ * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
+ */
+
+static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_mtxpool_size, 0, "size of spin mutex pool");
+
+/*
+ * security.bsd.unprivileged_syspmcs -- allow non-root processes to
+ * allocate system-wide PMCs.
+ *
+ * Allowing unprivileged processes to allocate system PMCs is convenient
+ * if system-wide measurements need to be taken concurrently with other
+ * per-process measurements. This feature is turned off by default.
+ */
+
+SYSCTL_DECL(_security_bsd);
+
+static int pmc_unprivileged_syspmcs = 0;
+TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
+SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
+ &pmc_unprivileged_syspmcs, 0,
+ "allow unprivileged process to allocate system PMCs");
+
+#if PMC_HASH_USE_CRC32
+
+#define PMC_HASH_PTR(P,M) (crc32(&(P), sizeof((P))) & (M))
+
+#else /* integer multiplication */
+
+#if LONG_BIT == 64
+#define _PMC_HM 11400714819323198486u
+#elif LONG_BIT == 32
+#define _PMC_HM 2654435769u
+#else
+#error Must know the size of 'long' to compile
+#endif
+
+/*
+ * Hash function. Discard the lower 2 bits of the pointer since
+ * these are always zero for our uses. The hash multiplier is
+ * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
+ */
+
+#define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
+
+#endif
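+
+/*
+ * Worked example (illustrative, LONG_BIT == 32): for a pointer value
+ * P and mask M == 15, ((unsigned long) P >> 2) is multiplied by the
+ * golden-ratio constant 2654435769u, and the low four bits of the
+ * 32-bit product select one of 16 hash rows; the multiplication
+ * spreads nearby pointer values across different rows.
+ */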
+
+/*
+ * Syscall structures
+ */
+
+/* The `sysent' for the new syscall */
+static struct sysent pmc_sysent = {
+ 2, /* sy_narg */
+ pmc_syscall_handler /* sy_call */
+};
+
+static struct syscall_module_data pmc_syscall_mod = {
+ load,
+ NULL,
+ &pmc_syscall_num,
+ &pmc_sysent,
+ { 0, NULL }
+};
+
+static moduledata_t pmc_mod = {
+ PMC_MODULE_NAME,
+ syscall_module_handler,
+ &pmc_syscall_mod
+};
+
+DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
+MODULE_VERSION(pmc, PMC_VERSION);
+
+#if DEBUG
+static int
+pmc_debugflags_parse(char *newstr, char *fence)
+{
+	char c, *p, *q;
+ unsigned int tmpflags;
+ int level;
+ char tmpbuf[4]; /* 3 character keyword + '\0' */
+
+ tmpflags = 0;
+ level = 0xF; /* max verbosity */
+
+ p = newstr;
+
+ for (; p < fence && (c = *p);) {
+
+ /* skip separators */
+ if (c == ' ' || c == '\t' || c == ',') {
+ p++; continue;
+ }
+
+ (void) strlcpy(tmpbuf, p, sizeof(tmpbuf));
+
+#define CMP_SET_FLAG_MAJ(S,F) \
+ else if (strncmp(tmpbuf, S, 3) == 0) \
+ tmpflags |= __PMCDFMAJ(F)
+
+#define CMP_SET_FLAG_MIN(S,F) \
+ else if (strncmp(tmpbuf, S, 3) == 0) \
+ tmpflags |= __PMCDFMIN(F)
+
+		if (fence - p > 6 && strncmp(p, "level=", 6) == 0) {
+ p += 6; /* skip over keyword */
+ level = strtoul(p, &q, 16);
+ }
+ CMP_SET_FLAG_MAJ("mod", MOD);
+ CMP_SET_FLAG_MAJ("pmc", PMC);
+ CMP_SET_FLAG_MAJ("ctx", CTX);
+ CMP_SET_FLAG_MAJ("own", OWN);
+ CMP_SET_FLAG_MAJ("prc", PRC);
+ CMP_SET_FLAG_MAJ("mdp", MDP);
+ CMP_SET_FLAG_MAJ("cpu", CPU);
+
+ CMP_SET_FLAG_MIN("all", ALL);
+ CMP_SET_FLAG_MIN("rel", REL);
+ CMP_SET_FLAG_MIN("ops", OPS);
+ CMP_SET_FLAG_MIN("ini", INI);
+ CMP_SET_FLAG_MIN("fnd", FND);
+ CMP_SET_FLAG_MIN("pmh", PMH);
+ CMP_SET_FLAG_MIN("pms", PMS);
+ CMP_SET_FLAG_MIN("orm", ORM);
+ CMP_SET_FLAG_MIN("omr", OMR);
+ CMP_SET_FLAG_MIN("tlk", TLK);
+ CMP_SET_FLAG_MIN("tul", TUL);
+ CMP_SET_FLAG_MIN("ext", EXT);
+ CMP_SET_FLAG_MIN("exc", EXC);
+ CMP_SET_FLAG_MIN("frk", FRK);
+ CMP_SET_FLAG_MIN("att", ATT);
+ CMP_SET_FLAG_MIN("swi", SWI);
+ CMP_SET_FLAG_MIN("swo", SWO);
+ CMP_SET_FLAG_MIN("reg", REG);
+ CMP_SET_FLAG_MIN("alr", ALR);
+ CMP_SET_FLAG_MIN("rea", REA);
+ CMP_SET_FLAG_MIN("wri", WRI);
+ CMP_SET_FLAG_MIN("cfg", CFG);
+ CMP_SET_FLAG_MIN("sta", STA);
+ CMP_SET_FLAG_MIN("sto", STO);
+ CMP_SET_FLAG_MIN("bnd", BND);
+ CMP_SET_FLAG_MIN("sel", SEL);
+ else /* unrecognized keyword */
+ return EINVAL;
+
+ p += 4; /* skip keyword and separator */
+ }
+
+ pmc_debugflags = (tmpflags|level);
+
+ return 0;
+}
+
+static int
+pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ char *fence, *newstr;
+ int error;
+ unsigned int n;
+
+ (void) arg1; (void) arg2; /* unused parameters */
+
+ n = sizeof(pmc_debugstr);
+ MALLOC(newstr, char *, n, M_PMC, M_ZERO|M_WAITOK);
+ (void) strlcpy(newstr, pmc_debugstr, sizeof(pmc_debugstr));
+
+ error = sysctl_handle_string(oidp, newstr, n, req);
+
+ /* if there is a new string, parse and copy it */
+ if (error == 0 && req->newptr != NULL) {
+ fence = newstr + (n < req->newlen ? n : req->newlen);
+ if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
+ (void) strlcpy(pmc_debugstr, newstr,
+ sizeof(pmc_debugstr));
+ }
+
+ FREE(newstr, M_PMC);
+
+ return error;
+}
+#endif
+
+/*
+ * Concurrency Control
+ *
+ * The driver manages the following data structures:
+ *
+ * - target process descriptors, one per target process
+ * - owner process descriptors (and attached lists), one per owner process
+ * - lookup hash tables for owner and target processes
+ * - PMC descriptors (and attached lists)
+ * - per-cpu hardware state
+ * - the 'hook' variable through which the kernel calls into
+ * this module
+ * - the machine hardware state (managed by the MD layer)
+ *
+ * These data structures are accessed from:
+ *
+ * - thread context-switch code
+ * - interrupt handlers (possibly on multiple cpus)
+ * - kernel threads on multiple cpus running on behalf of user
+ * processes doing system calls
+ * - this driver's private kernel threads
+ *
+ * = Locks and Locking strategy =
+ *
+ * The driver uses four locking strategies for its operation:
+ *
+ * - There is a 'global' SX lock "pmc_sx" that is used to protect
+ *   the driver's 'meta-data'.
+ *
+ * Calls into the module (via syscall() or by the kernel) start with
+ * this lock being held in exclusive mode. Depending on the requested
+ * operation, the lock may be downgraded to 'shared' mode to allow
+ * more concurrent readers into the module.
+ *
+ * This SX lock is held in exclusive mode for any operations that
+ * modify the linkages between the driver's internal data structures.
+ *
+ * The 'pmc_hook' function pointer is also protected by this lock.
+ * It is only examined with the sx lock held in exclusive mode. The
+ * kernel module is allowed to be unloaded only with the sx lock
+ * held in exclusive mode. In normal syscall handling, after
+ * acquiring the pmc_sx lock we first check that 'pmc_hook' is
+ * non-null before proceeding. This prevents races between the
+ * thread unloading the module and other threads seeking to use the
+ * module.
+ *
+ * - Lookups of target process structures cannot use the global
+ *   "pmc_sx" SX lock because these lookups need to happen during
+ *   context switches and in other critical sections where sleeping
+ *   is not allowed.  The target process hash table is therefore
+ *   protected by its own private spin mutex, "pmc_processhash_mtx".
+ *   This is a 'leaf' mutex, in that no other lock is acquired while
+ *   it is held.  The owner process hash table is only accessed from
+ *   syscall context and is protected by the "pmc_sx" lock itself,
+ *   as noted in its declaration above.
+ *
+ * - Interrupt handlers work in a lock free manner. At interrupt
+ * time, handlers look at the PMC pointer (phw->phw_pmc) configured
+ * when the PMC was started. If this pointer is NULL, the interrupt
+ * is ignored after updating driver statistics. We ensure that this
+ * pointer is set (using an atomic operation if necessary) before the
+ * PMC hardware is started. Conversely, this pointer is unset atomically
+ * only after the PMC hardware is stopped.
+ *
+ * We ensure that everything needed for the operation of an
+ * interrupt handler is available without it needing to acquire any
+ * locks. We also ensure that a PMC's software state is destroyed only
+ * after the PMC is taken off hardware (on all CPUs).
+ *
+ * - Context-switch handling with process-private PMCs needs more
+ * care.
+ *
+ * A given process may be the target of multiple PMCs. For example,
+ * PMCATTACH and PMCDETACH may be requested by a process on one CPU
+ * while the target process is running on another. A PMC could also
+ * be getting released because its owner is exiting. We tackle
+ * these situations in the following manner:
+ *
+ * - each target process structure 'pmc_process' has an array
+ * of 'struct pmc *' pointers, one for each hardware PMC.
+ *
+ * - At context switch IN time, each "target" PMC in RUNNING state
+ * gets started on hardware and a pointer to each PMC is copied into
+ * the per-cpu phw array. The 'runcount' for the PMC is
+ * incremented.
+ *
+ * - At context switch OUT time, all process-virtual PMCs are stopped
+ * on hardware. The saved value is added to the PMCs value field
+ * only if the PMC is in a non-deleted state (the PMCs state could
+ * have changed during the current time slice).
+ *
+ * Note that in between a switch IN on a processor and a switch
+ * OUT, the PMC could have been released on another CPU. Therefore
+ * context switch OUT always looks at the hardware state to turn
+ * OFF PMCs and will update a PMC's saved value only if reachable
+ * from the target process record.
+ *
+ * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
+ * be attached to many processes at the time of the call and could
+ * be active on multiple CPUs).
+ *
+ * We prevent further scheduling of the PMC by marking it as in
+ * state 'DELETED'. If the runcount of the PMC is non-zero then
+ * this PMC is currently running on a CPU somewhere. The thread
+ * doing the PMCRELEASE operation waits by repeatedly doing a
+ * tsleep() until the runcount comes to zero.
+ *
+ */
+
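+/*
+ * Illustrative sketch (informational, mirroring the
+ * PMC_GET_SX_XLOCK() macro defined later in this file): the syscall
+ * entry path takes "pmc_sx" exclusively and re-checks 'pmc_hook'
+ * before doing any work:
+ *
+ *	sx_xlock(&pmc_sx);
+ *	if (pmc_hook == NULL) {
+ *		sx_xunlock(&pmc_sx);
+ *		return ENOSYS;
+ *	}
+ */
+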
+/*
+ * save the cpu binding of the current kthread
+ */
+
+static void
+pmc_save_cpu_binding(struct pmc_binding *pb)
+{
+ PMCDBG(CPU,BND,2, "%s", "save-cpu");
+ mtx_lock_spin(&sched_lock);
+ pb->pb_bound = sched_is_bound(curthread);
+ pb->pb_cpu = curthread->td_oncpu;
+ mtx_unlock_spin(&sched_lock);
+ PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
+}
+
+/*
+ * restore the cpu binding of the current thread
+ */
+
+static void
+pmc_restore_cpu_binding(struct pmc_binding *pb)
+{
+ PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
+ curthread->td_oncpu, pb->pb_cpu);
+ mtx_lock_spin(&sched_lock);
+ if (pb->pb_bound)
+ sched_bind(curthread, pb->pb_cpu);
+ else
+ sched_unbind(curthread);
+ mtx_unlock_spin(&sched_lock);
+ PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
+}
+
+/*
+ * move execution to the specified cpu and bind it there.
+ */
+
+static void
+pmc_select_cpu(int cpu)
+{
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
+
+ /* never move to a disabled CPU */
+ KASSERT(pmc_cpu_is_disabled(cpu) == 0, ("[pmc,%d] selecting "
+ "disabled CPU %d", __LINE__, cpu));
+
+ PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
+ mtx_lock_spin(&sched_lock);
+ sched_bind(curthread, cpu);
+ mtx_unlock_spin(&sched_lock);
+
+ KASSERT(curthread->td_oncpu == cpu,
+ ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
+ cpu, curthread->td_oncpu));
+
+ PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
+}
+
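+/*
+ * Informational sketch: callers that need to operate on a specific
+ * CPU bracket their work with these helpers, as pmc_start() and
+ * pmc_stop() later in this file do:
+ *
+ *	struct pmc_binding pb;
+ *
+ *	pmc_save_cpu_binding(&pb);
+ *	pmc_select_cpu(cpu);
+ *	... access per-CPU PMC hardware state ...
+ *	pmc_restore_cpu_binding(&pb);
+ */
+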
+/*
+ * Update the per-pmc histogram. Currently a stub.
+ */
+
+void
+pmc_update_histogram(struct pmc_hw *phw, uintptr_t pc)
+{
+ (void) phw;
+ (void) pc;
+}
+
+/*
+ * Send a signal to a process. This is meant to be invoked from an
+ * interrupt handler.
+ */
+
+void
+pmc_send_signal(struct pmc *pmc)
+{
+ (void) pmc; /* silence gcc */
+
+#if 0
+ struct proc *proc;
+ struct thread *td;
+
+ KASSERT(pmc->pm_owner != NULL,
+ ("[pmc,%d] No owner for PMC", __LINE__));
+
+ KASSERT((pmc->pm_owner->po_flags & PMC_FLAG_IS_OWNER) &&
+ (pmc->pm_owner->po_flags & PMC_FLAG_HAS_TS_PMC),
+ ("[pmc,%d] interrupting PMC owner has wrong flags 0x%x",
+ __LINE__, pmc->pm_owner->po_flags));
+
+ proc = pmc->pm_owner->po_owner;
+
+ KASSERT(curthread->td_proc == proc,
+ ("[pmc,%d] interruping the wrong thread (owner %p, "
+ "cur %p)", __LINE__, (void *) proc, curthread->td_proc));
+
+ mtx_lock_spin(&sched_lock);
+ td = TAILQ_FIRST(&proc->p_threads);
+ mtx_unlock_spin(&sched_lock);
+ /* XXX RACE HERE: can 'td' disappear now? */
+ trapsignal(td, SIGPROF, 0);
+ /* XXX rework this to use the regular 'psignal' interface from a
+ helper thread */
+#endif
+
+}
+
+/*
+ * remove a process owning PMCs
+ */
+
+void
+pmc_remove_owner(struct pmc_owner *po)
+{
+ struct pmc_list *pl, *tmp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
+
+ /* Remove descriptor from the owner hash table */
+ LIST_REMOVE(po, po_next);
+
+ /* pass 1: release all owned PMC descriptors */
+ LIST_FOREACH_SAFE(pl, &po->po_pmcs, pl_next, tmp) {
+
+ PMCDBG(OWN,ORM,2, "pl=%p pmc=%p", pl, pl->pl_pmc);
+
+ /* remove the associated PMC descriptor, if present */
+ if (pl->pl_pmc)
+ pmc_release_pmc_descriptor(pl->pl_pmc);
+
+ /* remove the linked list entry */
+ LIST_REMOVE(pl, pl_next);
+ FREE(pl, M_PMC);
+ }
+
+ /* pass 2: delete the pmc_list chain */
+ LIST_FOREACH_SAFE(pl, &po->po_pmcs, pl_next, tmp) {
+ KASSERT(pl->pl_pmc == NULL,
+ ("[pmc,%d] non-null pmc pointer", __LINE__));
+ LIST_REMOVE(pl, pl_next);
+ FREE(pl, M_PMC);
+ }
+
+ KASSERT(LIST_EMPTY(&po->po_pmcs),
+ ("[pmc,%d] PMC list not empty", __LINE__));
+
+
+ /*
+ * If this process owns a log file used for system-wide logging,
+ * remove the log file.
+ *
+ * XXX rework needed.
+ */
+
+ if (po->po_flags & PMC_FLAG_OWNS_LOGFILE)
+ pmc_configure_log(po, -1);
+
+}
+
+/*
+ * remove an owner process record if all conditions are met.
+ */
+
+static void
+pmc_maybe_remove_owner(struct pmc_owner *po)
+{
+
+ PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
+
+ /*
+ * Remove owner record if
+ * - this process does not own any PMCs
+ * - this process does not own a log file for system-wide logging
+ */
+
+ if (LIST_EMPTY(&po->po_pmcs) &&
+ ((po->po_flags & PMC_FLAG_OWNS_LOGFILE) == 0)) {
+ pmc_remove_owner(po);
+ FREE(po, M_PMC);
+ }
+}
+
+/*
+ * Add an association between a target process and a PMC.
+ */
+
+static void
+pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
+{
+ int ri;
+ struct pmc_target *pt;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL && pp != NULL,
+ ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
+
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < ((int) md->pmd_npmc - 1),
+ ("[pmc,%d] Illegal reference count %d for process record %p",
+ __LINE__, pp->pp_refcnt, (void *) pp));
+
+ ri = pm->pm_rowindex;
+
+ PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
+ pm, ri, pp);
+
+#if DEBUG
+ LIST_FOREACH(pt, &pm->pm_targets, pt_next)
+ if (pt->pt_process == pp)
+ KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
+ __LINE__, pp, pm));
+#endif
+
+ MALLOC(pt, struct pmc_target *, sizeof(struct pmc_target),
+ M_PMC, M_ZERO|M_WAITOK);
+
+ pt->pt_process = pp;
+
+ LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
+
+ atomic_store_rel_ptr(&pp->pp_pmcs[ri].pp_pmc, pm);
+
+ pp->pp_refcnt++;
+
+}
+
+/*
+ * Removes the association between a target process and a PMC.
+ */
+
+static void
+pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
+{
+ int ri;
+ struct pmc_target *ptgt;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL && pp != NULL,
+ ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
+
+ KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt < (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal ref count %d on process record %p",
+ __LINE__, pp->pp_refcnt, (void *) pp));
+
+ ri = pm->pm_rowindex;
+
+ PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
+ pm, ri, pp);
+
+ KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
+ ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
+ ri, pm, pp->pp_pmcs[ri].pp_pmc));
+
+ pp->pp_pmcs[ri].pp_pmc = NULL;
+ pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
+
+ pp->pp_refcnt--;
+
+ /* Remove the target process from the PMC structure */
+ LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
+ if (ptgt->pt_process == pp)
+ break;
+
+ KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
+ "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
+
+ PMCDBG(PRC,TUL,4, "unlink ptgt=%p", ptgt);
+
+ LIST_REMOVE(ptgt, pt_next);
+ FREE(ptgt, M_PMC);
+}
+
+/*
+ * Remove PMC descriptor 'pmc' from the owner descriptor.
+ */
+
+void
+pmc_unlink_owner(struct pmc *pm)
+{
+ struct pmc_list *pl, *tmp;
+ struct pmc_owner *po;
+
+#if DEBUG
+ KASSERT(LIST_EMPTY(&pm->pm_targets),
+ ("[pmc,%d] unlinking PMC with targets", __LINE__));
+#endif
+
+ po = pm->pm_owner;
+
+ KASSERT(po != NULL, ("[pmc,%d] No owner for PMC", __LINE__));
+
+ LIST_FOREACH_SAFE(pl, &po->po_pmcs, pl_next, tmp) {
+ if (pl->pl_pmc == pm) {
+ pl->pl_pmc = NULL;
+ pm->pm_owner = NULL;
+ return;
+ }
+ }
+
+ KASSERT(0, ("[pmc,%d] couldn't find pmc in owner list", __LINE__));
+}
+
+/*
+ * Check if PMC 'pm' may be attached to target process 't'.
+ */
+
+static int
+pmc_can_attach(struct pmc *pm, struct proc *t)
+{
+ struct proc *o; /* pmc owner */
+ struct ucred *oc, *tc; /* owner, target credentials */
+ int decline_attach, i;
+
+ /*
+ * A PMC's owner can always attach that PMC to itself.
+ */
+
+ if ((o = pm->pm_owner->po_owner) == t)
+ return 0;
+
+ PROC_LOCK(o);
+ oc = o->p_ucred;
+ crhold(oc);
+ PROC_UNLOCK(o);
+
+ PROC_LOCK(t);
+ tc = t->p_ucred;
+ crhold(tc);
+ PROC_UNLOCK(t);
+
+ /*
+ * The effective uid of the PMC owner should match at least one
+ * of the {effective,real,saved} uids of the target process.
+ */
+
+ decline_attach = oc->cr_uid != tc->cr_uid &&
+ oc->cr_uid != tc->cr_svuid &&
+ oc->cr_uid != tc->cr_ruid;
+
+ /*
+ * Every one of the target's group ids must be in the owner's
+ * group list.
+ */
+ for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
+ decline_attach = !groupmember(tc->cr_groups[i], oc);
+
+ /* check the real and saved gids too */
+ if (decline_attach == 0)
+ decline_attach = !groupmember(tc->cr_rgid, oc) ||
+ !groupmember(tc->cr_svgid, oc);
+
+ crfree(tc);
+ crfree(oc);
+
+ return !decline_attach;
+}
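+
+/*
+ * For example (an informational restatement of the checks above): an
+ * owner with effective uid 100 may attach to a target only if 100 is
+ * one of the target's effective, real or saved uids, and every group
+ * id of the target (effective, real, saved and supplementary) is in
+ * the owner's group set.
+ */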
+
+/*
+ * Attach a process to a PMC.
+ */
+
+static int
+pmc_attach_one_process(struct proc *p, struct pmc *pm)
+{
+ int ri;
+ struct pmc_process *pp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
+ pm->pm_rowindex, p, p->p_pid, p->p_comm);
+
+ /*
+ * Locate the process descriptor corresponding to process 'p',
+ * allocating space as needed.
+ *
+ * Verify that rowindex 'pm_rowindex' is free in the process
+ * descriptor.
+ *
+ * If so, allocate a target descriptor and link the
+ * process descriptor and the PMC.
+ */
+
+ ri = pm->pm_rowindex;
+
+ if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
+ return ENOMEM;
+
+ if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
+ return EEXIST;
+
+ if (pp->pp_pmcs[ri].pp_pmc != NULL)
+ return EBUSY;
+
+ pmc_link_target_process(pm, pp);
+
+ /* mark process as using HWPMCs */
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+
+ return 0;
+}
+
+/*
+ * Attach a process and optionally its children
+ */
+
+static int
+pmc_attach_process(struct proc *p, struct pmc *pm)
+{
+ int error;
+ struct proc *top;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
+ pm->pm_rowindex, p, p->p_pid, p->p_comm);
+
+ if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
+ return pmc_attach_one_process(p, pm);
+
+ /*
+ * Traverse all child processes, attaching them to
+ * this PMC.
+ */
+
+ sx_slock(&proctree_lock);
+
+ top = p;
+
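+ /*
+ * Pre-order walk of the process subtree rooted at 'top': descend
+ * into the first child if one exists, otherwise move to the next
+ * sibling, climbing back up via p_pptr until the top of the
+ * subtree is reached.
+ */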
+ for (;;) {
+ if ((error = pmc_attach_one_process(p, pm)) != 0)
+ break;
+ if (!LIST_EMPTY(&p->p_children))
+ p = LIST_FIRST(&p->p_children);
+ else for (;;) {
+ if (p == top)
+ goto done;
+ if (LIST_NEXT(p, p_sibling)) {
+ p = LIST_NEXT(p, p_sibling);
+ break;
+ }
+ p = p->p_pptr;
+ }
+ }
+
+ if (error)
+ (void) pmc_detach_process(top, pm);
+
+ done:
+ sx_sunlock(&proctree_lock);
+ return error;
+}
+
+/*
+ * Detach a process from a PMC. If there are no other PMCs tracking
+ * this process, remove the process structure from its hash table. If
+ * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
+ */
+
+static int
+pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
+{
+ int ri;
+ struct pmc_process *pp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL,
+ ("[pmc,%d] null pm pointer", __LINE__));
+
+ PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
+ pm, pm->pm_rowindex, p, p->p_pid, p->p_comm, flags);
+
+ ri = pm->pm_rowindex;
+
+ if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
+ return ESRCH;
+
+ if (pp->pp_pmcs[ri].pp_pmc != pm)
+ return EINVAL;
+
+ pmc_unlink_target_process(pm, pp);
+
+ /*
+ * If there are no PMCs targeting this process, we remove its
+ * descriptor from the target hash table and unset the P_HWPMC
+ * flag in the struct proc.
+ */
+
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal refcnt %d for process struct %p",
+ __LINE__, pp->pp_refcnt, pp));
+
+ if (pp->pp_refcnt != 0) /* still a target of some PMC */
+ return 0;
+
+ pmc_remove_process_descriptor(pp);
+
+ if (flags & PMC_FLAG_REMOVE)
+ FREE(pp, M_PMC);
+
+ PROC_LOCK(p);
+ p->p_flag &= ~P_HWPMC;
+ PROC_UNLOCK(p);
+
+ return 0;
+}
+
+/*
+ * Detach a process and optionally its descendants from a PMC.
+ */
+
+static int
+pmc_detach_process(struct proc *p, struct pmc *pm)
+{
+ struct proc *top;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
+ pm->pm_rowindex, p, p->p_pid, p->p_comm);
+
+ if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
+ return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
+
+ /*
+ * Traverse all children, detaching them from this PMC. We
+ * ignore errors since we could be detaching a PMC from a
+ * partially attached proc tree.
+ */
+
+ sx_slock(&proctree_lock);
+
+ top = p;
+
+ for (;;) {
+ (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
+
+ if (!LIST_EMPTY(&p->p_children))
+ p = LIST_FIRST(&p->p_children);
+ else for (;;) {
+ if (p == top)
+ goto done;
+ if (LIST_NEXT(p, p_sibling)) {
+ p = LIST_NEXT(p, p_sibling);
+ break;
+ }
+ p = p->p_pptr;
+ }
+ }
+
+ done:
+ sx_sunlock(&proctree_lock);
+ return 0;
+}
+
+/*
+ * The 'hook' invoked from the kernel proper
+ */
+
+
+#if DEBUG
+const char *pmc_hooknames[] = {
+ "",
+ "EXIT",
+ "EXEC",
+ "FORK",
+ "CSW-IN",
+ "CSW-OUT"
+};
+#endif
+
+static int
+pmc_hook_handler(struct thread *td, int function, void *arg)
+{
+
+ KASSERT(td->td_proc->p_flag & P_HWPMC,
+ ("[pmc,%d] unregistered thread called pmc_hook()", __LINE__));
+
+ PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
+ pmc_hooknames[function], arg);
+
+ switch (function)
+ {
+
+ /*
+ * Process exit.
+ *
+ * Remove this process from all hash tables. If this process
+ * owned any PMCs, turn off those PMCs and deallocate them,
+ * removing any associations with target processes.
+ *
+ * This function will be called by the last 'thread' of a
+ * process.
+ *
+ */
+
+ case PMC_FN_PROCESS_EXIT: /* release PMCs */
+ {
+ int cpu;
+ unsigned int ri;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ struct pmc_process *pp;
+ struct pmc_owner *po;
+ struct proc *p;
+ pmc_value_t newvalue, tmp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ p = (struct proc *) arg;
+
+ /*
+ * Since this code is invoked by the last thread in an
+ * exiting process, we would have context switched IN
+ * at some prior point. Kernel mode context switches
+ * may happen any time, so we want to disable a context
+ * switch OUT until we get any PMCs targeting this
+ * process off the hardware.
+ *
+ * We also need to atomically remove this process'
+ * entry from our target process hash table, using
+ * PMC_FLAG_REMOVE.
+ */
+
+ PMCDBG(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
+ p->p_comm);
+
+ critical_enter(); /* no preemption */
+
+ cpu = curthread->td_oncpu;
+
+ if ((pp = pmc_find_process_descriptor(p,
+ PMC_FLAG_REMOVE)) != NULL) {
+
+ PMCDBG(PRC,EXT,2,
+ "process-exit proc=%p pmc-process=%p", p, pp);
+
+ /*
+ * This process could be the target of some PMCs.
+ * Such PMCs will thus be running on the currently
+ * executing CPU at this point in the code,
+ * since we've disallowed context switches.
+ * We need to turn these PMCs off like we
+ * would do at context switch OUT time.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ /*
+ * Pick up the pmc pointer from hardware
+ * state similar to the CSW_OUT code.
+ */
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+
+ PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
+
+ if (pm == NULL ||
+ !PMC_IS_VIRTUAL_MODE(pm->pm_mode))
+ continue;
+
+ PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
+ "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
+ pm, pm->pm_state);
+
+ KASSERT(pm->pm_rowindex == ri,
+ ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
+ __LINE__, pm->pm_rowindex, ri));
+
+ KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
+ ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
+ __LINE__, pm, ri,
+ pp->pp_pmcs[ri].pp_pmc));
+
+ (void) md->pmd_stop_pmc(cpu, ri);
+
+ KASSERT(pm->pm_runcount > 0,
+ ("[pmc,%d] bad runcount ri %d rc %d",
+ __LINE__, ri, pm->pm_runcount));
+
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+ md->pmd_read_pmc(cpu, ri, &newvalue);
+ tmp = newvalue -
+ PMC_PCPU_SAVED(cpu,ri);
+
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ pm->pm_gv.pm_savedvalue += tmp;
+ pp->pp_pmcs[ri].pp_pmcval += tmp;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+ }
+
+ KASSERT((int) pm->pm_runcount >= 0,
+ ("[pmc,%d] runcount is %d", __LINE__, ri));
+
+ atomic_subtract_rel_32(&pm->pm_runcount,1);
+ (void) md->pmd_config_pmc(cpu, ri, NULL);
+ }
+ critical_exit(); /* ok to be pre-empted now */
+
+ /*
+ * Unlink this process from the PMCs that are
+ * targeting it. Log the value at exit() time if
+ * requested.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
+ if (pm->pm_flags &
+ PMC_F_LOG_TC_PROCEXIT)
+ pmc_log_process_exit(pm, pp);
+ pmc_unlink_target_process(pm, pp);
+ }
+
+ FREE(pp, M_PMC);
+
+ } else
+ critical_exit(); /* pp == NULL */
+
+ /*
+ * If the process owned PMCs, free them up and free up
+ * memory.
+ */
+
+ if ((po = pmc_find_owner_descriptor(p)) != NULL) {
+ pmc_remove_owner(po);
+ FREE(po, M_PMC);
+ }
+
+ }
+ break;
+
+ /*
+ * Process exec()
+ */
+
+ case PMC_FN_PROCESS_EXEC:
+ {
+ int *credentials_changed;
+ unsigned int ri;
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_owner *po;
+ struct pmc_process *pp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ /*
+ * PMCs are not inherited across an exec(): remove any
+ * PMCs that this process is the owner of.
+ */
+
+ p = td->td_proc;
+
+ if ((po = pmc_find_owner_descriptor(p)) != NULL) {
+ pmc_remove_owner(po);
+ FREE(po, M_PMC);
+ }
+
+ /*
+ * If this process is the target of a PMC, check if the new
+ * credentials are compatible with the owner's permissions.
+ */
+
+ if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
+ break;
+
+ credentials_changed = arg;
+
+ PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
+ p, p->p_pid, p->p_comm, *credentials_changed);
+
+ if (*credentials_changed == 0) /* credentials didn't change */
+ break;
+
+ /*
+ * If the newly exec()'ed process has a different credential
+ * than before, allow it to be the target of a PMC only if
+ * the PMC's owner has sufficient privilege.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
+ if (pmc_can_attach(pm, td->td_proc) != 0)
+ pmc_detach_one_process(td->td_proc,
+ pm, PMC_FLAG_NONE);
+
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
+ pp->pp_refcnt, pp));
+
+ /*
+ * If this process is no longer the target of any
+ * PMCs, we can remove the process entry and free
+ * up space.
+ */
+
+ if (pp->pp_refcnt == 0) {
+ pmc_remove_process_descriptor(pp);
+ FREE(pp, M_PMC);
+ }
+ }
+ break;
+
+ /*
+ * Process fork()
+ */
+
+ case PMC_FN_PROCESS_FORK:
+ {
+ unsigned int ri;
+ uint32_t do_descendants;
+ struct pmc *pm;
+ struct pmc_process *ppnew, *ppold;
+ struct proc *newproc;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ newproc = (struct proc *) arg;
+
+ PMCDBG(PMC,FRK,2, "process-fork p1=%p p2=%p",
+ curthread->td_proc, newproc);
+ /*
+ * If the parent process (curthread->td_proc) is a
+ * target of any PMCs, look for PMCs that are to be
+ * inherited, and link these into the new process
+ * descriptor.
+ */
+
+ if ((ppold = pmc_find_process_descriptor(
+ curthread->td_proc, PMC_FLAG_NONE)) == NULL)
+ break;
+
+ do_descendants = 0;
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
+ do_descendants |=
+ pm->pm_flags & PMC_F_DESCENDANTS;
+ if (do_descendants == 0) /* nothing to do */
+ break;
+
+ if ((ppnew = pmc_find_process_descriptor(newproc,
+ PMC_FLAG_ALLOCATE)) == NULL)
+ return ENOMEM;
+
+ /*
+ * Run through all PMCs targeting the old process and
+ * attach them to the new process.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
+ pm->pm_flags & PMC_F_DESCENDANTS)
+ pmc_link_target_process(pm, ppnew);
+
+ /*
+ * Now mark the new process as being tracked by this
+ * driver.
+ */
+
+ PROC_LOCK(newproc);
+ newproc->p_flag |= P_HWPMC;
+ PROC_UNLOCK(newproc);
+
+ }
+ break;
+
+ /*
+ * Thread context switch IN
+ */
+
+ case PMC_FN_CSW_IN:
+ {
+ int cpu;
+ unsigned int ri;
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ struct pmc_process *pp;
+ pmc_value_t newvalue;
+
+ p = td->td_proc;
+
+ if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
+ break;
+
+ KASSERT(pp->pp_proc == td->td_proc,
+ ("[pmc,%d] not my thread state", __LINE__));
+
+ critical_enter(); /* no preemption on this CPU */
+
+ cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
+
+ PMCDBG(CTX,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
+ p->p_pid, p->p_comm, pp);
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[pmc,%d] wierd CPU id %d", __LINE__, cpu));
+
+ pc = pmc_pcpu[cpu];
+
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
+ continue;
+
+ KASSERT(PMC_IS_VIRTUAL_MODE(pm->pm_mode),
+ ("[pmc,%d] Target PMC in non-virtual mode (%d)",
+ __LINE__, pm->pm_mode));
+
+ KASSERT(pm->pm_rowindex == ri,
+ ("[pmc,%d] Row index mismatch pmc %d != ri %d",
+ __LINE__, pm->pm_rowindex, ri));
+
+ /*
+ * Only PMCs that are marked as 'RUNNING' need
+ * be placed on hardware.
+ */
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ /* increment PMC runcount */
+ atomic_add_rel_32(&pm->pm_runcount, 1);
+
+ /* configure the HWPMC we are going to use. */
+ md->pmd_config_pmc(cpu, ri, pm);
+
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(phw != NULL,
+ ("[pmc,%d] null hw pointer", __LINE__));
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
+ phw->phw_pmc, pm));
+
+ /* write out saved value and start the PMC */
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ newvalue = PMC_PCPU_SAVED(cpu, ri) =
+ pm->pm_gv.pm_savedvalue;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ md->pmd_write_pmc(cpu, ri, newvalue);
+ md->pmd_start_pmc(cpu, ri);
+
+ }
+
+ /*
+ * perform any other architecture/cpu dependent thread
+ * switch-in actions.
+ */
+
+ (void) (*md->pmd_switch_in)(pc);
+
+ critical_exit();
+
+ }
+ break;
+
+ /*
+ * Thread context switch OUT.
+ */
+
+ case PMC_FN_CSW_OUT:
+ {
+ int cpu;
+ unsigned int ri;
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ struct pmc_process *pp;
+ pmc_value_t newvalue, tmp;
+
+ /*
+ * Locate our process descriptor; this may be NULL if
+ * this process is exiting and we have already removed
+ * the process from the target process table.
+ *
+ * Note that due to kernel preemption, multiple
+ * context switches may happen while the process is
+ * exiting.
+ *
+ * Note also that if the target process cannot be
+ * found we still need to deconfigure any PMCs that
+ * are currently running on hardware.
+ */
+
+ p = td->td_proc;
+ pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
+
+ /*
+ * save PMCs
+ */
+
+ critical_enter();
+
+ cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
+
+ PMCDBG(CTX,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
+ p->p_pid, p->p_comm, pp);
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[pmc,%d wierd CPU id %d", __LINE__, cpu));
+
+ pc = pmc_pcpu[cpu];
+
+ /*
+ * When a PMC gets unlinked from a target process, it will
+ * be removed from the target's pp_pmcs[] array.
+ *
+ * However, on a MP system, the target could have been
+ * executing on another CPU at the time of the unlink.
+ * So, at context switch OUT time, we need to look at
+ * the hardware to determine if a PMC is scheduled on
+ * it.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ phw = pc->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+
+ if (pm == NULL) /* nothing at this row index */
+ continue;
+
+ if (!PMC_IS_VIRTUAL_MODE(pm->pm_mode))
+ continue; /* not a process virtual PMC */
+
+ KASSERT(pm->pm_rowindex == ri,
+ ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
+ __LINE__, pm->pm_rowindex, ri));
+
+ /* Stop hardware */
+ md->pmd_stop_pmc(cpu, ri);
+
+ /* reduce this PMC's runcount */
+ atomic_subtract_rel_32(&pm->pm_runcount, 1);
+
+ /*
+ * If this PMC is associated with this process,
+ * save the reading.
+ */
+
+ if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {
+
+ KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
+ ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
+ __LINE__, pm, ri,
+ pp->pp_pmcs[ri].pp_pmc));
+
+ KASSERT(pp->pp_refcnt > 0,
+ ("[pmc,%d] pp refcnt = %d", __LINE__,
+ pp->pp_refcnt));
+
+ md->pmd_read_pmc(cpu, ri, &newvalue);
+
+ tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
+
+ KASSERT((int64_t) tmp >= 0,
+ ("[pmc,%d] negative increment cpu=%d "
+ "ri=%d newvalue=%jx saved=%jx "
+ "incr=%jx", __LINE__, cpu, ri,
+ newvalue, PMC_PCPU_SAVED(cpu,ri),
+ tmp));
+
+ /*
+ * Increment the PMC's count and this
+ * target process's count by the difference
+ * between the current reading and the
+ * saved value at context switch in time.
+ */
+
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+
+ pm->pm_gv.pm_savedvalue += tmp;
+ pp->pp_pmcs[ri].pp_pmcval += tmp;
+
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ }
+
+ /* mark hardware as free */
+ md->pmd_config_pmc(cpu, ri, NULL);
+ }
+
+ /*
+ * perform any other architecture/cpu dependent thread
+ * switch out functions.
+ */
+
+ (void) (*md->pmd_switch_out)(pc);
+
+ critical_exit();
+
+ }
+ break;
+
+ default:
+#if DEBUG
+ KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
+#endif
+ break;
+
+ }
+
+ return 0;
+}
+
+/*
+ * allocate a 'struct pmc_owner' descriptor in the owner hash table.
+ */
+
+static struct pmc_owner *
+pmc_allocate_owner_descriptor(struct proc *p)
+{
+ uint32_t hindex;
+ struct pmc_owner *po;
+ struct pmc_ownerhash *poh;
+
+ hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
+ poh = &pmc_ownerhash[hindex];
+
+ /* allocate space for the descriptor struct */
+ MALLOC(po, struct pmc_owner *, sizeof(struct pmc_owner),
+ M_PMC, M_WAITOK);
+
+ po->po_flags = 0;
+ po->po_owner = p;
+ LIST_INIT(&po->po_pmcs);
+ LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
+
+ PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
+ p, p->p_pid, p->p_comm, po);
+
+ return po;
+}
+
+/*
+ * find the descriptor corresponding to process 'p', adding or removing it
+ * as specified by 'mode'.
+ */
+
+static struct pmc_process *
+pmc_find_process_descriptor(struct proc *p, uint32_t mode)
+{
+ uint32_t hindex;
+ struct pmc_process *pp, *ppnew;
+ struct pmc_processhash *pph;
+
+ hindex = PMC_HASH_PTR(p, pmc_processhashmask);
+ pph = &pmc_processhash[hindex];
+
+ ppnew = NULL;
+
+ /*
+ * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
+ * cannot call malloc(9) once we hold a spin lock.
+ */
+
+ if (mode & PMC_FLAG_ALLOCATE) {
+ /* allocate additional space for 'n' pmc pointers */
+ MALLOC(ppnew, struct pmc_process *,
+ sizeof(struct pmc_process) + md->pmd_npmc *
+ sizeof(struct pmc_targetstate), M_PMC, M_ZERO|M_WAITOK);
+ }
+
+ mtx_lock_spin(&pmc_processhash_mtx);
+ LIST_FOREACH(pp, pph, pp_next)
+ if (pp->pp_proc == p)
+ break;
+
+ if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
+ LIST_REMOVE(pp, pp_next);
+
+ if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
+ ppnew != NULL) {
+ ppnew->pp_proc = p;
+ LIST_INSERT_HEAD(pph, ppnew, pp_next);
+ pp = ppnew;
+ ppnew = NULL;
+ }
+ mtx_unlock_spin(&pmc_processhash_mtx);
+
+ if (pp != NULL && ppnew != NULL)
+ FREE(ppnew, M_PMC);
+
+ return pp;
+}
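+
+/*
+ * Usage sketch (informational): callers pass PMC_FLAG_NONE for a
+ * plain lookup, PMC_FLAG_ALLOCATE to create the descriptor if none
+ * exists, and PMC_FLAG_REMOVE to also unhash the descriptor found:
+ *
+ *	pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE);
+ */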
+
+/*
+ * remove a process descriptor from the process hash table.
+ */
+
+static void
+pmc_remove_process_descriptor(struct pmc_process *pp)
+{
+ KASSERT(pp->pp_refcnt == 0,
+ ("[pmc,%d] Removing process descriptor %p with count %d",
+ __LINE__, pp, pp->pp_refcnt));
+
+ mtx_lock_spin(&pmc_processhash_mtx);
+ LIST_REMOVE(pp, pp_next);
+ mtx_unlock_spin(&pmc_processhash_mtx);
+}
+
+
+/*
+ * find an owner descriptor corresponding to proc 'p'
+ */
+
+static struct pmc_owner *
+pmc_find_owner_descriptor(struct proc *p)
+{
+ uint32_t hindex;
+ struct pmc_owner *po;
+ struct pmc_ownerhash *poh;
+
+ hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
+ poh = &pmc_ownerhash[hindex];
+
+ po = NULL;
+ LIST_FOREACH(po, poh, po_next)
+ if (po->po_owner == p)
+ break;
+
+ PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
+ "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
+
+ return po;
+}
+
+/*
+ * pmc_allocate_pmc_descriptor
+ *
+ * Allocate a pmc descriptor and initialize its
+ * fields.
+ */
+
+static struct pmc *
+pmc_allocate_pmc_descriptor(void)
+{
+ struct pmc *pmc;
+
+ MALLOC(pmc, struct pmc *, sizeof(struct pmc), M_PMC, M_ZERO|M_WAITOK);
+
+ if (pmc != NULL) {
+ pmc->pm_owner = NULL;
+ LIST_INIT(&pmc->pm_targets);
+ }
+
+ PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
+
+ return pmc;
+}
+
+/*
+ * Destroy a pmc descriptor.
+ */
+
+static void
+pmc_destroy_pmc_descriptor(struct pmc *pm)
+{
+ (void) pm;
+
+#if DEBUG
+ KASSERT(pm->pm_state == PMC_STATE_DELETED ||
+ pm->pm_state == PMC_STATE_FREE,
+ ("[pmc,%d] destroying non-deleted PMC", __LINE__));
+ KASSERT(LIST_EMPTY(&pm->pm_targets),
+ ("[pmc,%d] destroying pmc with targets", __LINE__));
+ KASSERT(pm->pm_owner == NULL,
+ ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
+ KASSERT(pm->pm_runcount == 0,
+ ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
+ pm->pm_runcount));
+#endif
+}
+
+/*
+ * This function does the following things:
+ *
+ * - detaches the PMC from hardware
+ * - unlinks all target processes that were attached to it
+ * - removes the PMC from its owner's list
+ * - destroys the PMC private mutex
+ *
+ * Once this function completes, the given pmc pointer can be safely
+ * FREE'd by the caller.
+ */
+
+static void
+pmc_release_pmc_descriptor(struct pmc *pm)
+{
+#if DEBUG
+ volatile int maxloop;
+#endif
+ u_int ri, cpu;
+ u_char curpri;
+ struct pmc_hw *phw;
+ struct pmc_process *pp;
+ struct pmc_target *ptgt, *tmp;
+ struct pmc_binding pb;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
+
+ ri = pm->pm_rowindex;
+
+ PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
+ pm->pm_mode);
+
+ /*
+ * First, we take the PMC off hardware.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+
+ /*
+ * A system mode PMC runs on a specific CPU. Switch
+ * to this CPU and turn hardware off.
+ */
+
+ pmc_save_cpu_binding(&pb);
+
+ cpu = pm->pm_gv.pm_cpu;
+
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+
+ pmc_select_cpu(cpu);
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
+ __LINE__, ri, phw->phw_pmc, pm));
+
+ PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
+
+ critical_enter();
+ md->pmd_stop_pmc(cpu, ri);
+ critical_exit();
+ }
+
+ PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
+
+ critical_enter();
+ md->pmd_config_pmc(cpu, ri, NULL);
+ critical_exit();
+
+ pm->pm_state = PMC_STATE_DELETED;
+
+ pmc_restore_cpu_binding(&pb);
+
+ } else if (PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+
+ /*
+ * A virtual PMC could be running on multiple CPUs at
+ * a given instant.
+ *
+ * By marking its state as DELETED, we ensure that
+ * this PMC is never further scheduled on hardware.
+ *
+ * Then we wait till all CPUs are done with this PMC.
+ */
+
+ pm->pm_state = PMC_STATE_DELETED;
+
+
+ /*
+ * Wait for the PMC's runcount to come to zero.
+ */
+
+#if DEBUG
+ maxloop = 100 * mp_ncpus;
+#endif
+
+ while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
+
+#if DEBUG
+ maxloop--;
+ KASSERT(maxloop > 0,
+ ("[pmc,%d] (ri%d, rc%d) waiting too long for "
+ "pmc to be free", __LINE__, pm->pm_rowindex,
+ pm->pm_runcount));
+#endif
+
+ mtx_lock_spin(&sched_lock);
+ curpri = curthread->td_priority;
+ mtx_unlock_spin(&sched_lock);
+
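+ /*
+ * Sleep for one tick at our current priority. No wakeup() is
+ * expected on this channel, so the one-tick timeout alone ends
+ * the sleep, making this a polling loop.
+ */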
+ (void) tsleep((void *) pmc_release_pmc_descriptor,
+ curpri, "pmcrel", 1);
+
+ }
+
+ /*
+ * At this point the PMC is off all CPUs and cannot be
+ * freshly scheduled onto a CPU. It is now safe to
+ * unlink all targets from this PMC. If a
+ * process-record's refcount falls to zero, we remove
+ * it from the hash table. The module-wide SX lock
+ * protects us from races.
+ */
+
+ LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
+ pp = ptgt->pt_process;
+ pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
+
+ PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
+
+ /*
+ * If the target process record shows that no
+ * PMCs are attached to it, reclaim its space.
+ */
+
+ if (pp->pp_refcnt == 0) {
+ pmc_remove_process_descriptor(pp);
+ FREE(pp, M_PMC);
+ }
+ }
+
+ cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
+
+ }
+
+ /*
+ * Release any MD resources
+ */
+
+ (void) md->pmd_release_pmc(cpu, ri, pm);
+
+ /*
+ * Update row disposition
+ */
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode))
+ PMC_UNMARK_ROW_STANDALONE(ri);
+ else
+ PMC_UNMARK_ROW_THREAD(ri);
+
+ /* unlink from the owner's list */
+ if (pm->pm_owner)
+ pmc_unlink_owner(pm);
+
+ pmc_destroy_pmc_descriptor(pm);
+}
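+
+/*
+ * Usage sketch (informational), as in the PMCRELEASE operation later
+ * in this file:
+ *
+ *	po = pm->pm_owner;
+ *	pmc_release_pmc_descriptor(pm);
+ *	pmc_maybe_remove_owner(po);
+ *	FREE(pm, M_PMC);
+ */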
+
+/*
+ * Register an owner and a pmc.
+ */
+
+static int
+pmc_register_owner(struct proc *p, struct pmc *pmc)
+{
+ struct pmc_list *pl;
+ struct pmc_owner *po;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ MALLOC(pl, struct pmc_list *, sizeof(struct pmc_list), M_PMC,
+ M_WAITOK);
+
+ if (pl == NULL)
+ return ENOMEM;
+
+ if ((po = pmc_find_owner_descriptor(p)) == NULL) {
+ if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
+ FREE(pl, M_PMC);
+ return ENOMEM;
+ }
+ po->po_flags |= PMC_FLAG_IS_OWNER; /* real owner */
+ }
+
+ if (pmc->pm_mode == PMC_MODE_TS) {
+ /* can have only one TS mode PMC per process */
+ if (po->po_flags & PMC_FLAG_HAS_TS_PMC) {
+ FREE(pl, M_PMC);
+ return EINVAL;
+ }
+ po->po_flags |= PMC_FLAG_HAS_TS_PMC;
+ }
+
+ KASSERT(pmc->pm_owner == NULL,
+ ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
+ pmc->pm_owner = po;
+
+ pl->pl_pmc = pmc;
+
+ LIST_INSERT_HEAD(&po->po_pmcs, pl, pl_next);
+
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+
+ PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pl=%p pmc=%p",
+ po, pl, pmc);
+
+ return 0;
+}
+
+/*
+ * Return the current row disposition:
+ * == 0 => FREE
+ * > 0 => PROCESS MODE
+ * < 0 => SYSTEM MODE
+ */
+
+int
+pmc_getrowdisp(int ri)
+{
+ return pmc_pmcdisp[ri];
+}
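+
+/*
+ * Informational sketch: row dispositions are set and cleared with
+ * the PMC_MARK_ROW_*()/PMC_UNMARK_ROW_*() macros used elsewhere in
+ * this file, e.g. at allocation time:
+ *
+ *	if (PMC_IS_SYSTEM_MODE(mode))
+ *		PMC_MARK_ROW_STANDALONE(n);
+ *	else
+ *		PMC_MARK_ROW_THREAD(n);
+ */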
+
+/*
+ * Check if a PMC at row index 'ri' can be allocated to the current
+ * process.
+ *
+ * Allocation can fail if:
+ * - the current process is already being profiled by a PMC at index 'ri',
+ * attached to it via OP_PMCATTACH.
+ * - the current process has already allocated a PMC at index 'ri'
+ * via OP_ALLOCATE.
+ */
+
+static int
+pmc_can_allocate_rowindex(struct proc *p, unsigned int ri)
+{
+ struct pmc_list *pl;
+ struct pmc_owner *po;
+ struct pmc_process *pp;
+
+ PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d",
+ p, p->p_pid, p->p_comm, ri);
+
+ /* we shouldn't have allocated a PMC at row index 'ri' */
+ if ((po = pmc_find_owner_descriptor(p)) != NULL)
+ LIST_FOREACH(pl, &po->po_pmcs, pl_next)
+ if (pl->pl_pmc->pm_rowindex == ri)
+ return EEXIST;
+
+ /* we shouldn't be the target of any PMC ourselves at this index */
+ if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
+ if (pp->pp_pmcs[ri].pp_pmc)
+ return EEXIST;
+
+ PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
+ p, p->p_pid, p->p_comm, ri);
+
+ return 0;
+}
+
+/*
+ * Check if a given PMC at row index 'ri' can be currently used in
+ * mode 'mode'.
+ */
+
+static int
+pmc_can_allocate_row(int ri, enum pmc_mode mode)
+{
+ enum pmc_disp disp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ disp = PMC_DISP_STANDALONE;
+ else
+ disp = PMC_DISP_THREAD;
+
+ /*
+ * check disposition for PMC row 'ri':
+ *
+ * Expected disposition Row-disposition Result
+ *
+ * STANDALONE STANDALONE or FREE proceed
+ * STANDALONE THREAD fail
+ * THREAD THREAD or FREE proceed
+ * THREAD STANDALONE fail
+ */
+
+ if (!PMC_ROW_DISP_IS_FREE(ri) &&
+ !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
+ !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
+ return EBUSY;
+
+ /*
+ * All OK
+ */
+
+ PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
+
+ return 0;
+
+}
+
+/*
+ * Find the PMC descriptor with user handle 'pmcid' owned by 'po'.
+ */
+
+static struct pmc *
+pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
+{
+ struct pmc_list *pl;
+
+ KASSERT(pmcid < md->pmd_npmc,
+ ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__, pmcid,
+ md->pmd_npmc));
+
+ LIST_FOREACH(pl, &po->po_pmcs, pl_next)
+ if (pl->pl_pmc->pm_rowindex == pmcid)
+ return pl->pl_pmc;
+
+ return NULL;
+}
+
+static int
+pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
+{
+
+ struct pmc *pm;
+ struct pmc_owner *po;
+
+ PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
+
+ if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
+ return ESRCH;
+
+ if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
+ return EINVAL;
+
+ PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
+
+ *pmc = pm;
+ return 0;
+}
+
+/*
+ * Start a PMC.
+ */
+
+static int
+pmc_start(struct pmc *pm)
+{
+ int error, cpu, ri;
+ struct pmc_binding pb;
+
+ KASSERT(pm != NULL,
+ ("[pmc,%d] null pm", __LINE__));
+
+ PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, pm->pm_mode,
+ pm->pm_rowindex);
+
+ pm->pm_state = PMC_STATE_RUNNING;
+
+ if (PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+
+ /*
+ * If a PMCATTACH hasn't been done on this
+ * PMC, attach this PMC to its owner process.
+ */
+
+ if (LIST_EMPTY(&pm->pm_targets))
+ return pmc_attach_process(pm->pm_owner->po_owner, pm);
+
+
+ /*
+ * Nothing further to be done; thread context switch code
+ * will start/stop the PMC as appropriate.
+ */
+
+ return 0;
+
+ }
+
+ /*
+ * A system-mode PMC. Move to the CPU associated with this
+ * PMC, and start the hardware.
+ */
+
+ pmc_save_cpu_binding(&pb);
+
+ cpu = pm->pm_gv.pm_cpu;
+
+ if (pmc_cpu_is_disabled(cpu))
+ return ENXIO;
+
+ ri = pm->pm_rowindex;
+
+ pmc_select_cpu(cpu);
+
+ /*
+ * Global PMCs are configured at allocation time, so
+ * write out the initial value and start the PMC.
+ */
+
+ if ((error = md->pmd_write_pmc(cpu, ri,
+ PMC_IS_SAMPLING_MODE(pm->pm_mode) ?
+ pm->pm_sc.pm_reloadcount :
+ pm->pm_sc.pm_initial)) == 0)
+ error = md->pmd_start_pmc(cpu, ri);
+
+ pmc_restore_cpu_binding(&pb);
+
+ return error;
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+pmc_stop(struct pmc *pm)
+{
+ int error, cpu;
+ struct pmc_binding pb;
+
+ KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
+
+ PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm, pm->pm_mode,
+ pm->pm_rowindex);
+
+ pm->pm_state = PMC_STATE_STOPPED;
+
+ /*
+ * If the PMC is a virtual mode one, changing the state to
+ * non-RUNNING is enough to ensure that the PMC never gets
+ * scheduled.
+ *
+ * If this PMC is currently running on a CPU, then it will be
+ * handled correctly at the time its target process is context
+ * switched out.
+ */
+
+ if (PMC_IS_VIRTUAL_MODE(pm->pm_mode))
+ return 0;
+
+ /*
+ * A system-mode PMC. Move to the CPU associated with
+ * this PMC, and stop the hardware. We update the
+ * 'initial count' so that a subsequent PMCSTART will
+ * resume counting from the current hardware count.
+ */
+
+ pmc_save_cpu_binding(&pb);
+
+ cpu = pm->pm_gv.pm_cpu;
+
+ if (pmc_cpu_is_disabled(cpu))
+ return ENXIO;
+
+ pmc_select_cpu(cpu);
+
+ if ((error = md->pmd_stop_pmc(cpu, pm->pm_rowindex)) == 0)
+ error = md->pmd_read_pmc(cpu, pm->pm_rowindex,
+ &pm->pm_sc.pm_initial);
+
+ pmc_restore_cpu_binding(&pb);
+
+ return error;
+}
+
+
+#if DEBUG
+static const char *pmc_op_to_name[] = {
+#undef __PMC_OP
+#define __PMC_OP(N, D) #N ,
+ __PMC_OPS()
+ NULL
+};
+#endif
+
+/*
+ * The syscall interface
+ */
+
+#define PMC_GET_SX_XLOCK(...) do { \
+ sx_xlock(&pmc_sx); \
+ if (pmc_hook == NULL) { \
+ sx_xunlock(&pmc_sx); \
+ return __VA_ARGS__; \
+ } \
+} while (0)
+
+#define PMC_DOWNGRADE_SX() do { \
+ sx_downgrade(&pmc_sx); \
+ is_sx_downgraded = 1; \
+} while (0)
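+
+/*
+ * Informational note: both macros above are paired with a single
+ * unlock at the end of pmc_syscall_handler():
+ *
+ *	if (is_sx_downgraded)
+ *		sx_sunlock(&pmc_sx);
+ *	else
+ *		sx_xunlock(&pmc_sx);
+ */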
+
+static int
+pmc_syscall_handler(struct thread *td, void *syscall_args)
+{
+ int error, is_sx_downgraded, op;
+ struct pmc_syscall_args *c;
+ void *arg;
+
+ PMC_GET_SX_XLOCK(ENOSYS);
+
+ is_sx_downgraded = 0;
+
+ c = (struct pmc_syscall_args *) syscall_args;
+
+ op = c->pmop_code;
+ arg = c->pmop_data;
+
+ PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
+ pmc_op_to_name[op], arg);
+
+ error = 0;
+ atomic_add_int(&pmc_stats.pm_syscalls, 1);
+
+ switch (op)
+ {
+
+
+ /*
+ * Configure a log file.
+ *
+ * XXX This OP will be reworked.
+ */
+
+ case PMC_OP_CONFIGURELOG:
+ {
+ struct pmc_owner *po;
+ struct pmc_op_configurelog cl;
+ struct proc *p;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
+ break;
+
+ /* mark this process as owning a log file */
+ p = td->td_proc;
+ if ((po = pmc_find_owner_descriptor(p)) == NULL)
+ if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
+ return ENOMEM;
+
+ if ((error = pmc_configure_log(po, cl.pm_logfd)) != 0)
+ break;
+
+ }
+ break;
+
+
+ /*
+ * Retrieve hardware configuration.
+ */
+
+ case PMC_OP_GETCPUINFO: /* CPU information */
+ {
+ struct pmc_op_getcpuinfo gci;
+
+ gci.pm_cputype = md->pmd_cputype;
+ gci.pm_npmc = md->pmd_npmc;
+ gci.pm_nclass = md->pmd_nclass;
+ bcopy(md->pmd_classes, &gci.pm_classes,
+ sizeof(gci.pm_classes));
+ gci.pm_ncpu = mp_ncpus;
+ error = copyout(&gci, arg, sizeof(gci));
+ }
+ break;
+
+
+ /*
+ * Get module statistics
+ */
+
+ case PMC_OP_GETDRIVERSTATS:
+ {
+ struct pmc_op_getdriverstats gms;
+
+ bcopy(&pmc_stats, &gms, sizeof(gms));
+ error = copyout(&gms, arg, sizeof(gms));
+ }
+ break;
+
+
+ /*
+ * Retrieve module version number
+ */
+
+ case PMC_OP_GETMODULEVERSION:
+ {
+ error = copyout(&_pmc_version.mv_version, arg, sizeof(int));
+ }
+ break;
+
+
+ /*
+ * Retrieve the state of all the PMCs on a given
+ * CPU.
+ */
+
+ case PMC_OP_GETPMCINFO:
+ {
+ uint32_t cpu, n, npmc;
+ size_t pmcinfo_size;
+ struct pmc *pm;
+ struct pmc_info *p, *pmcinfo;
+ struct pmc_op_getpmcinfo *gpi;
+ struct pmc_owner *po;
+ struct pmc_binding pb;
+
+ PMC_DOWNGRADE_SX();
+
+ gpi = (struct pmc_op_getpmcinfo *) arg;
+
+ if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
+ break;
+
+ if (cpu >= (unsigned int) mp_ncpus) {
+ error = EINVAL;
+ break;
+ }
+
+ if (pmc_cpu_is_disabled(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /* switch to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ npmc = md->pmd_npmc;
+
+ pmcinfo_size = npmc * sizeof(struct pmc_info);
+ MALLOC(pmcinfo, struct pmc_info *, pmcinfo_size, M_PMC,
+ M_WAITOK);
+
+ p = pmcinfo;
+
+ for (n = 0; n < md->pmd_npmc; n++, p++) {
+
+ if ((error = md->pmd_describe(cpu, n, p, &pm)) != 0)
+ break;
+
+ if (PMC_ROW_DISP_IS_STANDALONE(n))
+ p->pm_rowdisp = PMC_DISP_STANDALONE;
+ else if (PMC_ROW_DISP_IS_THREAD(n))
+ p->pm_rowdisp = PMC_DISP_THREAD;
+ else
+ p->pm_rowdisp = PMC_DISP_FREE;
+
+ p->pm_ownerpid = -1;
+
+ if (pm == NULL) /* no PMC associated */
+ continue;
+
+ po = pm->pm_owner;
+
+ KASSERT(po->po_owner != NULL,
+ ("[pmc,%d] pmc_owner had a null proc pointer",
+ __LINE__));
+
+ p->pm_ownerpid = po->po_owner->p_pid;
+ p->pm_mode = pm->pm_mode;
+ p->pm_event = pm->pm_event;
+ p->pm_flags = pm->pm_flags;
+
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ p->pm_reloadcount =
+ pm->pm_sc.pm_reloadcount;
+ }
+
+ pmc_restore_cpu_binding(&pb);
+
+ /* now copy out the PMC info collected */
+ if (error == 0)
+ error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
+
+ FREE(pmcinfo, M_PMC);
+ }
+ break;
+
+
+ /*
+ * Set the administrative state of a PMC, i.e., whether
+ * the PMC is to be used or not.
+ */
+
+ case PMC_OP_PMCADMIN:
+ {
+ int cpu, ri;
+ enum pmc_state request;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ struct pmc_op_pmcadmin pma;
+ struct pmc_binding pb;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(td == curthread,
+ ("[pmc,%d] td != curthread", __LINE__));
+
+ if (suser(td) || jailed(td->td_ucred)) {
+ error = EPERM;
+ break;
+ }
+
+ if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
+ break;
+
+ cpu = pma.pm_cpu;
+
+ if (cpu < 0 || cpu >= mp_ncpus) {
+ error = EINVAL;
+ break;
+ }
+
+ if (pmc_cpu_is_disabled(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ request = pma.pm_state;
+
+ if (request != PMC_STATE_DISABLED &&
+ request != PMC_STATE_FREE) {
+ error = EINVAL;
+ break;
+ }
+
+ ri = pma.pm_pmc; /* pmc id == row index */
+ if (ri < 0 || ri >= (int) md->pmd_npmc) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * We can't disable a PMC with a row-index allocated
+ * for process virtual PMCs.
+ */
+
+ if (PMC_ROW_DISP_IS_THREAD(ri) &&
+ request == PMC_STATE_DISABLED) {
+ error = EBUSY;
+ break;
+ }
+
+ /*
+ * otherwise, this PMC on this CPU is either free or
+ * in system-wide mode.
+ */
+
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ pc = pmc_pcpu[cpu];
+ phw = pc->pc_hwpmcs[ri];
+
+ /*
+ * XXX do we need some kind of 'forced' disable?
+ */
+
+ if (phw->phw_pmc == NULL) {
+ if (request == PMC_STATE_DISABLED &&
+ (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
+ phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
+ PMC_MARK_ROW_STANDALONE(ri);
+ } else if (request == PMC_STATE_FREE &&
+ (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
+ phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
+ PMC_UNMARK_ROW_STANDALONE(ri);
+ }
+ /* other cases are a no-op */
+ } else
+ error = EBUSY;
+
+ pmc_restore_cpu_binding(&pb);
+ }
+ break;
+
+
+ /*
+ * Allocate a PMC.
+ */
+
+ case PMC_OP_PMCALLOCATE:
+ {
+ uint32_t caps;
+ u_int cpu;
+ int n;
+ enum pmc_mode mode;
+ struct pmc *pmc;
+ struct pmc_op_pmcallocate pa;
+ struct pmc_binding pb;
+
+ if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
+ break;
+
+ caps = pa.pm_caps;
+ mode = pa.pm_mode;
+ cpu = pa.pm_cpu;
+
+ if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
+ mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
+ (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) mp_ncpus)) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * Virtual PMCs should only ask for a default CPU.
+ * System mode PMCs need to specify a non-default CPU.
+ */
+
+ if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
+ (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * Check that a disabled CPU is not being asked for.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode) && pmc_cpu_is_disabled(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /*
+ * Refuse an allocation for a system-wide PMC if this
+ * process has been jailed, or if this process lacks
+ * super-user credentials and the sysctl tunable
+ * 'security.bsd.unprivileged_syspmcs' is zero.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode)) {
+ if (jailed(curthread->td_ucred))
+ error = EPERM;
+ else if (suser(curthread) &&
+ (pmc_unprivileged_syspmcs == 0))
+ error = EPERM;
+ }
+
+ if (error)
+ break;
+
+ /*
+ * Look for valid values for 'pm_flags'
+ */
+
+ if ((pa.pm_flags & ~(PMC_F_DESCENDANTS|PMC_F_LOG_TC_CSW))
+ != 0) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * All sampling mode PMCs need to be able to interrupt the
+ * CPU.
+ */
+
+ if (PMC_IS_SAMPLING_MODE(mode)) {
+ caps |= PMC_CAP_INTERRUPT;
+ error = ENOSYS; /* for snapshot 6 */
+ break;
+ }
+
+ PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
+ pa.pm_ev, caps, mode, cpu);
+
+ pmc = pmc_allocate_pmc_descriptor();
+ pmc->pm_event = pa.pm_ev;
+ pmc->pm_class = pa.pm_class;
+ pmc->pm_state = PMC_STATE_FREE;
+ pmc->pm_mode = mode;
+ pmc->pm_caps = caps;
+ pmc->pm_flags = pa.pm_flags;
+
+ /* switch thread to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+
+#define PMC_IS_SHAREABLE_PMC(cpu, n) \
+ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
+ PMC_PHW_FLAG_IS_SHAREABLE)
+#define PMC_IS_UNALLOCATED(cpu, n) \
+ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
+
+ if (PMC_IS_SYSTEM_MODE(mode)) {
+ pmc_select_cpu(cpu);
+ for (n = 0; n < (int) md->pmd_npmc; n++)
+ if (pmc_can_allocate_row(n, mode) == 0 &&
+ pmc_can_allocate_rowindex(
+ curthread->td_proc, n) == 0 &&
+ (PMC_IS_UNALLOCATED(cpu, n) ||
+ PMC_IS_SHAREABLE_PMC(cpu, n)) &&
+ md->pmd_allocate_pmc(cpu, n, pmc,
+ &pa) == 0)
+ break;
+ } else {
+ /* Process virtual mode */
+ for (n = 0; n < (int) md->pmd_npmc; n++) {
+ if (pmc_can_allocate_row(n, mode) == 0 &&
+ pmc_can_allocate_rowindex(
+ curthread->td_proc, n) == 0 &&
+ md->pmd_allocate_pmc(curthread->td_oncpu,
+ n, pmc, &pa) == 0)
+ break;
+ }
+ }
+
+#undef PMC_IS_UNALLOCATED
+#undef PMC_IS_SHAREABLE_PMC
+
+ pmc_restore_cpu_binding(&pb);
+
+ if (n == (int) md->pmd_npmc) {
+ pmc_destroy_pmc_descriptor(pmc);
+ FREE(pmc, M_PMC);
+ pmc = NULL;
+ error = EINVAL;
+ break;
+ }
+
+ PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d -> n=%d",
+ pmc->pm_event, pmc->pm_class, pmc->pm_mode, n);
+
+ /*
+ * Configure global PMCs immediately.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(pmc->pm_mode))
+ if ((error = md->pmd_config_pmc(cpu, n, pmc)) != 0) {
+ (void) md->pmd_release_pmc(cpu, n, pmc);
+ pmc_destroy_pmc_descriptor(pmc);
+ FREE(pmc, M_PMC);
+ pmc = NULL;
+ break;
+ }
+
+ /*
+ * Mark the row index allocated.
+ */
+
+ pmc->pm_rowindex = n;
+ pmc->pm_state = PMC_STATE_ALLOCATED;
+
+ /*
+ * mark row disposition
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ PMC_MARK_ROW_STANDALONE(n);
+ else
+ PMC_MARK_ROW_THREAD(n);
+
+ /*
+ * If this is a system-wide PMC, mark the CPU it
+ * was allocated on.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ pmc->pm_gv.pm_cpu = cpu;
+
+ /*
+ * Register this PMC with the current thread as its owner.
+ */
+
+ if ((error =
+ pmc_register_owner(curthread->td_proc, pmc)) != 0) {
+ pmc_release_pmc_descriptor(pmc);
+ FREE(pmc, M_PMC);
+ pmc = NULL;
+ break;
+ }
+
+ /*
+ * Return the allocated index.
+ */
+
+ pa.pm_pmcid = n;
+
+ error = copyout(&pa, arg, sizeof(pa));
+ }
+ break;
+
+
+ /*
+ * Attach a PMC to a process.
+ */
+
+ case PMC_OP_PMCATTACH:
+ {
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_op_pmcattach a;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &a, sizeof(a))) != 0)
+ break;
+
+ if (a.pm_pid < 0) {
+ error = EINVAL;
+ break;
+ } else if (a.pm_pid == 0)
+ a.pm_pid = td->td_proc->p_pid;
+
+ if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
+ break;
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ error = EINVAL;
+ break;
+ }
+
+ /* PMCs may be (re)attached only when allocated or stopped */
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+ error = EBUSY;
+ break;
+ } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
+ pm->pm_state != PMC_STATE_STOPPED) {
+ error = EINVAL;
+ break;
+ }
+
+ /* lookup pid */
+ if ((p = pfind(a.pm_pid)) == NULL) {
+ error = ESRCH;
+ break;
+ }
+
+ /*
+ * Ignore processes that are working on exiting.
+ */
+ if (p->p_flag & P_WEXIT) {
+ error = ESRCH;
+ PROC_UNLOCK(p); /* pfind() returns a locked process */
+ break;
+ }
+
+ /*
+ * we are allowed to attach a PMC to a process if
+ * we can debug it.
+ */
+ error = p_candebug(curthread, p);
+
+ PROC_UNLOCK(p);
+
+ if (error == 0)
+ error = pmc_attach_process(p, pm);
+ }
+ break;
+
+
+ /*
+ * Detach an attached PMC from a process.
+ */
+
+ case PMC_OP_PMCDETACH:
+ {
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_op_pmcattach a;
+
+ if ((error = copyin(arg, &a, sizeof(a))) != 0)
+ break;
+
+ if (a.pm_pid < 0) {
+ error = EINVAL;
+ break;
+ } else if (a.pm_pid == 0)
+ a.pm_pid = td->td_proc->p_pid;
+
+ if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
+ break;
+
+ if ((p = pfind(a.pm_pid)) == NULL) {
+ error = ESRCH;
+ break;
+ }
+
+ /*
+ * Treat processes that are in the process of exiting
+ * as if they were not present.
+ */
+
+ if (p->p_flag & P_WEXIT)
+ error = ESRCH;
+
+ PROC_UNLOCK(p); /* pfind() returns a locked process */
+
+ if (error == 0)
+ error = pmc_detach_process(p, pm);
+ }
+ break;
+
+
+ /*
+ * Release an allocated PMC
+ */
+
+ case PMC_OP_PMCRELEASE:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_owner *po;
+ struct pmc_op_simple sp;
+
+ /*
+ * Find PMC pointer for the named PMC.
+ *
+ * Use pmc_release_pmc_descriptor() to switch off the
+ * PMC, remove all its target threads, and remove the
+ * PMC from its owner's list.
+ *
+ * Remove the owner record if this is the last PMC
+ * owned.
+ *
+ * Free up space.
+ */
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ po = pm->pm_owner;
+ pmc_release_pmc_descriptor(pm);
+ pmc_maybe_remove_owner(po);
+
+ FREE(pm, M_PMC);
+ }
+ break;
+
+
+ /*
+ * Read and/or write a PMC.
+ */
+
+ case PMC_OP_PMCRW:
+ {
+ uint32_t cpu, ri;
+ struct pmc *pm;
+ struct pmc_op_pmcrw *pprw;
+ struct pmc_op_pmcrw prw;
+ struct pmc_binding pb;
+ pmc_value_t oldvalue;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
+ break;
+
+ PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
+ prw.pm_flags);
+
+ /* must have at least one flag set */
+ if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
+ error = EINVAL;
+ break;
+ }
+
+ /* locate pmc descriptor */
+ if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
+ break;
+
+ /* the PMC must be in a readable state: allocated, stopped or running */
+ if (pm->pm_state != PMC_STATE_ALLOCATED &&
+ pm->pm_state != PMC_STATE_STOPPED &&
+ pm->pm_state != PMC_STATE_RUNNING) {
+ error = EINVAL;
+ break;
+ }
+
+ /* writing a new value is allowed only for 'STOPPED' pmcs */
+ if (pm->pm_state == PMC_STATE_RUNNING &&
+ (prw.pm_flags & PMC_F_NEWVALUE)) {
+ error = EBUSY;
+ break;
+ }
+
+ if (PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+
+ /* read/write the saved value in the PMC record */
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ if (prw.pm_flags & PMC_F_OLDVALUE)
+ oldvalue = pm->pm_gv.pm_savedvalue;
+ if (prw.pm_flags & PMC_F_NEWVALUE)
+ pm->pm_gv.pm_savedvalue = prw.pm_value;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ } else { /* System mode PMCs */
+ cpu = pm->pm_gv.pm_cpu;
+ ri = pm->pm_rowindex;
+
+ if (pmc_cpu_is_disabled(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /* move this thread to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ /* save old value */
+ if (prw.pm_flags & PMC_F_OLDVALUE)
+ if ((error = (*md->pmd_read_pmc)(cpu, ri,
+ &oldvalue)))
+ goto error;
+ /* write out new value */
+ if (prw.pm_flags & PMC_F_NEWVALUE)
+ error = (*md->pmd_write_pmc)(cpu, ri,
+ prw.pm_value);
+ error:
+ pmc_restore_cpu_binding(&pb);
+ if (error)
+ break;
+ }
+
+ pprw = (struct pmc_op_pmcrw *) arg;
+
+#if DEBUG
+ if (prw.pm_flags & PMC_F_NEWVALUE)
+ PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
+ ri, prw.pm_value, oldvalue);
+ else
+ PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
+#endif
+
+ /* return old value if requested */
+ if (prw.pm_flags & PMC_F_OLDVALUE)
+ if ((error = copyout(&oldvalue, &pprw->pm_value,
+ sizeof(prw.pm_value))))
+ break;
+
+ /*
+ * send a signal (SIGIO) to the owner if it is trying to read
+ * a PMC with no target processes attached.
+ */
+
+ if (LIST_EMPTY(&pm->pm_targets) &&
+ (prw.pm_flags & PMC_F_OLDVALUE)) {
+ PROC_LOCK(curthread->td_proc);
+ psignal(curthread->td_proc, SIGIO);
+ PROC_UNLOCK(curthread->td_proc);
+ }
+ }
+ break;
+
+
+ /*
+ * Set the sampling rate for a sampling mode PMC and the
+ * initial count for a counting mode PMC.
+ */
+
+ case PMC_OP_PMCSETCOUNT:
+ {
+ struct pmc *pm;
+ struct pmc_op_pmcsetcount sc;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
+ break;
+
+ if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
+ break;
+
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+ error = EBUSY;
+ break;
+ }
+
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ pm->pm_sc.pm_reloadcount = sc.pm_count;
+ else
+ pm->pm_sc.pm_initial = sc.pm_count;
+ }
+ break;
+
+
+ /*
+ * Start a PMC.
+ */
+
+ case PMC_OP_PMCSTART:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_op_simple sp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ KASSERT(pmcid == pm->pm_rowindex,
+ ("[pmc,%d] row index %d != id %d", __LINE__,
+ pm->pm_rowindex, pmcid));
+
+ if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
+ break;
+ else if (pm->pm_state != PMC_STATE_STOPPED &&
+ pm->pm_state != PMC_STATE_ALLOCATED) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmc_start(pm);
+ }
+ break;
+
+
+ /*
+ * Stop a PMC.
+ */
+
+ case PMC_OP_PMCSTOP:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_op_simple sp;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ /*
+ * Mark the PMC as inactive and invoke the MD stop
+ * routines if needed.
+ */
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ KASSERT(pmcid == pm->pm_rowindex,
+ ("[pmc,%d] row index %d != pmcid %d", __LINE__,
+ pm->pm_rowindex, pmcid));
+
+ if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
+ break;
+ else if (pm->pm_state != PMC_STATE_RUNNING) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmc_stop(pm);
+ }
+ break;
+
+
+ /*
+ * Write a user-entry to the log file.
+ */
+
+ case PMC_OP_WRITELOG:
+ {
+
+ PMC_DOWNGRADE_SX();
+
+ /*
+ * flush all per-cpu hash tables
+ * append user-log entry
+ */
+
+ error = ENOSYS;
+ }
+ break;
+
+
+#if __i386__ || __amd64__
+
+ /*
+ * Machine dependent operation for i386-class processors.
+ *
+ * Retrieve the MSR number associated with the counter
+ * 'pmc_id'. This allows processes to directly use RDPMC
+ * instructions to read their PMCs, without the overhead of a
+ * system call.
+ */
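+
+	/*
+	 * An illustrative userland sketch (an assumption for
+	 * exposition; pmc(3) documents the supported libpmc API).
+	 * Once the MSR number has been copied out in 'pm_msr', a
+	 * process can read its PMC with RDPMC instead of a system
+	 * call.  RDPMC takes a counter index in %ecx; the mapping
+	 * from the returned MSR number to that index is machine
+	 * dependent.
+	 *
+	 *	uint32_t lo, hi;
+	 *	__asm __volatile("rdpmc"
+	 *	    : "=a" (lo), "=d" (hi)
+	 *	    : "c" (counter_index));	// derived from gm.pm_msr
+	 */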
+
+ case PMC_OP_PMCX86GETMSR:
+ {
+ int ri;
+ struct pmc *pm;
+ struct pmc_op_x86_getmsr gm;
+
+ PMC_DOWNGRADE_SX();
+
+ /* CPU has no 'GETMSR' support */
+ if (md->pmd_get_msr == NULL) {
+ error = ENOSYS;
+ break;
+ }
+
+ if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
+ break;
+
+ if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
+ break;
+
+ /*
+ * The allocated PMC needs to be a process virtual PMC,
+ * i.e., of type T[CS].
+ *
+ * Global PMCs can only be read using the PMCREAD
+ * operation since they may be allocated on a
+ * different CPU than the one we could be running on
+ * at the time of the read.
+ */
+
+ if (!PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+ error = EINVAL;
+ break;
+ }
+
+ ri = pm->pm_rowindex;
+
+ if ((error = (*md->pmd_get_msr)(ri, &gm.pm_msr)) < 0)
+ break;
+ if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
+ break;
+ }
+ break;
+#endif
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ if (is_sx_downgraded)
+ sx_sunlock(&pmc_sx);
+ else
+ sx_xunlock(&pmc_sx);
+
+ if (error)
+ atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
+
+ return error;
+}
+
+/*
+ * Helper functions
+ */
+
+/*
+ * Configure a log file.
+ */
+
+static int
+pmc_configure_log(struct pmc_owner *po, int logfd)
+{
+ struct proc *p;
+
+ return ENOSYS; /* for now */
+
+ p = po->po_owner;
+
+ if (po->po_logfd < 0 && logfd < 0) /* nothing to do */
+ return 0;
+
+ if (po->po_logfd >= 0 && logfd < 0) {
+ /* deconfigure log */
+ /* XXX */
+ po->po_flags &= ~PMC_FLAG_OWNS_LOGFILE;
+ pmc_maybe_remove_owner(po);
+
+ } else if (po->po_logfd < 0 && logfd >= 0) {
+ /* configure log file */
+ /* XXX */
+ po->po_flags |= PMC_FLAG_OWNS_LOGFILE;
+
+ /* mark process as using HWPMCs */
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+ } else
+ return EBUSY;
+
+ return 0;
+}
+
+/*
+ * Log an exit event to the PMC owner's log file.
+ */
+
+static void
+pmc_log_process_exit(struct pmc *pm, struct pmc_process *pp)
+{
+ KASSERT(pm->pm_flags & PMC_F_LOG_TC_PROCEXIT,
+ ("[pmc,%d] log-process-exit called gratuitously", __LINE__));
+
+ (void) pm;
+ (void) pp;
+
+ return;
+}
+
+/*
+ * Event handlers.
+ */
+
+/*
+ * Handle a process exit.
+ *
+ * XXX This eventhandler gets called early in the exit process.
+ * Consider using a 'hook' invocation from thread_exit() or equivalent
+ * spot. Another negative is that kse_exit doesn't seem to call
+ * exit1() [??].
+ */
+
+static void
+pmc_process_exit(void *arg __unused, struct proc *p)
+{
+ int is_using_hwpmcs;
+
+ PROC_LOCK(p);
+ is_using_hwpmcs = p->p_flag & P_HWPMC;
+ PROC_UNLOCK(p);
+
+ if (is_using_hwpmcs) {
+ PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
+ p->p_comm);
+
+ PMC_GET_SX_XLOCK();
+ (void) pmc_hook_handler(curthread, PMC_FN_PROCESS_EXIT,
+ (void *) p);
+ sx_xunlock(&pmc_sx);
+ }
+}
+
+/*
+ * Handle a process fork.
+ *
+ * If the parent process 'p1' is under HWPMC monitoring, then copy
+ * over any attached PMCs that have 'do_descendants' semantics.
+ */
+
+static void
+pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *p2,
+ int flags)
+{
+ int is_using_hwpmcs;
+
+ (void) flags; /* unused parameter */
+
+ PROC_LOCK(p1);
+ is_using_hwpmcs = p1->p_flag & P_HWPMC;
+ PROC_UNLOCK(p1);
+
+ if (is_using_hwpmcs) {
+ PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s)", p1,
+ p1->p_pid, p1->p_comm);
+ PMC_GET_SX_XLOCK();
+ (void) pmc_hook_handler(curthread, PMC_FN_PROCESS_FORK,
+ (void *) p2);
+ sx_xunlock(&pmc_sx);
+ }
+}
+
+
+/*
+ * initialization
+ */
+
+static const char *pmc_name_of_pmcclass[] = {
+#undef __PMC_CLASS
+#define __PMC_CLASS(N) #N ,
+ __PMC_CLASSES()
+};
+
+static int
+pmc_initialize(void)
+{
+ int error, cpu, n;
+ struct pmc_binding pb;
+
+ md = NULL;
+ error = 0;
+
+#if DEBUG
+ /* parse debug flags first */
+ if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
+ pmc_debugstr, sizeof(pmc_debugstr)))
+ pmc_debugflags_parse(pmc_debugstr,
+ pmc_debugstr+strlen(pmc_debugstr));
+#endif
+
+ PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
+
+ /*
+ * check sysctl parameters
+ */
+
+ if (pmc_hashsize <= 0) {
+ (void) printf("pmc: sysctl variable \""
+ PMC_SYSCTL_NAME_PREFIX "hashsize\" must be greater than "
+ "zero\n");
+ pmc_hashsize = PMC_HASH_SIZE;
+ }
+
+#if defined(__i386__)
+ /* determine the CPU kind. This is i386 specific */
+ if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+ md = pmc_amd_initialize();
+ else if (strcmp(cpu_vendor, "GenuineIntel") == 0)
+ md = pmc_intel_initialize();
+ /* XXX: what about the other i386 CPU manufacturers? */
+#elif defined(__amd64__)
+ if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+ md = pmc_amd_initialize();
+#else /* other architectures */
+ md = NULL;
+#endif
+
+ if (md == NULL || md->pmd_init == NULL)
+ return ENOSYS;
+
+ /* allocate space for the per-cpu array */
+ MALLOC(pmc_pcpu, struct pmc_cpu **, mp_ncpus * sizeof(struct pmc_cpu *),
+ M_PMC, M_WAITOK|M_ZERO);
+
+ /* per-cpu 'saved values' for managing process-mode PMCs */
+ MALLOC(pmc_pcpu_saved, pmc_value_t *,
+ sizeof(pmc_value_t) * mp_ncpus * md->pmd_npmc, M_PMC, M_WAITOK);
+
+ /* perform cpu dependent initialization */
+ pmc_save_cpu_binding(&pb);
+ for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ if (pmc_cpu_is_disabled(cpu))
+ continue;
+ pmc_select_cpu(cpu);
+ if ((error = md->pmd_init(cpu)) != 0)
+ break;
+ }
+ pmc_restore_cpu_binding(&pb);
+
+ if (error != 0)
+ return error;
+
+ /* allocate space for the row disposition array */
+ pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
+ M_PMC, M_WAITOK|M_ZERO);
+
+ KASSERT(pmc_pmcdisp != NULL,
+ ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
+
+ /* mark all PMCs as available */
+ for (n = 0; n < (int) md->pmd_npmc; n++)
+ PMC_MARK_ROW_FREE(n);
+
+ /* allocate thread hash tables */
+ pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
+ &pmc_ownerhashmask);
+
+ pmc_processhash = hashinit(pmc_hashsize, M_PMC,
+ &pmc_processhashmask);
+ mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc", MTX_SPIN);
+
+ /* allocate a pool of spin mutexes */
+ pmc_mtxpool = mtx_pool_create("pmc", pmc_mtxpool_size, MTX_SPIN);
+
+ PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
+ "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
+ pmc_processhash, pmc_processhashmask);
+
+ /* register process {exit,fork,exec} handlers */
+ pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
+ pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
+ pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
+ pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
+
+ /* set hook functions */
+ pmc_intr = md->pmd_intr;
+ pmc_hook = pmc_hook_handler;
+
+ if (error == 0) {
+ printf(PMC_MODULE_NAME ":");
+ for (n = 0; n < (int) md->pmd_nclass; n++)
+ printf(" %s(%d)",
+ pmc_name_of_pmcclass[md->pmd_classes[n]],
+ md->pmd_nclasspmcs[n]);
+ printf("\n");
+ }
+
+ return error;
+}
+
+/* prepare to be unloaded */
+static void
+pmc_cleanup(void)
+{
+ int cpu;
+ struct pmc_ownerhash *ph;
+ struct pmc_owner *po, *tmp;
+ struct pmc_binding pb;
+#if DEBUG
+ struct pmc_processhash *prh;
+#endif
+
+ PMCDBG(MOD,INI,0, "%s", "cleanup");
+
+ pmc_intr = NULL; /* no more interrupts please */
+
+ sx_xlock(&pmc_sx);
+ if (pmc_hook == NULL) { /* being unloaded already */
+ sx_xunlock(&pmc_sx);
+ return;
+ }
+
+ pmc_hook = NULL; /* prevent new threads from entering module */
+
+ /* deregister event handlers */
+ EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
+ EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
+
+ /* send SIGBUS to all owner threads, free up allocations */
+ if (pmc_ownerhash)
+ for (ph = pmc_ownerhash;
+ ph <= &pmc_ownerhash[pmc_ownerhashmask];
+ ph++) {
+ LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
+ pmc_remove_owner(po);
+
+ /* send SIGBUS to owner processes */
+ PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
+ "(%d, %s)", po->po_owner,
+ po->po_owner->p_pid,
+ po->po_owner->p_comm);
+
+ PROC_LOCK(po->po_owner);
+ psignal(po->po_owner, SIGBUS);
+ PROC_UNLOCK(po->po_owner);
+ FREE(po, M_PMC);
+ }
+ }
+
+ /* reclaim allocated data structures */
+ if (pmc_mtxpool)
+ mtx_pool_destroy(&pmc_mtxpool);
+
+ mtx_destroy(&pmc_processhash_mtx);
+ if (pmc_processhash) {
+#if DEBUG
+ struct pmc_process *pp;
+
+ PMCDBG(MOD,INI,3, "%s", "destroy process hash");
+ for (prh = pmc_processhash;
+ prh <= &pmc_processhash[pmc_processhashmask];
+ prh++)
+ LIST_FOREACH(pp, prh, pp_next)
+ PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
+#endif
+
+ hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
+ pmc_processhash = NULL;
+ }
+
+ if (pmc_ownerhash) {
+ PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
+ hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
+ pmc_ownerhash = NULL;
+ }
+
+ /* do processor dependent cleanup */
+ PMCDBG(MOD,INI,3, "%s", "md cleanup");
+ if (md) {
+ pmc_save_cpu_binding(&pb);
+ for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
+ cpu, pmc_pcpu[cpu]);
+ if (pmc_cpu_is_disabled(cpu))
+ continue;
+ pmc_select_cpu(cpu);
+ if (pmc_pcpu[cpu])
+ (void) md->pmd_cleanup(cpu);
+ }
+ FREE(md, M_PMC);
+ md = NULL;
+ pmc_restore_cpu_binding(&pb);
+ }
+
+ /* deallocate per-cpu structures */
+ FREE(pmc_pcpu, M_PMC);
+ pmc_pcpu = NULL;
+
+ FREE(pmc_pcpu_saved, M_PMC);
+ pmc_pcpu_saved = NULL;
+
+ if (pmc_pmcdisp) {
+ FREE(pmc_pmcdisp, M_PMC);
+ pmc_pmcdisp = NULL;
+ }
+
+ sx_xunlock(&pmc_sx); /* we are done */
+}
+
+/*
+ * The function called at load/unload.
+ */
+
+static int
+load(struct module *module __unused, int cmd, void *arg __unused)
+{
+ int error;
+
+ error = 0;
+
+ switch (cmd) {
+ case MOD_LOAD :
+ /* initialize the subsystem */
+ error = pmc_initialize();
+ if (error != 0)
+ break;
+ PMCDBG(MOD,INI,1, "syscall=%d ncpus=%d",
+ pmc_syscall_num, mp_ncpus);
+ break;
+
+
+ case MOD_UNLOAD :
+ case MOD_SHUTDOWN:
+ pmc_cleanup();
+ PMCDBG(MOD,INI,1, "%s", "unloaded");
+ break;
+
+ default :
+ error = EINVAL; /* XXX should panic(9) */
+ break;
+ }
+
+ return error;
+}
+
+/* memory pool */
+MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");
diff --git a/sys/dev/hwpmc/hwpmc_pentium.c b/sys/dev/hwpmc/hwpmc_pentium.c
new file mode 100644
index 0000000..9a02f41
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_pentium.c
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * Intel Pentium PMCs
+ */
+
+int
+pmc_initialize_p5(struct pmc_mdep *pmc_mdep)
+{
+ (void) pmc_mdep;
+ return ENOSYS; /* nothing here yet */
+}
diff --git a/sys/dev/hwpmc/hwpmc_piv.c b/sys/dev/hwpmc/hwpmc_piv.c
new file mode 100644
index 0000000..292fbba
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_piv.c
@@ -0,0 +1,1484 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * PENTIUM 4 SUPPORT
+ *
+ * The P4 has 18 PMCs, divided into 4 groups with 4,4,4 and 6 PMCs
+ * respectively.  Each PMC comprises two model-specific registers:
+ * a counter configuration control register (CCCR) and a counter
+ * register that holds the actual event counts.
+ *
+ * Configuring an event requires the use of one of 45 event selection
+ * control registers (ESCR). Events are associated with specific
+ * ESCRs. Each PMC group has a set of ESCRs it can use.
+ *
+ * - The BPU counter group (4 PMCs) can use the 16 ESCRs:
+ * BPU_ESCR{0,1}, IS_ESCR{0,1}, MOB_ESCR{0,1}, ITLB_ESCR{0,1},
+ *   PMH_ESCR{0,1}, IX_ESCR{0,1}, FSB_ESCR{0,1}, BSU_ESCR{0,1}.
+ *
+ * - The MS counter group (4 PMCs) can use the 6 ESCRs: MS_ESCR{0,1},
+ * TC_ESCR{0,1}, TBPU_ESCR{0,1}.
+ *
+ * - The FLAME counter group (4 PMCs) can use the 10 ESCRs:
+ * FLAME_ESCR{0,1}, FIRM_ESCR{0,1}, SAAT_ESCR{0,1}, U2L_ESCR{0,1},
+ * DAC_ESCR{0,1}.
+ *
+ * - The IQ counter group (6 PMCs) can use the 13 ESCRs: IQ_ESCR{0,1},
+ * ALF_ESCR{0,1}, RAT_ESCR{0,1}, SSU_ESCR0, CRU_ESCR{0,1,2,3,4,5}.
+ *
+ * Even-numbered ESCRs can be used with counters 0, 1 and 4 (if
+ * present) of a counter group.  Odd-numbered ESCRs can be used with
+ * counters 2, 3 and 5 (if present) of a counter group. The
+ * 'p4_escrs[]' table describes these restrictions in a form that
+ * function 'p4_allocate()' uses for making allocation decisions.
+ *
+ * SYSTEM-MODE AND THREAD-MODE ALLOCATION
+ *
+ * In addition to remembering the state of PMC rows
+ * ('FREE', 'STANDALONE', or 'THREAD'), we similarly need to track the
+ * state of ESCR rows.  If an ESCR is allocated to a system-mode PMC
+ * on a CPU, we cannot allocate it to a thread-mode PMC.  On a
+ * multi-cpu (multiple physical CPUs) system, ESCR allocation on each
+ * CPU is tracked by the pc_escrs[] array.
+ *
+ * Each system-mode PMC that is using an ESCR records its row-index in
+ * the appropriate entry and system-mode allocation attempts check
+ * that an ESCR is available using this array. Process-mode PMCs do
+ * not use the pc_escrs[] array, since the ESCR row itself would have
+ * been marked as in 'THREAD' mode.
+ *
+ * HYPERTHREADING SUPPORT
+ *
+ * When HTT is enabled, the FreeBSD kernel treats the two 'logical'
+ * cpus as independent CPUs and can schedule kernel threads on them
+ * independently. However, the two logical CPUs share the same set of
+ * PMC resources. We need to ensure that:
+ * - PMCs that use the PMC_F_DESCENDANTS semantics are handled correctly,
+ * and,
+ * - Threads of multi-threaded processes that get scheduled on the same
+ * physical CPU are handled correctly.
+ *
+ * Not all HTT capable systems will have HTT enabled since users may
+ * have turned HTT support off using the appropriate sysctls
+ * (machdep.hlt_logical_cpus and machdep.logical_cpus_mask). We
+ * detect the presence of HTT by remembering if an initialization was
+ * done for a logical CPU.
+ *
+ */
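+
+/*
+ * A minimal sketch of how one event is programmed (illustrative only;
+ * the authoritative logic lives in p4_allocate_pmc() and
+ * p4_start_pmc() below).  Counting BPU_FETCH_REQUEST on BPU_COUNTER0
+ * would look roughly like:
+ *
+ *	wrmsr(0x3B2,			// BPU_ESCR0
+ *	    P4_ESCR_TO_EVENT_SELECT(0x03) |
+ *	    P4_ESCR_T0_OS | P4_ESCR_T0_USR);
+ *	wrmsr(0x360,			// CCCR for BPU_COUNTER0
+ *	    P4_CCCR_TO_ESCR_SELECT(0x00) | P4_CCCR_ENABLE);
+ *	// counts then accumulate in MSR 0x300 (BPU_COUNTER0)
+ */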
+
+#define P4_PMCS() \
+ P4_PMC(BPU_COUNTER0) \
+ P4_PMC(BPU_COUNTER1) \
+ P4_PMC(BPU_COUNTER2) \
+ P4_PMC(BPU_COUNTER3) \
+ P4_PMC(MS_COUNTER0) \
+ P4_PMC(MS_COUNTER1) \
+ P4_PMC(MS_COUNTER2) \
+ P4_PMC(MS_COUNTER3) \
+ P4_PMC(FLAME_COUNTER0) \
+ P4_PMC(FLAME_COUNTER1) \
+ P4_PMC(FLAME_COUNTER2) \
+ P4_PMC(FLAME_COUNTER3) \
+ P4_PMC(IQ_COUNTER0) \
+ P4_PMC(IQ_COUNTER1) \
+ P4_PMC(IQ_COUNTER2) \
+ P4_PMC(IQ_COUNTER3) \
+ P4_PMC(IQ_COUNTER4) \
+ P4_PMC(IQ_COUNTER5) \
+ P4_PMC(NONE)
+
+enum pmc_p4pmc {
+#undef P4_PMC
+#define P4_PMC(N) P4_PMC_##N ,
+ P4_PMCS()
+};
+
+/*
+ * P4 ESCR descriptors
+ */
+
+#define P4_ESCRS() \
+ P4_ESCR(BSU_ESCR0, 0x3A0, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(BSU_ESCR1, 0x3A1, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(FSB_ESCR0, 0x3A2, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(FSB_ESCR1, 0x3A3, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(FIRM_ESCR0, 0x3A4, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(FIRM_ESCR1, 0x3A5, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(FLAME_ESCR0, 0x3A6, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(FLAME_ESCR1, 0x3A7, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(DAC_ESCR0, 0x3A8, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(DAC_ESCR1, 0x3A9, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(MOB_ESCR0, 0x3AA, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(MOB_ESCR1, 0x3AB, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(PMH_ESCR0, 0x3AC, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(PMH_ESCR1, 0x3AD, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(SAAT_ESCR0, 0x3AE, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(SAAT_ESCR1, 0x3AF, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(U2L_ESCR0, 0x3B0, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(U2L_ESCR1, 0x3B1, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(BPU_ESCR0, 0x3B2, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(BPU_ESCR1, 0x3B3, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(IS_ESCR0, 0x3B4, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(IS_ESCR1, 0x3B5, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(ITLB_ESCR0, 0x3B6, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(ITLB_ESCR1, 0x3B7, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(CRU_ESCR0, 0x3B8, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR1, 0x3B9, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(IQ_ESCR0, 0x3BA, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+	P4_ESCR(IQ_ESCR1,	0x3BB, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(RAT_ESCR0, 0x3BC, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(RAT_ESCR1, 0x3BD, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(SSU_ESCR0, 0x3BE, IQ_COUNTER0, IQ_COUNTER2, IQ_COUNTER4) \
+ P4_ESCR(MS_ESCR0, 0x3C0, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(MS_ESCR1, 0x3C1, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(TBPU_ESCR0, 0x3C2, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(TBPU_ESCR1, 0x3C3, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(TC_ESCR0, 0x3C4, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(TC_ESCR1, 0x3C5, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(IX_ESCR0, 0x3C8, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(IX_ESCR1, 0x3C9, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(ALF_ESCR0, 0x3CA, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(ALF_ESCR1, 0x3CB, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(CRU_ESCR2, 0x3CC, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR3, 0x3CD, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(CRU_ESCR4, 0x3E0, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR5, 0x3E1, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(NONE, ~0, NONE, NONE, NONE)
+
+enum pmc_p4escr {
+#define P4_ESCR(N, MSR, P1, P2, P3) P4_ESCR_##N ,
+ P4_ESCRS()
+#undef P4_ESCR
+};
+
+struct pmc_p4escr_descr {
+ const char pm_escrname[PMC_NAME_MAX];
+ u_short pm_escr_msr;
+ const enum pmc_p4pmc pm_pmcs[P4_MAX_PMC_PER_ESCR];
+};
+
+static struct pmc_p4escr_descr p4_escrs[] =
+{
+#define P4_ESCR(N, MSR, P1, P2, P3) \
+ { \
+ .pm_escrname = #N, \
+ .pm_escr_msr = (MSR), \
+ .pm_pmcs = \
+ { \
+ P4_PMC_##P1, \
+ P4_PMC_##P2, \
+ P4_PMC_##P3 \
+ } \
+ } ,
+
+ P4_ESCRS()
+
+#undef P4_ESCR
+};
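+
+/*
+ * For reference, the X-macro expansion above produces entries such as
+ * the following (illustrative):
+ *
+ *	{ .pm_escrname = "BSU_ESCR0", .pm_escr_msr = 0x3A0,
+ *	  .pm_pmcs = { P4_PMC_BPU_COUNTER0, P4_PMC_BPU_COUNTER1,
+ *	      P4_PMC_NONE } },
+ */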
+
+/*
+ * P4 Event descriptor
+ */
+
+struct p4_event_descr {
+ const enum pmc_event pm_event;
+ const uint32_t pm_escr_eventselect;
+ const uint32_t pm_cccr_select;
+ const char pm_is_ti_event;
+ enum pmc_p4escr pm_escrs[P4_MAX_ESCR_PER_EVENT];
+};
+
+static struct p4_event_descr p4_events[] = {
+
+#define P4_EVDESCR(NAME, ESCREVENTSEL, CCCRSEL, TI_EVENT, ESCR0, ESCR1) \
+ { \
+ .pm_event = PMC_EV_P4_##NAME, \
+ .pm_escr_eventselect = (ESCREVENTSEL), \
+ .pm_cccr_select = (CCCRSEL), \
+ .pm_is_ti_event = (TI_EVENT), \
+ .pm_escrs = \
+ { \
+ P4_ESCR_##ESCR0, \
+ P4_ESCR_##ESCR1 \
+ } \
+ }
+
+P4_EVDESCR(TC_DELIVER_MODE, 0x01, 0x01, TRUE, TC_ESCR0, TC_ESCR1),
+P4_EVDESCR(BPU_FETCH_REQUEST, 0x03, 0x00, FALSE, BPU_ESCR0, BPU_ESCR1),
+P4_EVDESCR(ITLB_REFERENCE, 0x18, 0x03, FALSE, ITLB_ESCR0, ITLB_ESCR1),
+P4_EVDESCR(MEMORY_CANCEL, 0x02, 0x05, FALSE, DAC_ESCR0, DAC_ESCR1),
+P4_EVDESCR(MEMORY_COMPLETE, 0x08, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(LOAD_PORT_REPLAY, 0x04, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(STORE_PORT_REPLAY, 0x05, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(MOB_LOAD_REPLAY, 0x03, 0x02, FALSE, MOB_ESCR0, MOB_ESCR1),
+P4_EVDESCR(PAGE_WALK_TYPE, 0x01, 0x04, TRUE, PMH_ESCR0, PMH_ESCR1),
+P4_EVDESCR(BSQ_CACHE_REFERENCE, 0x0C, 0x07, FALSE, BSU_ESCR0, BSU_ESCR1),
+P4_EVDESCR(IOQ_ALLOCATION, 0x03, 0x06, FALSE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(IOQ_ACTIVE_ENTRIES, 0x1A, 0x06, FALSE, FSB_ESCR1, NONE),
+P4_EVDESCR(FSB_DATA_ACTIVITY, 0x17, 0x06, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(BSQ_ALLOCATION, 0x05, 0x07, FALSE, BSU_ESCR0, NONE),
+P4_EVDESCR(BSQ_ACTIVE_ENTRIES, 0x06, 0x07, FALSE, BSU_ESCR1, NONE),
+ /* BSQ_ACTIVE_ENTRIES inherits CPU specificity from BSQ_ALLOCATION */
+P4_EVDESCR(SSE_INPUT_ASSIST, 0x34, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(PACKED_SP_UOP, 0x08, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(PACKED_DP_UOP, 0x0C, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(SCALAR_SP_UOP, 0x0A, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(SCALAR_DP_UOP, 0x0E, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(64BIT_MMX_UOP, 0x02, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(128BIT_MMX_UOP, 0x1A, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(X87_FP_UOP, 0x04, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(X87_SIMD_MOVES_UOP, 0x2E, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(GLOBAL_POWER_EVENTS, 0x13, 0x06, FALSE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(TC_MS_XFER, 0x05, 0x00, FALSE, MS_ESCR0, MS_ESCR1),
+P4_EVDESCR(UOP_QUEUE_WRITES, 0x09, 0x00, FALSE, MS_ESCR0, MS_ESCR1),
+P4_EVDESCR(RETIRED_MISPRED_BRANCH_TYPE,
+ 0x05, 0x02, FALSE, TBPU_ESCR0, TBPU_ESCR1),
+P4_EVDESCR(RETIRED_BRANCH_TYPE, 0x04, 0x02, FALSE, TBPU_ESCR0, TBPU_ESCR1),
+P4_EVDESCR(RESOURCE_STALL, 0x01, 0x01, FALSE, ALF_ESCR0, ALF_ESCR1),
+P4_EVDESCR(WC_BUFFER, 0x05, 0x05, TRUE, DAC_ESCR0, DAC_ESCR1),
+P4_EVDESCR(B2B_CYCLES, 0x16, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(BNR, 0x08, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(SNOOP, 0x06, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(RESPONSE, 0x04, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(FRONT_END_EVENT, 0x08, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(EXECUTION_EVENT, 0x0C, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(REPLAY_EVENT, 0x09, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(INSTR_RETIRED, 0x02, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(UOPS_RETIRED, 0x01, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(UOP_TYPE, 0x02, 0x02, FALSE, RAT_ESCR0, RAT_ESCR1),
+P4_EVDESCR(BRANCH_RETIRED, 0x06, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(MISPRED_BRANCH_RETIRED, 0x03, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(X87_ASSIST, 0x03, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(MACHINE_CLEAR, 0x02, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3)
+
+#undef P4_EVDESCR
+};
+
+#define P4_EVENT_IS_TI(E) ((E)->pm_is_ti_event == TRUE)
+
+#define P4_NEVENTS (PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1)
+
+/*
+ * P4 PMC descriptors
+ */
+
+struct p4pmc_descr {
+ struct pmc_descr pm_descr; /* common information */
+ enum pmc_p4pmc pm_pmcnum; /* PMC number */
+ uint32_t pm_pmc_msr; /* PERFCTR MSR address */
+ uint32_t pm_cccr_msr; /* CCCR MSR address */
+};
+
+static struct p4pmc_descr p4_pmcdesc[P4_NPMCS] = {
+
+ /*
+ * TSC descriptor
+ */
+
+ {
+ .pm_descr =
+ {
+ .pd_name = "TSC",
+ .pd_class = PMC_CLASS_TSC,
+ .pd_caps = PMC_CAP_READ | PMC_CAP_WRITE,
+ .pd_width = 64
+ },
+ .pm_pmcnum = ~0,
+ .pm_cccr_msr = ~0,
+ .pm_pmc_msr = 0x10,
+ },
+
+ /*
+ * P4 PMCS
+ */
+
+#define P4_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
+ PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
+ PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE | \
+ PMC_CAP_TAGGING | PMC_CAP_CASCADE)
+
+#define P4_PMCDESCR(N, PMC, CCCR) \
+ { \
+ .pm_descr = \
+ { \
+ .pd_name = #N, \
+ .pd_class = PMC_CLASS_P4, \
+ .pd_caps = P4_PMC_CAPS, \
+ .pd_width = 40 \
+ }, \
+ .pm_pmcnum = P4_PMC_##N, \
+ .pm_cccr_msr = (CCCR), \
+ .pm_pmc_msr = (PMC) \
+ }
+
+ P4_PMCDESCR(BPU_COUNTER0, 0x300, 0x360),
+ P4_PMCDESCR(BPU_COUNTER1, 0x301, 0x361),
+ P4_PMCDESCR(BPU_COUNTER2, 0x302, 0x362),
+ P4_PMCDESCR(BPU_COUNTER3, 0x303, 0x363),
+ P4_PMCDESCR(MS_COUNTER0, 0x304, 0x364),
+ P4_PMCDESCR(MS_COUNTER1, 0x305, 0x365),
+ P4_PMCDESCR(MS_COUNTER2, 0x306, 0x366),
+ P4_PMCDESCR(MS_COUNTER3, 0x307, 0x367),
+ P4_PMCDESCR(FLAME_COUNTER0, 0x308, 0x368),
+ P4_PMCDESCR(FLAME_COUNTER1, 0x309, 0x369),
+ P4_PMCDESCR(FLAME_COUNTER2, 0x30A, 0x36A),
+ P4_PMCDESCR(FLAME_COUNTER3, 0x30B, 0x36B),
+ P4_PMCDESCR(IQ_COUNTER0, 0x30C, 0x36C),
+ P4_PMCDESCR(IQ_COUNTER1, 0x30D, 0x36D),
+ P4_PMCDESCR(IQ_COUNTER2, 0x30E, 0x36E),
+ P4_PMCDESCR(IQ_COUNTER3, 0x30F, 0x36F),
+ P4_PMCDESCR(IQ_COUNTER4, 0x310, 0x370),
+ P4_PMCDESCR(IQ_COUNTER5, 0x311, 0x371),
+
+#undef P4_PMCDESCR
+};
+
+/* HTT support */
+#define P4_NHTT 2 /* logical processors/chip */
+#define P4_HTT_CPU_INDEX_0 0
+#define P4_HTT_CPU_INDEX_1 1
+
+static int p4_system_has_htt;
+
+/*
+ * Per-CPU data structure for P4 class CPUs
+ *
+ * [common stuff]
+ * [19 struct pmc_hw pointers]
+ * [19 struct pmc_hw structures]
+ * [45 ESCRs status bytes]
+ * [per-cpu spin mutex]
+ * [19 flags for holding the config count and runcount]
+ * [19*2 saved value fields] (Thread mode PMC support)
+ * [19*2 pmc value fields] (-do-)
+ */
+
+struct p4_cpu {
+ struct pmc_cpu pc_common;
+ struct pmc_hw *pc_hwpmcs[P4_NPMCS];
+ struct pmc_hw pc_p4pmcs[P4_NPMCS];
+ char pc_escrs[P4_NESCR];
+ struct mtx pc_mtx; /* spin lock */
+ unsigned char pc_flags[P4_NPMCS]; /* 4 bits each: {cfg,run}count */
+ pmc_value_t pc_saved[P4_NPMCS * P4_NHTT];
+ pmc_value_t pc_pmc_values[P4_NPMCS * P4_NHTT];
+};
+
+#define P4_PCPU_SAVED_VALUE(PC,RI,CPU) (PC)->pc_saved[(RI) + ((CPU) & 1) * P4_NPMCS]
+#define P4_PCPU_PMC_VALUE(P,R,C) (P)->pc_pmc_values[(R) + ((C) & 1) * P4_NPMCS]
+
+#define P4_PCPU_GET_FLAGS(PC,RI,MASK) ((PC)->pc_flags[(RI)] & (MASK))
+#define P4_PCPU_SET_FLAGS(PC,RI,MASK,VAL) do { \
+ char _tmp; \
+ _tmp = (PC)->pc_flags[(RI)]; \
+ _tmp &= ~(MASK); \
+ _tmp |= (VAL) & (MASK); \
+ (PC)->pc_flags[(RI)] = _tmp; \
+} while (0)
+
+#define P4_PCPU_GET_RUNCOUNT(PC,RI) P4_PCPU_GET_FLAGS(PC,RI,0x0F)
+#define P4_PCPU_SET_RUNCOUNT(PC,RI,V) P4_PCPU_SET_FLAGS(PC,RI,0x0F,V)
+
+#define P4_PCPU_GET_CFGCOUNT(PC,RI) (P4_PCPU_GET_FLAGS(PC,RI,0xF0) >> 4)
+#define P4_PCPU_SET_CFGCOUNT(PC,RI,C) P4_PCPU_SET_FLAGS(PC,RI,0xF0,((C) <<4))
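+
+/*
+ * Each pc_flags[] byte thus packs two 4-bit fields (illustrative):
+ *
+ *	  7      4 3      0
+ *	+--------+--------+
+ *	| cfgcnt | runcnt |
+ *	+--------+--------+
+ *
+ * e.g. P4_PCPU_SET_CFGCOUNT(pc, ri, 2) followed by
+ * P4_PCPU_SET_RUNCOUNT(pc, ri, 1) leaves pc->pc_flags[ri] == 0x21.
+ */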
+
+/* ESCR row disposition */
+static int p4_escrdisp[P4_NESCR];
+
+#define P4_ESCR_ROW_DISP_IS_THREAD(E) (p4_escrdisp[(E)] > 0)
+#define P4_ESCR_ROW_DISP_IS_STANDALONE(E) (p4_escrdisp[(E)] < 0)
+#define P4_ESCR_ROW_DISP_IS_FREE(E) (p4_escrdisp[(E)] == 0)
+
+#define P4_ESCR_MARK_ROW_STANDALONE(E) do { \
+ KASSERT(p4_escrdisp[(E)] <= 0, ("[p4,%d] row disposition error",\
+ __LINE__)); \
+ atomic_add_int(&p4_escrdisp[(E)], -1); \
+ KASSERT(p4_escrdisp[(E)] >= (-mp_ncpus), ("[p4,%d] row " \
+ "disposition error", __LINE__)); \
+} while (0)
+
+#define P4_ESCR_UNMARK_ROW_STANDALONE(E) do { \
+ atomic_add_int(&p4_escrdisp[(E)], 1); \
+ KASSERT(p4_escrdisp[(E)] <= 0, ("[p4,%d] row disposition error",\
+ __LINE__)); \
+} while (0)
+
+#define P4_ESCR_MARK_ROW_THREAD(E) do { \
+ KASSERT(p4_escrdisp[(E)] >= 0, ("[p4,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&p4_escrdisp[(E)], 1); \
+} while (0)
+
+#define P4_ESCR_UNMARK_ROW_THREAD(E) do { \
+ atomic_add_int(&p4_escrdisp[(E)], -1); \
+ KASSERT(p4_escrdisp[(E)] >= 0, ("[p4,%d] row disposition error",\
+ __LINE__)); \
+} while (0)
+
+#define P4_PMC_IS_STOPPED(cccr) ((rdmsr(cccr) & P4_CCCR_ENABLE) == 0)
+
+#define P4_TO_PHYSICAL_CPU(cpu) (pmc_cpu_is_logical(cpu) ? \
+ ((cpu) & ~1) : (cpu))
+
+#define P4_CCCR_Tx_MASK (~(P4_CCCR_OVF_PMI_T0|P4_CCCR_OVF_PMI_T1| \
+ P4_CCCR_ENABLE|P4_CCCR_OVF))
+#define P4_ESCR_Tx_MASK (~(P4_ESCR_T0_OS|P4_ESCR_T0_USR|P4_ESCR_T1_OS| \
+ P4_ESCR_T1_USR))
+
+/*
+ * support routines
+ */
+
+static struct p4_event_descr *
+p4_find_event(enum pmc_event ev)
+{
+ int n;
+
+ for (n = 0; n < P4_NEVENTS; n++)
+ if (p4_events[n].pm_event == ev)
+ break;
+ if (n == P4_NEVENTS)
+ return NULL;
+ return &p4_events[n];
+}
+
+/*
+ * Initialize per-cpu state
+ */
+
+static int
+p4_init(int cpu)
+{
+ int n, phycpu;
+ char *pescr;
+ struct p4_cpu *pcs;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0, "p4-init cpu=%d logical=%d", cpu,
+ pmc_cpu_is_logical(cpu) != 0);
+
+ /*
+ * A 'logical' CPU shares its per-cpu state with its physical
+ * CPU. The physical CPU would have been initialized prior to
+ * the initialization for this cpu.
+ */
+
+ if (pmc_cpu_is_logical(cpu)) {
+ phycpu = P4_TO_PHYSICAL_CPU(cpu);
+ pcs = (struct p4_cpu *) pmc_pcpu[phycpu];
+ PMCDBG(MDP,INI,1, "p4-init cpu=%d phycpu=%d pcs=%p",
+ cpu, phycpu, pcs);
+ KASSERT(pcs,
+ ("[p4,%d] Null Per-Cpu state cpu=%d phycpu=%d", __LINE__,
+ cpu, phycpu));
+ if (pcs == NULL) /* decline to init */
+ return ENXIO;
+ p4_system_has_htt = 1;
+ pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
+ return 0;
+ }
+
+ MALLOC(pcs, struct p4_cpu *, sizeof(struct p4_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ if (pcs == NULL)
+ return ENOMEM;
+ phw = pcs->pc_p4pmcs;
+
+ for (n = 0; n < P4_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pcs->pc_hwpmcs[n] = phw;
+ }
+
+ /* Mark the TSC as shareable */
+ pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
+
+ pescr = pcs->pc_escrs;
+ for (n = 0; n < P4_NESCR; n++)
+ *pescr++ = P4_INVALID_PMC_INDEX;
+ pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
+
+ mtx_init(&pcs->pc_mtx, "p4-pcpu", "pmc", MTX_SPIN);
+
+ return 0;
+}
+
+/*
+ * Destroy per-cpu state.
+ */
+
+static int
+p4_cleanup(int cpu)
+{
+ struct p4_cpu *pcs;
+
+ PMCDBG(MDP,INI,0, "p4-cleanup cpu=%d", cpu);
+
+ /*
+ * Free up the per-cpu structure for the given cpu if
+ * allocated, and if this is a physical CPU.
+ */
+
+ if ((pcs = (struct p4_cpu *) pmc_pcpu[cpu]) != NULL &&
+ !pmc_cpu_is_logical(cpu)) {
+ mtx_destroy(&pcs->pc_mtx);
+ FREE(pcs, M_PMC);
+ }
+
+ pmc_pcpu[cpu] = NULL;
+
+ return 0;
+}
+
+/*
+ * Context switch in.
+ */
+
+static int
+p4_switch_in(struct pmc_cpu *pc)
+{
+ (void) pc;
+ /* enable the RDPMC instruction */
+ load_cr4(rcr4() | CR4_PCE);
+ return 0;
+}
+
+/*
+ * Context switch out.
+ */
+
+static int
+p4_switch_out(struct pmc_cpu *pc)
+{
+ (void) pc;
+ /* disallow RDPMC instruction */
+ load_cr4(rcr4() & ~CR4_PCE);
+ return 0;
+}
+
+/*
+ * Read a PMC
+ */
+
+static int
+p4_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ enum pmc_mode mode;
+ struct p4pmc_descr *pd;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ struct pmc_hw *phw;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+ pd = &p4_pmcdesc[ri];
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ mode = pm->pm_mode;
+
+ PMCDBG(MDP,REA,1, "p4-read cpu=%d ri=%d mode=%d", cpu, ri, mode);
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
+ KASSERT(PMC_IS_COUNTING_MODE(mode),
+ ("[p4,%d] TSC counter in non-counting mode", __LINE__));
+ *v = rdtsc();
+ PMCDBG(MDP,REA,2, "p4-read -> %jx", *v);
+ return 0;
+ }
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] unknown PMC class %d", __LINE__, pd->pm_descr.pd_class));
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode))
+ tmp = rdmsr(p4_pmcdesc[ri].pm_pmc_msr);
+ else
+ tmp = P4_PCPU_PMC_VALUE(pc,ri,cpu);
+
+ if (PMC_IS_SAMPLING_MODE(mode))
+ *v = -(tmp + 1); /* undo transformation */
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,2, "p4-read -> %jx", *v);
+ return 0;
+}
+
+/*
+ * Write a PMC
+ */
+
+static int
+p4_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ const struct pmc_hw *phw;
+ const struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p4_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,WRI,1, "p4-write cpu=%d ri=%d mode=%d v=%jx", cpu, ri,
+ pm->pm_mode, v);
+
+ /*
+ * The P4's TSC register is writeable, but we don't allow a
+ * write as changing the TSC's value could interfere with
+ * other parts of the system.
+ */
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ /*
+ * write the PMC value to the register/saved value: for
+ * sampling mode PMCs, the value to be programmed into the PMC
+ * counter is -(C+1) where 'C' is the requested sample rate.
+ */
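+	/*
+	 * For example (illustrative): a requested sample rate of
+	 * C = 9999 is programmed as -(9999 + 1), i.e.
+	 * 0xFFFFFFFFFFFFD8F0; the 40-bit counter then overflows after
+	 * exactly C+1 events, and p4_read_pmc() applies -(v+1) to
+	 * recover the count.
+	 */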
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ v = -(v + 1);
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode))
+ wrmsr(pd->pm_pmc_msr, v);
+ else
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) = v;
+
+ return 0;
+}
+
+/*
+ * Configure a PMC 'pm' on the given CPU and row-index.
+ *
+ * 'pm' may be NULL to indicate de-configuration.
+ *
+ * On HTT systems, a PMC may get configured twice, once for each
+ * "logical" CPU.
+ */
+
+static int
+p4_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+ struct p4_cpu *pc;
+ int cfgcount;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL ||
+ (p4_system_has_htt && phw->phw_pmc == pm),
+ ("[p4,%d] hwpmc not unconfigured before re-config", __LINE__));
+
+ mtx_lock_spin(&pc->pc_mtx);
+ cfgcount = P4_PCPU_GET_CFGCOUNT(pc,ri);
+
+	KASSERT(cfgcount >= 0 && cfgcount <= 2,
+ ("[p4,%d] illegal cfgcount cfg=%d on cpu=%d ri=%d", __LINE__,
+ cfgcount, cpu, ri));
+
+ KASSERT(cfgcount == 0 || phw->phw_pmc,
+ ("[p4,%d] cpu=%d ri=%d pmc configured with zero cfg count",
+ __LINE__, cpu, ri));
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d cfg=%d pm=%p", cpu, ri, cfgcount,
+ pm);
+
+ if (pm) { /* config */
+ if (cfgcount == 0)
+ phw->phw_pmc = pm;
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[p4,%d] cpu=%d ri=%d config %p != hw %p",
+ __LINE__, cpu, ri, pm, phw->phw_pmc));
+
+ cfgcount++;
+ } else { /* unconfig */
+ --cfgcount;
+ if (cfgcount == 0)
+ phw->phw_pmc = NULL;
+ }
+
+	KASSERT(cfgcount >= 0 && cfgcount <= 2,
+	    ("[p4,%d] illegal cfgcount cfg=%d on cpu=%d ri=%d", __LINE__,
+	    cfgcount, cpu, ri));
+
+ P4_PCPU_SET_CFGCOUNT(pc,ri,cfgcount);
+
+ mtx_unlock_spin(&pc->pc_mtx);
+
+ return 0;
+}
+
+/*
+ * Allocate a PMC.
+ *
+ * The allocation strategy differs between HTT and non-HTT systems.
+ *
+ * The non-HTT case:
+ * - Given the desired event and the PMC row-index, lookup the
+ * list of valid ESCRs for the event.
+ * - For each valid ESCR:
+ * - Check if the ESCR is free and the ESCR row is in a compatible
+ *     mode (i.e., system or process).
+ * - Check if the ESCR is usable with a P4 PMC at the desired row-index.
+ * If everything matches, we determine the appropriate bit values for the
+ * ESCR and CCCR registers.
+ *
+ * The HTT case:
+ *
+ * - Process mode PMCs require special care. The FreeBSD scheduler could
+ * schedule any two processes on the same physical CPU. We need to ensure
+ * that a given PMC row-index is never allocated to two different
+ * PMCs owned by different user-processes.
+ * This is ensured by always allocating a PMC from a 'FREE' PMC row
+ * if the system has HTT active.
+ * - A similar check needs to be done for ESCRs; we do not want two PMCs
+ * using the same ESCR to be scheduled at the same time. Thus ESCR
+ * allocation is also restricted to FREE rows if the system has HTT
+ * enabled.
+ * - Thirdly, some events are 'thread independent', i.e., the PMC
+ *   hardware cannot distinguish between events caused by different
+ *   logical CPUs.  This makes it impossible to assign such events
+ *   to a given thread of execution.  If the system has HTT enabled,
+ *   these events are not allowed for process-mode PMCs.
+ */
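+
+/*
+ * A worked non-HTT example (illustrative): allocating
+ * PMC_EV_P4_BPU_FETCH_REQUEST at row-index 1 (BPU_COUNTER0) looks up
+ * candidate ESCRs {BPU_ESCR0, BPU_ESCR1} in p4_events[].  BPU_ESCR0
+ * is examined first; since BPU_COUNTER0 appears in its pm_pmcs[]
+ * list, the pairing succeeds and yields the ESCR MSR (0x3B2) along
+ * with the event-select and CCCR-select bit values to program.
+ */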
+
+static int
+p4_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ int found, n, m;
+ uint32_t caps, cccrvalue, escrvalue, tflags;
+ enum pmc_p4escr escr;
+ struct p4_cpu *pc;
+ struct p4_event_descr *pevent;
+ const struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index value %d", __LINE__, ri));
+
+ pd = &p4_pmcdesc[ri];
+
+ PMCDBG(MDP,ALL,1, "p4-allocate ri=%d class=%d pmccaps=0x%x "
+ "reqcaps=0x%x\n", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
+ pm->pm_caps);
+
+ /* check class */
+ if (pd->pm_descr.pd_class != pm->pm_class)
+ return EINVAL;
+
+ /* check requested capabilities */
+ caps = a->pm_caps;
+ if ((pd->pm_descr.pd_caps & caps) != caps)
+ return EPERM;
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
+ /* TSC's are always allocated in system-wide counting mode */
+ if (a->pm_ev != PMC_EV_TSC_TSC ||
+ a->pm_mode != PMC_MODE_SC)
+ return EINVAL;
+ return 0;
+ }
+
+ /*
+ * If the system has HTT enabled, and the desired allocation
+ * mode is process-private, and the PMC row disposition is not
+ * FREE (0), decline the allocation.
+ */
+
+ if (p4_system_has_htt &&
+ PMC_IS_VIRTUAL_MODE(pm->pm_mode) &&
+ pmc_getrowdisp(ri) != 0)
+ return EBUSY;
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] unknown PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ if (pm->pm_event < PMC_EV_P4_FIRST ||
+ pm->pm_event > PMC_EV_P4_LAST)
+ return EINVAL;
+
+ if ((pevent = p4_find_event(pm->pm_event)) == NULL)
+ return ESRCH;
+
+ PMCDBG(MDP,ALL,2, "pevent={ev=%d,escrsel=0x%x,cccrsel=0x%x,isti=%d}",
+ pevent->pm_event, pevent->pm_escr_eventselect,
+ pevent->pm_cccr_select, pevent->pm_is_ti_event);
+
+ /*
+	 * Some PMC events are 'thread independent' and therefore
+ * cannot be used for process-private modes if HTT is being
+ * used.
+ */
+
+ if (P4_EVENT_IS_TI(pevent) &&
+ PMC_IS_VIRTUAL_MODE(pm->pm_mode) && p4_system_has_htt)
+ return EINVAL;
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+
+ found = 0;
+
+ /* look for a suitable ESCR for this event */
+ for (n = 0; n < P4_MAX_ESCR_PER_EVENT && !found; n++) {
+ if ((escr = pevent->pm_escrs[n]) == P4_ESCR_NONE)
+ break; /* out of ESCRs */
+ /*
+ * Check ESCR row disposition.
+ *
+ * If the request is for a system-mode PMC, then the
+ * ESCR row should not be in process-virtual mode, and
+ * should also be free on the current CPU.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ if (P4_ESCR_ROW_DISP_IS_THREAD(escr) ||
+ pc->pc_escrs[escr] != P4_INVALID_PMC_INDEX)
+ continue;
+ }
+
+ /*
+ * If the request is for a process-virtual PMC, and if
+ * HTT is not enabled, we can use an ESCR row that is
+ * either FREE or already in process mode.
+ *
+ * If HTT is enabled, then we need to ensure that a
+ * given ESCR is never allocated to two PMCS that
+ * could run simultaneously on the two logical CPUs of
+		 * a CPU package.  We ensure this by only allocating
+ * ESCRs from rows marked as 'FREE'.
+ */
+
+ if (PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+ if (p4_system_has_htt) {
+ if (!P4_ESCR_ROW_DISP_IS_FREE(escr))
+ continue;
+ } else
+ if (P4_ESCR_ROW_DISP_IS_STANDALONE(escr))
+ continue;
+ }
+
+ /*
+ * We found a suitable ESCR for this event. Now check if
+ * this escr can work with the PMC at row-index 'ri'.
+ */
+
+ for (m = 0; m < P4_MAX_PMC_PER_ESCR; m++)
+ if (p4_escrs[escr].pm_pmcs[m] == pd->pm_pmcnum) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found == 0)
+ return ESRCH;
+
+ KASSERT((int) escr >= 0 && escr < P4_NESCR,
+ ("[p4,%d] illegal ESCR value %d", __LINE__, escr));
+
+ /* mark ESCR row mode */
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ pc->pc_escrs[escr] = ri; /* mark ESCR as in use on this cpu */
+ P4_ESCR_MARK_ROW_STANDALONE(escr);
+ } else {
+ KASSERT(pc->pc_escrs[escr] == P4_INVALID_PMC_INDEX,
+ ("[p4,%d] escr[%d] already in use", __LINE__, escr));
+ P4_ESCR_MARK_ROW_THREAD(escr);
+ }
+
+ pm->pm_md.pm_p4.pm_p4_escrmsr = p4_escrs[escr].pm_escr_msr;
+ pm->pm_md.pm_p4.pm_p4_escr = escr;
+
+ cccrvalue = P4_CCCR_TO_ESCR_SELECT(pevent->pm_cccr_select);
+ escrvalue = P4_ESCR_TO_EVENT_SELECT(pevent->pm_escr_eventselect);
+
+ /* CCCR fields */
+ if (caps & PMC_CAP_THRESHOLD)
+ cccrvalue |= (a->pm_p4_cccrconfig & P4_CCCR_THRESHOLD_MASK) |
+ P4_CCCR_COMPARE;
+
+ if (caps & PMC_CAP_EDGE)
+ cccrvalue |= P4_CCCR_EDGE;
+
+ if (caps & PMC_CAP_INVERT)
+ cccrvalue |= P4_CCCR_COMPLEMENT;
+
+ if (p4_system_has_htt)
+ cccrvalue |= a->pm_p4_cccrconfig & P4_CCCR_ACTIVE_THREAD_MASK;
+ else /* no HTT; thread field should be '11b' */
+ cccrvalue |= P4_CCCR_TO_ACTIVE_THREAD(0x3);
+
+ if (caps & PMC_CAP_CASCADE)
+ cccrvalue |= P4_CCCR_CASCADE;
+
+ /* On HTT systems the PMI T0 field may get moved to T1 at pmc start */
+ if (caps & PMC_CAP_INTERRUPT)
+ cccrvalue |= P4_CCCR_OVF_PMI_T0;
+
+	/* ESCR fields */
+	if (caps & PMC_CAP_QUALIFIER)
+		escrvalue |= a->pm_p4_escrconfig & P4_ESCR_EVENT_MASK_MASK;
+	if (caps & PMC_CAP_TAGGING)
+		escrvalue |= (a->pm_p4_escrconfig & P4_ESCR_TAG_VALUE_MASK) |
+		    P4_ESCR_TAG_ENABLE;
+
+ /* HTT: T0_{OS,USR} bits may get moved to T1 at pmc start */
+ tflags = 0;
+ if (caps & PMC_CAP_SYSTEM)
+ tflags |= P4_ESCR_T0_OS;
+ if (caps & PMC_CAP_USER)
+ tflags |= P4_ESCR_T0_USR;
+ if (tflags == 0)
+ tflags = (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+ escrvalue |= tflags;
+
+ pm->pm_md.pm_p4.pm_p4_cccrvalue = cccrvalue;
+ pm->pm_md.pm_p4.pm_p4_escrvalue = escrvalue;
+
+ PMCDBG(MDP,ALL,2, "p4-allocate cccrsel=0x%x cccrval=0x%x "
+ "escr=%d escrmsr=0x%x escrval=0x%x\n", pevent->pm_cccr_select,
+ cccrvalue, escr, pm->pm_md.pm_p4.pm_p4_escrmsr, escrvalue);
+
+ return 0;
+}
+
+/*
+ * release a PMC.
+ */
+
+static int
+p4_release_pmc(int cpu, int ri, struct pmc *pm)
+{
+ enum pmc_p4escr escr;
+ struct pmc_hw *phw;
+ struct p4_cpu *pc;
+
+ if (p4_pmcdesc[ri].pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ escr = pm->pm_md.pm_p4.pm_p4_escr;
+
+ PMCDBG(MDP,REL,1, "p4-release cpu=%d ri=%d escr=%d", cpu, ri, escr);
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[p4,%d] releasing configured PMC ri=%d", __LINE__, ri));
+
+ P4_ESCR_UNMARK_ROW_STANDALONE(escr);
+ KASSERT(pc->pc_escrs[escr] == ri,
+ ("[p4,%d] escr[%d] not allocated to ri %d", __LINE__,
+ escr, ri));
+ pc->pc_escrs[escr] = P4_INVALID_PMC_INDEX; /* mark as free */
+ } else
+ P4_ESCR_UNMARK_ROW_THREAD(escr);
+
+ return 0;
+}
+
+/*
+ * Start a PMC
+ */
+
+static int
+p4_start_pmc(int cpu, int ri)
+{
+ int rc;
+ uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ struct pmc_hw *phw;
+ struct p4pmc_descr *pd;
+#if DEBUG
+ pmc_value_t tmp;
+#endif
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p4_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] starting cpu%d,pmc%d with null pmc", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,STA,1, "p4-start cpu=%d ri=%d", cpu, ri);
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) /* TSC are always on */
+ return 0;
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] wrong PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ /* retrieve the desired CCCR/ESCR values from the PMC */
+ cccrvalue = pm->pm_md.pm_p4.pm_p4_cccrvalue;
+ escrvalue = pm->pm_md.pm_p4.pm_p4_escrvalue;
+ escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
+
+ /* extract and zero the logical processor selection bits */
+ cccrtbits = cccrvalue & P4_CCCR_OVF_PMI_T0;
+ escrtbits = escrvalue & (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+ cccrvalue &= ~P4_CCCR_OVF_PMI_T0;
+ escrvalue &= ~(P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+
+ if (pmc_cpu_is_logical(cpu)) { /* shift T0 bits to T1 position */
+ cccrtbits <<= 1;
+ escrtbits >>= 2;
+ }
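+
+	/*
+	 * (Assumed bit layout, per the Pentium 4 performance
+	 * monitoring model: the T1 OVF_PMI bit sits one position
+	 * above its T0 counterpart in the CCCR, while the T1 OS/USR
+	 * bits sit two positions below their T0 counterparts in the
+	 * ESCR; hence the shift directions above.)
+	 */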
+
+ /* start system mode PMCs directly */
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ wrmsr(escrmsr, escrvalue | escrtbits);
+ wrmsr(pd->pm_cccr_msr, cccrvalue | cccrtbits | P4_CCCR_ENABLE);
+ return 0;
+ }
+
+ /*
+ * Thread mode PMCs
+ *
+ * On HTT machines, the same PMC could be scheduled on the
+ * same physical CPU twice (once for each logical CPU), for
+ * example, if two threads of a multi-threaded process get
+ * scheduled on the same CPU.
+ *
+ */
+
+ mtx_lock_spin(&pc->pc_mtx);
+
+ rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
+ KASSERT(rc == 0 || rc == 1,
+ ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
+ rc));
+
+ if (rc == 0) { /* 1st CPU and the non-HTT case */
+ /*
+ * Enable the correct bits for this CPU.
+ */
+ escrvalue |= escrtbits;
+ cccrvalue |= cccrtbits | P4_CCCR_ENABLE;
+
+ KASSERT(P4_PMC_IS_STOPPED(pd->pm_cccr_msr),
+ ("[p4,%d] cpu=%d ri=%d cccr=0x%x not stopped", __LINE__,
+ cpu, ri, pd->pm_cccr_msr));
+
+ /* write out the low 40 bits of the saved value to hardware */
+ wrmsr(pd->pm_pmc_msr,
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) & P4_PERFCTR_MASK);
+ P4_PCPU_SAVED_VALUE(pc,ri,cpu) = P4_PCPU_PMC_VALUE(pc,ri,cpu) &
+ P4_PERFCTR_MASK;
+
+ /* Program the ESCR and CCCR and start the PMC */
+ wrmsr(escrmsr, escrvalue);
+ wrmsr(pd->pm_cccr_msr, cccrvalue);
+
+ PMCDBG(MDP,STA,2,"p4-start cpu=%d rc=%d ri=%d escr=%d "
+ "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x\n", cpu, rc,
+ ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr, escrvalue,
+ cccrvalue);
+
+ } else if (rc == 1) { /* 2nd CPU */
+
+ /*
+ * Retrieve the CCCR and ESCR values from their MSRs,
+		 * and turn on the additional T[0/1] bits for the 2nd
+ * CPU. Remember the difference between the saved
+ * value from the previous 'write()' operation to this
+ * (PMC,CPU) pair and the current PMC reading; this is
+ * used at PMCSTOP time to derive the correct
+ * increment.
+ */
+
+ cccrvalue = rdmsr(pd->pm_cccr_msr);
+
+ KASSERT((cccrvalue & P4_CCCR_Tx_MASK) ==
+ (pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK),
+ ("[p4,%d] cpu=%d rc=%d ri=%d CCCR bits 0x%x PMC 0x%x",
+ __LINE__, cpu, rc, ri, cccrvalue & P4_CCCR_Tx_MASK,
+ pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK));
+ KASSERT(cccrvalue & P4_CCCR_ENABLE,
+ ("[p4,%d] 2nd cpu rc=%d cpu=%d ri=%d not running",
+ __LINE__, rc, cpu, ri));
+ KASSERT((cccrvalue & cccrtbits) == 0,
+		    ("[p4,%d] CCCR T0/T1 mismatch rc=%d cpu=%d ri=%d "
+ "cccrvalue=0x%x tbits=0x%x", __LINE__, rc, cpu, ri,
+ cccrvalue, cccrtbits));
+
+ /* stop PMC */
+ wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);
+
+ escrvalue = rdmsr(escrmsr);
+
+ KASSERT((escrvalue & P4_ESCR_Tx_MASK) ==
+ (pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK),
+ ("[p4,%d] Extra ESCR bits cpu=%d rc=%d ri=%d "
+ "escr=0x%x pm=0x%x", __LINE__, cpu, rc, ri,
+ escrvalue & P4_ESCR_Tx_MASK,
+ pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK));
+
+ KASSERT((escrvalue & escrtbits) == 0,
+ ("[p4,%d] ESCR T0/T1 mismatch rc=%d cpu=%d ri=%d "
+ "escrmsr=0x%x escrvalue=0x%x tbits=0x%x", __LINE__,
+ rc, cpu, ri, escrmsr, escrvalue, escrtbits));
+
+		/* read current value and save it */
+		P4_PCPU_SAVED_VALUE(pc,ri,cpu) =
+		    rdmsr(pd->pm_pmc_msr) & P4_PERFCTR_MASK;
+#if DEBUG
+		tmp = P4_PCPU_SAVED_VALUE(pc,ri,cpu); /* for PMCDBG below */
+#endif
+
+ /*
+ * program the new bits into the ESCR and CCCR,
+ * starting the PMC in the process.
+ */
+
+		escrvalue |= escrtbits;
+		cccrvalue |= cccrtbits;
+
+ wrmsr(escrmsr, escrvalue);
+ wrmsr(pd->pm_cccr_msr, cccrvalue);
+
+		PMCDBG(MDP,STA,2,"p4-start/2 cpu=%d rc=%d ri=%d escr=%d "
+ "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x pmc=0x%jx",
+ cpu, rc, ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr,
+ escrvalue, cccrvalue, tmp);
+
+ } else
+ panic("invalid runcount %d\n", rc);
+
+ ++rc;
+ P4_PCPU_SET_RUNCOUNT(pc,ri,rc);
+
+ mtx_unlock_spin(&pc->pc_mtx);
+
+ return 0;
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+p4_stop_pmc(int cpu, int ri)
+{
+ int rc;
+ uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ struct pmc_hw *phw;
+ struct p4pmc_descr *pd;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row index %d", __LINE__, ri));
+
+ pd = &p4_pmcdesc[ri];
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(phw != NULL,
+ ("[p4,%d] null phw for cpu%d, ri%d", __LINE__, cpu, ri));
+
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] null pmc for cpu%d, ri%d", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STO,1, "p4-stop cpu=%d ri=%d", cpu, ri);
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ wrmsr(pd->pm_cccr_msr,
+ pm->pm_md.pm_p4.pm_p4_cccrvalue & ~P4_CCCR_ENABLE);
+ return 0;
+ }
+
+ /*
+ * Thread mode PMCs.
+ *
+	 * On HTT machines, this PMC may be in use by two threads
+	 * running on two logical CPUs.  Thus we look at the
+	 * 'pm_runcount' field and only turn off the appropriate T0/T1
+	 * bits (and keep the PMC running).
+ *
+ * The 'pc_saved' field has the 'diff' between the value in
+ * the hardware register at PMCSTART time and the nominal
+	 * start value for the PMC.  This diff is added to the current
+	 * PMC reading to derive the correct (absolute) return value.
+ */
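+
+	/*
+	 * Illustrative arithmetic: if the hardware counter held 0x100
+	 * at PMCSTART time and reads 0x150 here, 'pc_saved' supplies
+	 * the 0x100, so tmp = 0x150 - 0x100 = 0x50 is the increment
+	 * credited to P4_PCPU_PMC_VALUE() below.
+	 */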
+
+ /* bits to mask */
+ cccrtbits = P4_CCCR_OVF_PMI_T0;
+ escrtbits = P4_ESCR_T0_OS | P4_ESCR_T0_USR;
+ if (pmc_cpu_is_logical(cpu)) {
+ cccrtbits <<= 1;
+ escrtbits >>= 2;
+ }
+
+ mtx_lock_spin(&pc->pc_mtx);
+
+ rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
+
+ KASSERT(rc == 2 || rc == 1,
+ ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
+ rc));
+
+ --rc;
+
+ P4_PCPU_SET_RUNCOUNT(pc,ri,rc);
+
+ /* Stop this PMC */
+ cccrvalue = rdmsr(pd->pm_cccr_msr);
+ wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);
+
+ escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
+ escrvalue = rdmsr(escrmsr);
+
+ /* get the current PMC reading */
+ tmp = rdmsr(pd->pm_pmc_msr) & P4_PERFCTR_MASK;
+
+ if (rc == 1) { /* need to keep the PMC running */
+
+ KASSERT(escrvalue & escrtbits,
+ ("[p4,%d] ESCR T0/T1 mismatch cpu=%d ri=%d escrmsr=0x%x "
+ "escrvalue=0x%x tbits=0x%x", __LINE__, cpu, ri, escrmsr,
+ escrvalue, escrtbits));
+
+ KASSERT(PMC_IS_COUNTING_MODE(pm->pm_mode) ||
+ (cccrvalue & cccrtbits),
+ ("[p4,%d] CCCR T0/T1 mismatch cpu=%d ri=%d cccrvalue=0x%x "
+ "tbits=0x%x", __LINE__, cpu, ri, cccrvalue, cccrtbits));
+
+ escrvalue &= ~escrtbits;
+ cccrvalue &= ~cccrtbits;
+
+ wrmsr(escrmsr, escrvalue);
+ wrmsr(pd->pm_cccr_msr, cccrvalue);
+
+ }
+
+ PMCDBG(MDP,STO,2, "p4-stop/2 cpu=%d rc=%d ri=%d escrmsr=0x%x escrval=0x%x "
+ "cccrval=0x%x", cpu, rc, ri, escrmsr, escrvalue, cccrvalue);
+
+ /* get the incremental count from this context switch */
+ tmp -= P4_PCPU_SAVED_VALUE(pc,ri,cpu);
+ if ((int64_t) tmp < 0) /* counter wrap-around */
+ tmp = -tmp + 1;
+
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) += tmp;
+
+ mtx_unlock_spin(&pc->pc_mtx);
+ return 0;
+}
+
+/*
+ * Handle an interrupt.
+ */
+
+static int
+p4_intr(int cpu, uintptr_t eip)
+{
+ (void) cpu;
+ (void) eip;
+
+ return 0;
+}
+
+/*
+ * Describe a CPU's PMC state.
+ */
+
+static int
+p4_describe(int cpu, int ri, struct pmc_info *pi,
+ struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ struct pmc_hw *phw;
+ const struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] row-index %d out of range", __LINE__, ri));
+
+ PMCDBG(MDP,OPS,1,"p4-describe cpu=%d ri=%d", cpu, ri);
+
+ if (pmc_cpu_is_logical(cpu))
+ return EINVAL;
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &p4_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return error;
+
+ pi->pm_class = pd->pm_descr.pd_class;
+ pi->pm_caps = pd->pm_descr.pd_caps;
+ pi->pm_width = pd->pm_descr.pd_width;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Get MSR# for use with RDPMC.
+ */
+
+static int
+p4_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = p4_pmcdesc[ri].pm_pmc_msr;
+ return 0;
+}
+
+
+int
+pmc_initialize_p4(struct pmc_mdep *pmc_mdep)
+{
+ struct p4_event_descr *pe;
+
+ KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
+ ("[p4,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "p4-initialize");
+
+ switch (pmc_mdep->pmd_cputype) {
+ case PMC_CPU_INTEL_PIV:
+
+ pmc_mdep->pmd_npmc = P4_NPMCS;
+ pmc_mdep->pmd_classes[1] = PMC_CLASS_P4;
+ pmc_mdep->pmd_nclasspmcs[1] = 18;
+
+ pmc_mdep->pmd_init = p4_init;
+ pmc_mdep->pmd_cleanup = p4_cleanup;
+ pmc_mdep->pmd_switch_in = p4_switch_in;
+ pmc_mdep->pmd_switch_out = p4_switch_out;
+ pmc_mdep->pmd_read_pmc = p4_read_pmc;
+ pmc_mdep->pmd_write_pmc = p4_write_pmc;
+ pmc_mdep->pmd_config_pmc = p4_config_pmc;
+ pmc_mdep->pmd_allocate_pmc = p4_allocate_pmc;
+ pmc_mdep->pmd_release_pmc = p4_release_pmc;
+ pmc_mdep->pmd_start_pmc = p4_start_pmc;
+ pmc_mdep->pmd_stop_pmc = p4_stop_pmc;
+ pmc_mdep->pmd_intr = p4_intr;
+ pmc_mdep->pmd_describe = p4_describe;
+ pmc_mdep->pmd_get_msr = p4_get_msr; /* i386 */
+
+ /* model specific munging */
+ if ((cpu_id & 0xFFF) < 0xF27) {
+
+ /*
+ * On P4 and Xeon with CPUID < (Family 15,
+ * Model 2, Stepping 7), only one ESCR is
+ * available for the IOQ_ALLOCATION event.
+ */
+
+ pe = p4_find_event(PMC_EV_P4_IOQ_ALLOCATION);
+ pe->pm_escrs[1] = P4_ESCR_NONE;
+ }
+
+ break;
+
+ default:
+ KASSERT(0,("[p4,%d] Unknown CPU type", __LINE__));
+ return ENOSYS;
+ }
+
+ return 0;
+}
diff --git a/sys/dev/hwpmc/hwpmc_ppro.c b/sys/dev/hwpmc/hwpmc_ppro.c
new file mode 100644
index 0000000..3a289a5
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_ppro.c
@@ -0,0 +1,742 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * PENTIUM PRO SUPPORT
+ */
+
+struct p6pmc_descr {
+ struct pmc_descr pm_descr; /* common information */
+ uint32_t pm_pmc_msr;
+ uint32_t pm_evsel_msr;
+};
+
+static struct p6pmc_descr p6_pmcdesc[P6_NPMCS] = {
+
+ /* TSC */
+ {
+ .pm_descr =
+ {
+ .pd_name = "TSC",
+ .pd_class = PMC_CLASS_TSC,
+ .pd_caps = PMC_CAP_READ,
+ .pd_width = 64
+ },
+ .pm_pmc_msr = 0x10,
+ .pm_evsel_msr = ~0
+ },
+
+#define P6_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
+ PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
+ PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
+
+ /* PMC 0 */
+ {
+ .pm_descr =
+ {
+			.pd_name = "P6-0",
+ .pd_class = PMC_CLASS_P6,
+ .pd_caps = P6_PMC_CAPS,
+ .pd_width = 40
+ },
+ .pm_pmc_msr = P6_MSR_PERFCTR0,
+ .pm_evsel_msr = P6_MSR_EVSEL0
+ },
+
+ /* PMC 1 */
+ {
+ .pm_descr =
+ {
+			.pd_name = "P6-1",
+ .pd_class = PMC_CLASS_P6,
+ .pd_caps = P6_PMC_CAPS,
+ .pd_width = 40
+ },
+ .pm_pmc_msr = P6_MSR_PERFCTR1,
+ .pm_evsel_msr = P6_MSR_EVSEL1
+ }
+};
+
+static enum pmc_cputype p6_cputype;
+
+/*
+ * P6 Event descriptor
+ */
+
+struct p6_event_descr {
+ const enum pmc_event pm_event;
+ uint32_t pm_evsel;
+ uint32_t pm_flags;
+ uint32_t pm_unitmask;
+};
+
+static const struct p6_event_descr p6_events[] = {
+
+#define P6_EVDESCR(NAME, EVSEL, FLAGS, UMASK) \
+ { \
+ .pm_event = PMC_EV_P6_##NAME, \
+ .pm_evsel = (EVSEL), \
+ .pm_flags = (FLAGS), \
+ .pm_unitmask = (UMASK) \
+ }
+
+#define P6F_P6 (1 << PMC_CPU_INTEL_P6)
+#define P6F_CL (1 << PMC_CPU_INTEL_CL)
+#define P6F_PII (1 << PMC_CPU_INTEL_PII)
+#define P6F_PIII (1 << PMC_CPU_INTEL_PIII)
+#define P6F_PM (1 << PMC_CPU_INTEL_PM)
+#define P6F_CTR0 0x0001
+#define P6F_CTR1 0x0002
+#define P6F_ALL_CPUS (P6F_P6 | P6F_PII | P6F_CL | P6F_PIII | P6F_PM)
+#define P6F_ALL_CTRS (P6F_CTR0 | P6F_CTR1)
+#define P6F_ALL (P6F_ALL_CPUS | P6F_ALL_CTRS)
+
+#define P6_EVENT_VALID_FOR_CPU(P,CPU) ((P)->pm_flags & (1 << (CPU)))
+#define P6_EVENT_VALID_FOR_CTR(P,CTR) ((P)->pm_flags & (1 << (CTR)))
+
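+/*
+ * Example (illustrative) of how the flag bits combine: the FLOPS
+ * entry below is tagged "P6F_ALL_CPUS | P6F_CTR0", so
+ * P6_EVENT_VALID_FOR_CPU() holds on every P6-class CPU while
+ * P6_EVENT_VALID_FOR_CTR() holds only for counter 0.  This encoding
+ * assumes the pmc_cputype enum values do not collide with the low
+ * CTR flag bits.
+ */
+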
+P6_EVDESCR(DATA_MEM_REFS, 0x43, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_LINES_IN, 0x45, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_M_LINES_IN, 0x46, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_M_LINES_OUT, 0x47, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_MISS_OUTSTANDING, 0x48, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_FETCH, 0x80, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_FETCH_MISS, 0x81, P6F_ALL, 0x00),
+P6_EVDESCR(ITLB_MISS, 0x85, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_MEM_STALL, 0x86, P6F_ALL, 0x00),
+P6_EVDESCR(ILD_STALL, 0x87, P6F_ALL, 0x00),
+P6_EVDESCR(L2_IFETCH, 0x28, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LD, 0x29, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_ST, 0x2A, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LINES_IN, 0x24, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LINES_OUT, 0x26, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_M_LINES_INM, 0x25, P6F_ALL, 0x00),
+P6_EVDESCR(L2_M_LINES_OUTM, 0x27, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_RQSTS, 0x2E, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_ADS, 0x21, P6F_ALL, 0x00),
+P6_EVDESCR(L2_DBUS_BUSY, 0x22, P6F_ALL, 0x00),
+P6_EVDESCR(L2_DBUS_BUSY_RD, 0x23, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_DRDY_CLOCKS, 0x62, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_LOCK_CLOCKS, 0x63, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_REQ_OUTSTANDING, 0x60, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_TRAN_BRD, 0x65, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_RFO, 0x66, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_WB, 0x67, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_IFETCH, 0x68, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_INVAL, 0x69, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_PWR, 0x6A, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_P, 0x6B, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_IO, 0x6C, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_DEF, 0x6D, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_BURST, 0x6E, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_ANY, 0x70, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_MEM, 0x6F, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_DATA_RCV, 0x64, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_BNR_DRV, 0x61, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_HIT_DRV, 0x7A, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_HITM_DRV, 0x7B, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_SNOOP_STALL, 0x7E, P6F_ALL, 0x00),
+P6_EVDESCR(FLOPS, 0xC1, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(FP_COMPS_OPS_EXE, 0x10, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(FP_ASSIST, 0x11, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(MUL, 0x12, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(DIV, 0x13, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(CYCLES_DIV_BUSY, 0x14, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(LD_BLOCKS, 0x03, P6F_ALL, 0x00),
+P6_EVDESCR(SB_DRAINS, 0x04, P6F_ALL, 0x00),
+P6_EVDESCR(MISALIGN_MEM_REF, 0x05, P6F_ALL, 0x00),
+P6_EVDESCR(EMON_KNI_PREF_DISPATCHED, 0x07, P6F_PIII | P6F_ALL_CTRS, 0x03),
+P6_EVDESCR(EMON_KNI_PREF_MISS, 0x4B, P6F_PIII | P6F_ALL_CTRS, 0x03),
+P6_EVDESCR(INST_RETIRED, 0xC0, P6F_ALL, 0x00),
+P6_EVDESCR(UOPS_RETIRED, 0xC2, P6F_ALL, 0x00),
+P6_EVDESCR(INST_DECODED, 0xD0, P6F_ALL, 0x00),
+P6_EVDESCR(EMON_KNI_INST_RETIRED, 0xD8, P6F_PIII | P6F_ALL_CTRS, 0x01),
+P6_EVDESCR(EMON_KNI_COMP_INST_RET, 0xD9, P6F_PIII | P6F_ALL_CTRS, 0x01),
+P6_EVDESCR(HW_INT_RX, 0xC8, P6F_ALL, 0x00),
+P6_EVDESCR(CYCLES_INT_MASKED, 0xC6, P6F_ALL, 0x00),
+P6_EVDESCR(CYCLES_INT_PENDING_AND_MASKED, 0xC7, P6F_ALL, 0x00),
+P6_EVDESCR(BR_INST_RETIRED, 0xC4, P6F_ALL, 0x00),
+P6_EVDESCR(BR_MISS_PRED_RETIRED, 0xC5, P6F_ALL, 0x00),
+P6_EVDESCR(BR_TAKEN_RETIRED, 0xC9, P6F_ALL, 0x00),
+P6_EVDESCR(BR_MISS_PRED_TAKEN_RET, 0xCA, P6F_ALL, 0x00),
+P6_EVDESCR(BR_INST_DECODED, 0xE0, P6F_ALL, 0x00),
+P6_EVDESCR(BTB_MISSES, 0xE2, P6F_ALL, 0x00),
+P6_EVDESCR(BR_BOGUS, 0xE4, P6F_ALL, 0x00),
+P6_EVDESCR(BACLEARS, 0xE6, P6F_ALL, 0x00),
+P6_EVDESCR(RESOURCE_STALLS, 0xA2, P6F_ALL, 0x00),
+P6_EVDESCR(PARTIAL_RAT_STALLS, 0xD2, P6F_ALL, 0x00),
+P6_EVDESCR(SEGMENT_REG_LOADS, 0x06, P6F_ALL, 0x00),
+P6_EVDESCR(CPU_CLK_UNHALTED, 0x79, P6F_ALL, 0x00),
+P6_EVDESCR(MMX_INSTR_EXEC, 0xB0,
+ P6F_ALL_CTRS | P6F_CL | P6F_PII, 0x00),
+P6_EVDESCR(MMX_SAT_INSTR_EXEC, 0xB1,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(MMX_UOPS_EXEC, 0xB2,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(MMX_INSTR_TYPE_EXEC, 0xB3,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x3F),
+P6_EVDESCR(FP_MMX_TRANS, 0xCC,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x01),
+P6_EVDESCR(MMX_ASSIST, 0xCD,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(MMX_INSTR_RET, 0xCE, P6F_ALL_CTRS | P6F_PII, 0x00),
+P6_EVDESCR(SEG_RENAME_STALLS, 0xD4,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(SEG_REG_RENAMES, 0xD5,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(RET_SEG_RENAMES, 0xD6,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(EMON_EST_TRANS, 0x58, P6F_ALL_CTRS | P6F_PM, 0x02),
+P6_EVDESCR(EMON_THERMAL_TRIP, 0x59, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_INST_EXEC, 0x88, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_MISSP_EXEC, 0x89, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_BAC_MISSP_EXEC, 0x8A, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CND_EXEC, 0x8B, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CND_MISSP_EXEC, 0x8C, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_EXEC, 0x8D, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_MISSP_EXEC, 0x8E, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_EXEC, 0x8F, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_MISSP_EXEC, 0x90, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_BAC_MISSP_EXEC, 0x91, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CALL_EXEC, 0x92, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CALL_MISSP_EXEC, 0x93, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_CALL_EXEC, 0x94, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SIMD_INSTR_RETIRED, 0xCE, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SYNCH_UOPS, 0xD3, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_ESP_UOPS, 0xD7, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_FUSED_UOPS_RET, 0xDA, P6F_ALL_CTRS | P6F_PM, 0x03),
+P6_EVDESCR(EMON_UNFUSION, 0xDB, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_PREF_RQSTS_UP, 0xF0, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_PREF_RQSTS_DN, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SSE_SSE2_INST_RETIRED, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x03),
+P6_EVDESCR(EMON_SSE_SSE2_COMP_INST_RETIRED, 0xD9, P6F_ALL_CTRS | P6F_PM, 0x03)
+
+#undef P6_EVDESCR
+};
+
+#define P6_NEVENTS (PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1)
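+
+/*
+ * p6_find_event() below does a linear scan bounded by P6_NEVENTS,
+ * which assumes that p6_events[] has at least P6_NEVENTS entries.
+ */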
+
+static const struct p6_event_descr *
+p6_find_event(enum pmc_event ev)
+{
+ int n;
+
+ for (n = 0; n < P6_NEVENTS; n++)
+ if (p6_events[n].pm_event == ev)
+ break;
+ if (n == P6_NEVENTS)
+ return NULL;
+ return &p6_events[n];
+}
+
+/*
+ * Per-CPU data structure for P6 class CPUs
+ *
+ * [common stuff]
+ * [3 struct pmc_hw pointers]
+ * [3 struct pmc_hw structures]
+ */
+
+struct p6_cpu {
+ struct pmc_cpu pc_common;
+ struct pmc_hw *pc_hwpmcs[P6_NPMCS];
+ struct pmc_hw pc_p6pmcs[P6_NPMCS];
+};
+
+static int
+p6_init(int cpu)
+{
+ int n;
+ struct p6_cpu *pcs;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] bad cpu %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0,"p6-init cpu=%d", cpu);
+
+ MALLOC(pcs, struct p6_cpu *, sizeof(struct p6_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ if (pcs == NULL)
+ return ENOMEM;
+
+ phw = pcs->pc_p6pmcs;
+
+ for (n = 0; n < P6_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pcs->pc_hwpmcs[n] = phw;
+ }
+
+ /* Mark the TSC as shareable */
+ pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
+
+ pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
+
+ return 0;
+}
+
+static int
+p6_cleanup(int cpu)
+{
+ struct pmc_cpu *pcs;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] bad cpu %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0,"p6-cleanup cpu=%d", cpu);
+
+ if ((pcs = pmc_pcpu[cpu]) != NULL)
+ FREE(pcs, M_PMC);
+ pmc_pcpu[cpu] = NULL;
+
+ return 0;
+}
+
+static int
+p6_switch_in(struct pmc_cpu *pc)
+{
+ (void) pc;
+ return 0;
+}
+
+static int
+p6_switch_out(struct pmc_cpu *pc)
+{
+ (void) pc;
+ return 0;
+}
+
+static int
+p6_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc_hw *phw;
+ struct pmc *pm;
+ struct p6pmc_descr *pd;
+ pmc_value_t tmp;
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ tmp = rdmsr(pd->pm_pmc_msr) & P6_PERFCTR_MASK;
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ *v = -tmp;
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,1, "p6-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
+ pd->pm_pmc_msr, *v);
+
+ return 0;
+}
+
+static int
+p6_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc_hw *phw;
+ struct pmc *pm;
+ struct p6pmc_descr *pd;
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ PMCDBG(MDP,WRI,1, "p6-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
+ pd->pm_pmc_msr, v);
+
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ v = -v;
+
+ wrmsr(pd->pm_pmc_msr, v & P6_PERFCTR_MASK);
+
+ return 0;
+}
+
+static int
+p6_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "p6-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ phw->phw_pmc = pm;
+
+ return 0;
+}
+
+/*
+ * A pmc may be allocated to a given row index if:
+ * - the event is valid for this CPU
+ * - the event is valid for this counter index
+ */
+
+static int
+p6_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ uint32_t allowed_unitmask, caps, config, unitmask;
+ const struct p6pmc_descr *pd;
+ const struct p6_event_descr *pevent;
+ enum pmc_event ev;
+
+ (void) cpu;
+
+	KASSERT(cpu >= 0 && cpu < mp_ncpus,
+	    ("[p6,%d] illegal CPU %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < P6_NPMCS,
+	    ("[p6,%d] illegal row-index value %d", __LINE__, ri));
+
+ pd = &p6_pmcdesc[ri];
+
+ PMCDBG(MDP,ALL,1, "p6-allocate ri=%d class=%d pmccaps=0x%x "
+ "reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
+ pm->pm_caps);
+
+ /* check class */
+ if (pd->pm_descr.pd_class != pm->pm_class)
+ return EINVAL;
+
+ /* check requested capabilities */
+ caps = a->pm_caps;
+ if ((pd->pm_descr.pd_caps & caps) != caps)
+ return EPERM;
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
+		/* TSCs are always allocated in system-wide counting mode */
+ if (a->pm_ev != PMC_EV_TSC_TSC ||
+ a->pm_mode != PMC_MODE_SC)
+ return EINVAL;
+ return 0;
+ }
+
+ /*
+ * P6 class events
+ */
+
+ ev = pm->pm_event;
+
+ if (ev < PMC_EV_P6_FIRST || ev > PMC_EV_P6_LAST)
+ return EINVAL;
+
+ if ((pevent = p6_find_event(ev)) == NULL)
+ return ESRCH;
+
+ if (!P6_EVENT_VALID_FOR_CPU(pevent, p6_cputype) ||
+ !P6_EVENT_VALID_FOR_CTR(pevent, (ri-1)))
+ return EINVAL;
+
+ /* For certain events, Pentium M differs from the stock P6 */
+ allowed_unitmask = 0;
+ if (p6_cputype == PMC_CPU_INTEL_PM) {
+ if (ev == PMC_EV_P6_L2_LD || ev == PMC_EV_P6_L2_LINES_IN ||
+ ev == PMC_EV_P6_L2_LINES_OUT)
+ allowed_unitmask = P6_EVSEL_TO_UMASK(0x3F);
+ else if (ev == PMC_EV_P6_L2_M_LINES_OUTM)
+ allowed_unitmask = P6_EVSEL_TO_UMASK(0x30);
+ } else
+ allowed_unitmask = P6_EVSEL_TO_UMASK(pevent->pm_unitmask);
+
+ unitmask = a->pm_p6_config & P6_EVSEL_UMASK_MASK;
+ if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
+ return EINVAL;
+
+ if (ev == PMC_EV_P6_MMX_UOPS_EXEC) /* hardcoded mask */
+ unitmask = P6_EVSEL_TO_UMASK(0x0F);
+
+ config = 0;
+
+ config |= P6_EVSEL_EVENT_SELECT(pevent->pm_evsel);
+
+	if (unitmask && (caps & PMC_CAP_QUALIFIER))
+ config |= unitmask;
+
+ if (caps & PMC_CAP_THRESHOLD)
+ config |= a->pm_p6_config & P6_EVSEL_CMASK_MASK;
+
+ /* set at least one of the 'usr' or 'os' caps */
+ if (caps & PMC_CAP_USER)
+ config |= P6_EVSEL_USR;
+ if (caps & PMC_CAP_SYSTEM)
+ config |= P6_EVSEL_OS;
+ if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
+ config |= (P6_EVSEL_USR|P6_EVSEL_OS);
+
+ if (caps & PMC_CAP_EDGE)
+ config |= P6_EVSEL_E;
+ if (caps & PMC_CAP_INVERT)
+ config |= P6_EVSEL_INV;
+ if (caps & PMC_CAP_INTERRUPT)
+ config |= P6_EVSEL_INT;
+
+ pm->pm_md.pm_p6.pm_p6_evsel = config;
+
+ PMCDBG(MDP,ALL,2, "p6-allocate config=0x%x", config);
+
+ return 0;
+}
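+
+/*
+ * Illustrative layout of the P6 PERFEVTSEL register assumed by the
+ * allocation code above (low 32 bits):
+ *
+ *	[31:24] CMASK  [23] INV  [22] EN  [20] INT  [19] PC  [18] E
+ *	[17] OS  [16] USR  [15:8] UMASK  [7:0] EVENT SELECT
+ *
+ * e.g. counting INST_RETIRED (0xC0) in user mode only would produce
+ * config = 0xC0 | P6_EVSEL_USR; P6_EVSEL_EN is OR'ed in later by
+ * p6_start_pmc().
+ */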
+
+static int
+p6_release_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ (void) pm;
+
+ PMCDBG(MDP,REL,1, "p6-release cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[p6,%d] PHW pmc %p != pmc %p", __LINE__, phw->phw_pmc, pm));
+
+ return 0;
+}
+
+static int
+p6_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ const struct p6pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] starting cpu%d,ri%d with no pmc configured",
+ __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STA,1, "p6-start cpu=%d ri=%d", cpu, ri);
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+		return 0;	/* TSCs are always running */
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P6,
+ ("[p6,%d] unknown PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ config = pm->pm_md.pm_p6.pm_p6_evsel;
+
+ PMCDBG(MDP,STA,2, "p6-start/2 cpu=%d ri=%d evselmsr=0x%x config=0x%x",
+ cpu, ri, pd->pm_evsel_msr, config);
+
+ if (pd->pm_evsel_msr == P6_MSR_EVSEL0) /* CTR 0 */
+ wrmsr(pd->pm_evsel_msr, config | P6_EVSEL_EN);
+	else {	/* CTR1 shares the enable bit with CTR 0 */
+ wrmsr(pd->pm_evsel_msr, config);
+ wrmsr(P6_MSR_EVSEL0, rdmsr(P6_MSR_EVSEL0) | P6_EVSEL_EN);
+ }
+ return 0;
+}
+
+static int
+p6_stop_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ struct p6pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
+ cpu, ri));
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P6,
+ ("[p6,%d] unknown PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ PMCDBG(MDP,STO,1, "p6-stop cpu=%d ri=%d", cpu, ri);
+
+ /*
+	 * If CTR0 is being turned off but CTR1 is active, we need to
+	 * leave CTR0's EN field set.  If CTR1 is being stopped, it
+ * suffices to zero its EVSEL register.
+ */
+
+ if (ri == 1 &&
+ pmc_pcpu[cpu]->pc_hwpmcs[2]->phw_pmc != NULL)
+ config = P6_EVSEL_EN;
+ else
+ config = 0;
+ wrmsr(pd->pm_evsel_msr, config);
+
+ PMCDBG(MDP,STO,2, "p6-stop/2 cpu=%d ri=%d config=0x%x", cpu, ri,
+ config);
+ return 0;
+}
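+
+/*
+ * Illustrative stop scenario for the shared enable bit: with both
+ * counters running, stopping CTR0 (ri == 1) writes EVSEL0 =
+ * P6_EVSEL_EN (event bits cleared, EN kept) so that CTR1 keeps
+ * counting, while stopping CTR1 (ri == 2) simply zeroes EVSEL1.
+ */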
+
+static int
+p6_intr(int cpu, uintptr_t eip)
+{
+ (void) cpu;
+ (void) eip;
+ return 0;
+}
+
+static int
+p6_describe(int cpu, int ri, struct pmc_info *pi,
+ struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ struct pmc_hw *phw;
+ struct p6pmc_descr *pd;
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &p6_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return error;
+
+ pi->pm_class = pd->pm_descr.pd_class;
+ pi->pm_caps = pd->pm_descr.pd_caps;
+ pi->pm_width = pd->pm_descr.pd_width;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return 0;
+}
+
+static int
+p6_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+	    ("[p6,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = p6_pmcdesc[ri].pm_pmc_msr;
+ return 0;
+}
+
+int
+pmc_initialize_p6(struct pmc_mdep *pmc_mdep)
+{
+ KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
+ ("[p6,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "p6-initialize");
+
+ switch (pmc_mdep->pmd_cputype) {
+
+ /*
+ * P6 Family Processors
+ */
+
+ case PMC_CPU_INTEL_P6:
+ case PMC_CPU_INTEL_CL:
+ case PMC_CPU_INTEL_PII:
+ case PMC_CPU_INTEL_PIII:
+ case PMC_CPU_INTEL_PM:
+
+ p6_cputype = pmc_mdep->pmd_cputype;
+
+ pmc_mdep->pmd_npmc = P6_NPMCS;
+ pmc_mdep->pmd_classes[1] = PMC_CLASS_P6;
+ pmc_mdep->pmd_nclasspmcs[1] = 2;
+
+ pmc_mdep->pmd_init = p6_init;
+ pmc_mdep->pmd_cleanup = p6_cleanup;
+ pmc_mdep->pmd_switch_in = p6_switch_in;
+ pmc_mdep->pmd_switch_out = p6_switch_out;
+ pmc_mdep->pmd_read_pmc = p6_read_pmc;
+ pmc_mdep->pmd_write_pmc = p6_write_pmc;
+ pmc_mdep->pmd_config_pmc = p6_config_pmc;
+ pmc_mdep->pmd_allocate_pmc = p6_allocate_pmc;
+ pmc_mdep->pmd_release_pmc = p6_release_pmc;
+ pmc_mdep->pmd_start_pmc = p6_start_pmc;
+ pmc_mdep->pmd_stop_pmc = p6_stop_pmc;
+ pmc_mdep->pmd_intr = p6_intr;
+ pmc_mdep->pmd_describe = p6_describe;
+ pmc_mdep->pmd_get_msr = p6_get_msr; /* i386 */
+
+ break;
+ default:
+ KASSERT(0,("[p6,%d] Unknown CPU type", __LINE__));
+ return ENOSYS;
+ }
+
+ return 0;
+}
diff --git a/sys/hwpmc/hwpmc_amd.c b/sys/hwpmc/hwpmc_amd.c
new file mode 100644
index 0000000..c3bb56c
--- /dev/null
+++ b/sys/hwpmc/hwpmc_amd.c
@@ -0,0 +1,996 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Support for the AMD K7 and later processors */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/* AMD K7 and K8 PMCs */
+
+#define AMD_PMC_EVSEL_0 0xC0010000
+#define AMD_PMC_EVSEL_1 0xC0010001
+#define AMD_PMC_EVSEL_2 0xC0010002
+#define AMD_PMC_EVSEL_3 0xC0010003
+
+#define AMD_PMC_PERFCTR_0 0xC0010004
+#define AMD_PMC_PERFCTR_1 0xC0010005
+#define AMD_PMC_PERFCTR_2 0xC0010006
+#define AMD_PMC_PERFCTR_3 0xC0010007
+
+#define K7_VALID_EVENT_CODE(c) (((c) >= 0x40 && (c) <= 0x47) || \
+ ((c) >= 0x80 && (c) <= 0x85) || ((c) >= 0xC0 && (c) <= 0xC7) || \
+ ((c) >= 0xCD && (c) <= 0xCF))
+
+#define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \
+ PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
+
+/* reserved bits include bit 21 and the top two bits of the unit mask */
+#define K7_PMC_RESERVED ((1 << 21) | (3 << 13))
+
+#define K8_PMC_RESERVED (1 << 21)
+
+#define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)
+#define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
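+
+/*
+ * A sampling PMC is loaded with the negated sample count (see
+ * amd_write_pmc() below), so bit 47, the sign bit of the 48-bit
+ * counter, stays set while the counter increments towards zero; once
+ * the counter crosses zero, bit 47 reads as 0, which the macro above
+ * takes as the overflow indication.
+ */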
+
+#if __i386__
+#define AMD_NPMCS K7_NPMCS
+#define AMD_PMC_CLASS PMC_CLASS_K7
+#define AMD_PMC_COUNTERMASK K7_PMC_COUNTERMASK
+#define AMD_PMC_TO_COUNTER(x) K7_PMC_TO_COUNTER(x)
+#define AMD_PMC_INVERT K7_PMC_INVERT
+#define AMD_PMC_ENABLE K7_PMC_ENABLE
+#define AMD_PMC_INT K7_PMC_INT
+#define AMD_PMC_PC K7_PMC_PC
+#define AMD_PMC_EDGE K7_PMC_EDGE
+#define AMD_PMC_OS K7_PMC_OS
+#define AMD_PMC_USR K7_PMC_USR
+
+#define AMD_PMC_UNITMASK_M K7_PMC_UNITMASK_M
+#define AMD_PMC_UNITMASK_O K7_PMC_UNITMASK_O
+#define AMD_PMC_UNITMASK_E K7_PMC_UNITMASK_E
+#define AMD_PMC_UNITMASK_S K7_PMC_UNITMASK_S
+#define AMD_PMC_UNITMASK_I K7_PMC_UNITMASK_I
+
+#define AMD_PMC_UNITMASK K7_PMC_UNITMASK
+#define AMD_PMC_EVENTMASK K7_PMC_EVENTMASK
+#define AMD_PMC_TO_UNITMASK(x) K7_PMC_TO_UNITMASK(x)
+#define AMD_PMC_TO_EVENTMASK(x) K7_PMC_TO_EVENTMASK(x)
+#define AMD_VALID_BITS K7_VALID_BITS
+
+#define AMD_PMC_CLASS_NAME "K7-"
+
+#elif __amd64__
+
+#define AMD_NPMCS K8_NPMCS
+#define AMD_PMC_CLASS PMC_CLASS_K8
+#define AMD_PMC_COUNTERMASK K8_PMC_COUNTERMASK
+#define AMD_PMC_TO_COUNTER(x) K8_PMC_TO_COUNTER(x)
+#define AMD_PMC_INVERT K8_PMC_INVERT
+#define AMD_PMC_ENABLE K8_PMC_ENABLE
+#define AMD_PMC_INT K8_PMC_INT
+#define AMD_PMC_PC K8_PMC_PC
+#define AMD_PMC_EDGE K8_PMC_EDGE
+#define AMD_PMC_OS K8_PMC_OS
+#define AMD_PMC_USR K8_PMC_USR
+
+#define AMD_PMC_UNITMASK_M K8_PMC_UNITMASK_M
+#define AMD_PMC_UNITMASK_O K8_PMC_UNITMASK_O
+#define AMD_PMC_UNITMASK_E K8_PMC_UNITMASK_E
+#define AMD_PMC_UNITMASK_S K8_PMC_UNITMASK_S
+#define AMD_PMC_UNITMASK_I K8_PMC_UNITMASK_I
+
+#define AMD_PMC_UNITMASK K8_PMC_UNITMASK
+#define AMD_PMC_EVENTMASK K8_PMC_EVENTMASK
+#define AMD_PMC_TO_UNITMASK(x) K8_PMC_TO_UNITMASK(x)
+#define AMD_PMC_TO_EVENTMASK(x) K8_PMC_TO_EVENTMASK(x)
+#define AMD_VALID_BITS K8_VALID_BITS
+
+#define AMD_PMC_CLASS_NAME "K8-"
+
+#else
+#error Unsupported architecture.
+#endif
+
+/* AMD K7 & K8 PMCs */
+struct amd_descr {
+ struct pmc_descr pm_descr; /* "base class" */
+ uint32_t pm_evsel; /* address of EVSEL register */
+ uint32_t pm_perfctr; /* address of PERFCTR register */
+};
+
+static const struct amd_descr amd_pmcdesc[AMD_NPMCS] =
+{
+ {
+ .pm_descr =
+ {
+ .pd_name = "TSC",
+ .pd_class = PMC_CLASS_TSC,
+ .pd_caps = PMC_CAP_READ,
+ .pd_width = 64
+ },
+ .pm_evsel = MSR_TSC,
+ .pm_perfctr = 0 /* unused */
+ },
+
+ {
+ .pm_descr =
+ {
+ .pd_name = AMD_PMC_CLASS_NAME "0",
+ .pd_class = AMD_PMC_CLASS,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_0,
+ .pm_perfctr = AMD_PMC_PERFCTR_0
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = AMD_PMC_CLASS_NAME "1",
+ .pd_class = AMD_PMC_CLASS,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_1,
+ .pm_perfctr = AMD_PMC_PERFCTR_1
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = AMD_PMC_CLASS_NAME "2",
+ .pd_class = AMD_PMC_CLASS,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_2,
+ .pm_perfctr = AMD_PMC_PERFCTR_2
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = AMD_PMC_CLASS_NAME "3",
+ .pd_class = AMD_PMC_CLASS,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_3,
+ .pm_perfctr = AMD_PMC_PERFCTR_3
+ }
+};
+
+struct amd_event_code_map {
+ enum pmc_event pe_ev; /* enum value */
+ uint8_t pe_code; /* encoded event mask */
+ uint8_t pe_mask; /* bits allowed in unit mask */
+};
+
+const struct amd_event_code_map amd_event_codes[] = {
+#if __i386__
+ { PMC_EV_K7_DC_ACCESSES, 0x40, 0 },
+ { PMC_EV_K7_DC_MISSES, 0x41, 0 },
+ { PMC_EV_K7_DC_REFILLS_FROM_L2, 0x42, K7_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_DC_REFILLS_FROM_SYSTEM, 0x43, K7_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_DC_WRITEBACKS, 0x44, K7_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
+ { PMC_EV_K7_L1_AND_L2_DTLB_MISSES, 0x46, 0 },
+ { PMC_EV_K7_MISALIGNED_REFERENCES, 0x47, 0 },
+
+ { PMC_EV_K7_IC_FETCHES, 0x80, 0 },
+ { PMC_EV_K7_IC_MISSES, 0x81, 0 },
+
+ { PMC_EV_K7_L1_ITLB_MISSES, 0x84, 0 },
+ { PMC_EV_K7_L1_L2_ITLB_MISSES, 0x85, 0 },
+
+ { PMC_EV_K7_RETIRED_INSTRUCTIONS, 0xC0, 0 },
+ { PMC_EV_K7_RETIRED_OPS, 0xC1, 0 },
+ { PMC_EV_K7_RETIRED_BRANCHES, 0xC2, 0 },
+ { PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
+ { PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 0xC4, 0 },
+ { PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
+ { PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
+ { PMC_EV_K7_RETIRED_RESYNC_BRANCHES, 0xC7, 0 },
+ { PMC_EV_K7_INTERRUPTS_MASKED_CYCLES, 0xCD, 0 },
+ { PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
+ { PMC_EV_K7_HARDWARE_INTERRUPTS, 0xCF, 0 }
+#endif
+
+#if __amd64__
+ { PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F },
+ { PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 },
+ { PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 },
+
+ { PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
+ 0x21, 0x00 },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
+ { PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 },
+ { PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 },
+ { PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 },
+ { PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 },
+
+ { PMC_EV_K8_DC_ACCESS, 0x40, 0x00 },
+ { PMC_EV_K8_DC_MISS, 0x41, 0x00 },
+ { PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F },
+ { PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F },
+ { PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F },
+ { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 },
+ { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 },
+ { PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 },
+ { PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 },
+ { PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
+ { PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 },
+ { PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
+ { PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 },
+
+ { PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 },
+ { PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F },
+ { PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 },
+ { PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 },
+
+ { PMC_EV_K8_IC_FETCH, 0x80, 0x00 },
+ { PMC_EV_K8_IC_MISS, 0x81, 0x00 },
+ { PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 },
+ { PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 },
+ { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 },
+ { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 },
+ { PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
+ { PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 },
+ { PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 },
+ { PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 },
+
+ { PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
+ 0xCA, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F },
+ { PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
+ 0xCC, 0x07 },
+ { PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 },
+ { PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
+ { PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 },
+
+ { PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
+ 0xD2, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
+ 0xD5, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
+ 0xD6, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
+ 0xD9, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
+ 0xDA, 0x00 },
+ { PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 },
+
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
+ 0xE2, 0x00 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
+ { PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F },
+ { PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
+#endif
+
+};
+
+const int amd_event_codes_size =
+ sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
+
+/*
+ * read a pmc register
+ */
+
+static int
+amd_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ enum pmc_mode mode;
+ const struct amd_descr *pd;
+ struct pmc *pm;
+ const struct pmc_hw *phw;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &amd_pmcdesc[ri];
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ mode = pm->pm_mode;
+
+ PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);
+
+ /* Reading the TSC is a special case */
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
+ KASSERT(PMC_IS_COUNTING_MODE(mode),
+ ("[amd,%d] TSC counter in non-counting mode", __LINE__));
+ *v = rdtsc();
+ PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
+ return 0;
+ }
+
+ KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+
+ tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
+ if (PMC_IS_SAMPLING_MODE(mode))
+ *v = -tmp;
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
+
+ return 0;
+}
+
+/*
+ * Write a PMC MSR.
+ */
+
+static int
+amd_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ const struct amd_descr *pd;
+ struct pmc *pm;
+ const struct pmc_hw *phw;
+ enum pmc_mode mode;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &amd_pmcdesc[ri];
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
+ cpu, ri));
+
+ mode = pm->pm_mode;
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+
+ /* use 2's complement of the count for sampling mode PMCs */
+ if (PMC_IS_SAMPLING_MODE(mode))
+ v = -v;
+
+ PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ /* write the PMC value */
+ wrmsr(pd->pm_perfctr, v);
+ return 0;
+}
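+
+/*
+ * Example (illustrative): to sample once every 100000 events, a
+ * caller writes v = 100000; the negation above stores -100000 in the
+ * counter, which then overflows after exactly 100000 increments, and
+ * a subsequent amd_read_pmc() returns the number of events still to
+ * go before the next overflow.
+ */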
+
+/*
+ * configure hardware pmc according to the configuration recorded in
+ * pmc 'pm'.
+ */
+
+static int
+amd_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[amd,%d] hwpmc not unconfigured before re-config", __LINE__));
+
+ phw->phw_pmc = pm;
+ return 0;
+}
+
+/*
+ * Machine dependent actions taken during the context switch in of a
+ * thread.
+ */
+
+static int
+amd_switch_in(struct pmc_cpu *pc)
+{
+ (void) pc;
+
+ /* enable the RDPMC instruction */
+ load_cr4(rcr4() | CR4_PCE);
+ return 0;
+}
+
+/*
+ * Machine dependent actions taken during the context switch out of a
+ * thread.
+ */
+
+static int
+amd_switch_out(struct pmc_cpu *pc)
+{
+ (void) pc;
+
+ /* disallow RDPMC instruction */
+ load_cr4(rcr4() & ~CR4_PCE);
+ return 0;
+}
+
+/*
+ * Check if a given allocation is feasible.
+ */
+
+static int
+amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ int i;
+ uint32_t allowed_unitmask, caps, config, unitmask;
+ enum pmc_event pe;
+ const struct pmc_descr *pd;
+
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row index %d", __LINE__, ri));
+
+ pd = &amd_pmcdesc[ri].pm_descr;
+
+ /* check class match */
+ if (pd->pd_class != pm->pm_class)
+ return EINVAL;
+
+ caps = pm->pm_caps;
+
+ PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);
+
+ if ((pd->pd_caps & caps) != caps)
+ return EPERM;
+ if (pd->pd_class == PMC_CLASS_TSC) {
+		/* TSCs are always allocated in system-wide counting mode */
+ if (a->pm_ev != PMC_EV_TSC_TSC ||
+ a->pm_mode != PMC_MODE_SC)
+ return EINVAL;
+ return 0;
+ }
+
+ KASSERT(pd->pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));
+
+ pe = a->pm_ev;
+
+ /* map ev to the correct event mask code */
+ config = allowed_unitmask = 0;
+ for (i = 0; i < amd_event_codes_size; i++)
+ if (amd_event_codes[i].pe_ev == pe) {
+ config =
+ AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
+ allowed_unitmask =
+ AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
+ break;
+ }
+ if (i == amd_event_codes_size)
+ return EINVAL;
+
+ unitmask = a->pm_amd_config & AMD_PMC_UNITMASK;
+ if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
+ return EINVAL;
+
+ if (unitmask && (caps & PMC_CAP_QUALIFIER))
+ config |= unitmask;
+
+ if (caps & PMC_CAP_THRESHOLD)
+ config |= a->pm_amd_config & AMD_PMC_COUNTERMASK;
+
+ /* set at least one of the 'usr' or 'os' caps */
+ if (caps & PMC_CAP_USER)
+ config |= AMD_PMC_USR;
+ if (caps & PMC_CAP_SYSTEM)
+ config |= AMD_PMC_OS;
+ if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
+ config |= (AMD_PMC_USR|AMD_PMC_OS);
+
+ if (caps & PMC_CAP_EDGE)
+ config |= AMD_PMC_EDGE;
+ if (caps & PMC_CAP_INVERT)
+ config |= AMD_PMC_INVERT;
+ if (caps & PMC_CAP_INTERRUPT)
+ config |= AMD_PMC_INT;
+
+ pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */
+
+ PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
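+
+/*
+ * Example (illustrative): a request for PMC_EV_K7_DC_MISSES (event
+ * code 0x41, no unit-mask bits allowed) with caps = PMC_CAP_USER
+ * yields config = AMD_PMC_TO_EVENTMASK(0x41) | AMD_PMC_USR; the
+ * ENABLE bit is OR'ed in later by amd_start_pmc().
+ */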
+
+/*
+ * Release machine dependent state associated with a PMC. This is a
+ * no-op on this architecture.
+ *
+ */
+
+/* ARGSUSED0 */
+static int
+amd_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+#if DEBUG
+ const struct amd_descr *pd;
+#endif
+ struct pmc_hw *phw;
+
+ (void) pmc;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+#if DEBUG
+ pd = &amd_pmcdesc[ri];
+ if (pd->pm_descr.pd_class == AMD_PMC_CLASS)
+ KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] PMC %d released while active", __LINE__, ri));
+#endif
+
+ return 0;
+}
+
+/*
+ * start a PMC.
+ */
+
+static int
+amd_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ const struct amd_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &amd_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0; /* TSCs are always running */
+
+ KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+
+ KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
+ ri, cpu, pd->pm_descr.pd_name));
+
+ /* turn on the PMC ENABLE bit */
+ config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
+
+ PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);
+
+ wrmsr(pd->pm_evsel, config);
+ return 0;
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+amd_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ const struct amd_descr *pd;
+ uint64_t config;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &amd_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
+ cpu, ri));
+
+ /* can't stop a TSC */
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+
+ KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
+ __LINE__, ri, cpu, pd->pm_descr.pd_name));
+
+ PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);
+
+ /* turn off the PMC ENABLE bit */
+ config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
+ wrmsr(pd->pm_evsel, config);
+ return 0;
+}
+
+/*
+ * Interrupt handler.  This function needs to return '1' if the
+ * interrupt was caused by this CPU's PMCs and '0' otherwise.  It is
+ * not allowed to sleep or to do anything that a 'fast' interrupt
+ * handler is not allowed to do.
+ */
+
+static int
+amd_intr(int cpu, uintptr_t eip)
+{
+ int i, retval;
+ enum pmc_mode mode;
+ uint32_t perfctr;
+ struct pmc *pm;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] out of range CPU %d", __LINE__, cpu));
+
+ retval = 0;
+
+ pc = pmc_pcpu[cpu];
+
+ /*
+ * look for all PMCs that have interrupted:
+ * - skip over the TSC [PMC#0]
+ * - look for a PMC with a valid 'struct pmc' association
+ * - look for a PMC in (a) sampling mode and (b) which has
+ * overflowed. If found, we update the process's
+ * histogram or send it a profiling signal by calling
+ * the appropriate helper function.
+ */
+
+ for (i = 1; i < AMD_NPMCS; i++) {
+
+ phw = pc->pc_hwpmcs[i];
+ perfctr = amd_pmcdesc[i].pm_perfctr;
+ KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));
+
+ if ((pm = phw->phw_pmc) == NULL ||
+ pm->pm_state != PMC_STATE_RUNNING) {
+ atomic_add_int(&pmc_stats.pm_intr_ignored, 1);
+ continue;
+ }
+
+ mode = pm->pm_mode;
+ if (PMC_IS_SAMPLING_MODE(mode) &&
+ AMD_PMC_HAS_OVERFLOWED(perfctr)) {
+ atomic_add_int(&pmc_stats.pm_intr_processed, 1);
+ if (PMC_IS_SYSTEM_MODE(mode))
+ pmc_update_histogram(phw, eip);
+ else if (PMC_IS_VIRTUAL_MODE(mode))
+ pmc_send_signal(pm);
+ retval = 1;
+ }
+ }
+ return retval;
+}
+
+/*
+ * describe a PMC
+ */
+static int
+amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ const struct amd_descr *pd;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &amd_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return error;
+
+ pi->pm_class = pd->pm_descr.pd_class;
+ pi->pm_caps = pd->pm_descr.pd_caps;
+ pi->pm_width = pd->pm_descr.pd_width;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * i386 specific entry points
+ */
+
+/*
+ * return the MSR address of the given PMC.
+ */
+
+static int
+amd_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = amd_pmcdesc[ri].pm_perfctr;
+ return 0;
+}
+
+/*
+ * processor dependent initialization.
+ */
+
+/*
+ * Per-processor data structure
+ *
+ * [common stuff]
+ * [5 struct pmc_hw pointers]
+ * [5 struct pmc_hw structures]
+ */
+
+struct amd_cpu {
+ struct pmc_cpu pc_common;
+ struct pmc_hw *pc_hwpmcs[AMD_NPMCS];
+ struct pmc_hw pc_amdpmcs[AMD_NPMCS];
+};
+
+
+static int
+amd_init(int cpu)
+{
+ int n;
+ struct amd_cpu *pcs;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);
+
+ MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ if (pcs == NULL)
+ return ENOMEM;
+
+ phw = &pcs->pc_amdpmcs[0];
+
+ /*
+ * Initialize the per-cpu mutex and set the content of the
+ * hardware descriptors to a known state.
+ */
+
+ for (n = 0; n < AMD_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pcs->pc_hwpmcs[n] = phw;
+ }
+
+ /* Mark the TSC as shareable */
+ pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
+
+ pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
+
+ return 0;
+}
+
+
+/*
+ * processor dependent cleanup prior to the KLD
+ * being unloaded
+ */
+
+static int
+amd_cleanup(int cpu)
+{
+ int i;
+ uint32_t evsel;
+ struct pmc_cpu *pcs;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
+
+ /*
+ * First, turn off all PMCs on this CPU.
+ */
+
+	for (i = 0; i < 4; i++) { /* XXX this loop is no longer needed */
+ evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
+ evsel &= ~AMD_PMC_ENABLE;
+ wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
+ }
+
+ /*
+ * Next, free up allocated space.
+ */
+
+	pcs = pmc_pcpu[cpu];
+
+	KASSERT(pcs != NULL,
+	    ("[amd,%d] null per-cpu state pointer (cpu%d)", __LINE__, cpu));
+
+#if	DEBUG
+	/* check the TSC */
+	KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
+	    ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
+	for (i = 1; i < AMD_NPMCS; i++) {
+		KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
+		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
+		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
+		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
+	}
+#endif
+
+ pmc_pcpu[cpu] = NULL;
+ FREE(pcs, M_PMC);
+ return 0;
+}
+
+/*
+ * Initialize ourselves.
+ */
+
+struct pmc_mdep *
+pmc_amd_initialize(void)
+{
+
+ struct pmc_mdep *pmc_mdep;
+
+	/* The presence of hardware performance counters on AMD
+	   Athlon, Duron or later processors is _not_ indicated by
+	   any of the processor feature flags set by the 'CPUID'
+	   instruction, so we only check the 'instruction family'
+	   field returned by CPUID for an instruction family >= 6.
+	   This test needs to be refined. */
+
+ if ((cpu_id & 0xF00) < 0x600)
+ return NULL;
+
+ MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
+ M_PMC, M_WAITOK|M_ZERO);
+
+#if __i386__
+ pmc_mdep->pmd_cputype = PMC_CPU_AMD_K7;
+#elif __amd64__
+ pmc_mdep->pmd_cputype = PMC_CPU_AMD_K8;
+#else
+#error Unknown AMD CPU type.
+#endif
+
+ pmc_mdep->pmd_npmc = AMD_NPMCS;
+
+ /* this processor has two classes of usable PMCs */
+ pmc_mdep->pmd_nclass = 2;
+ pmc_mdep->pmd_classes[0] = PMC_CLASS_TSC;
+ pmc_mdep->pmd_classes[1] = AMD_PMC_CLASS;
+ pmc_mdep->pmd_nclasspmcs[0] = 1;
+ pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);
+
+ pmc_mdep->pmd_init = amd_init;
+ pmc_mdep->pmd_cleanup = amd_cleanup;
+ pmc_mdep->pmd_switch_in = amd_switch_in;
+ pmc_mdep->pmd_switch_out = amd_switch_out;
+ pmc_mdep->pmd_read_pmc = amd_read_pmc;
+ pmc_mdep->pmd_write_pmc = amd_write_pmc;
+ pmc_mdep->pmd_config_pmc = amd_config_pmc;
+ pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
+ pmc_mdep->pmd_release_pmc = amd_release_pmc;
+ pmc_mdep->pmd_start_pmc = amd_start_pmc;
+ pmc_mdep->pmd_stop_pmc = amd_stop_pmc;
+ pmc_mdep->pmd_intr = amd_intr;
+ pmc_mdep->pmd_describe = amd_describe;
+ pmc_mdep->pmd_get_msr = amd_get_msr; /* i386 */
+
+ PMCDBG(MDP,INI,0,"%s","amd-initialize");
+
+ return pmc_mdep;
+}
diff --git a/sys/hwpmc/hwpmc_intel.c b/sys/hwpmc/hwpmc_intel.c
new file mode 100644
index 0000000..2448b37
--- /dev/null
+++ b/sys/hwpmc/hwpmc_intel.c
@@ -0,0 +1,142 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+struct pmc_mdep *
+pmc_intel_initialize(void)
+{
+ struct pmc_mdep *pmc_mdep;
+ enum pmc_cputype cputype;
+ int error, model;
+
+ KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
+ ("[intel,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,0, "intel-initialize cpuid=0x%x", cpu_id);
+
+ cputype = -1;
+
+ switch (cpu_id & 0xF00) {
+ case 0x500: /* Pentium family processors */
+ cputype = PMC_CPU_INTEL_P5;
+ break;
+ case 0x600: /* Pentium Pro, Celeron, Pentium II & III */
+ switch ((cpu_id & 0xF0) >> 4) { /* model number field */
+ case 0x1:
+ cputype = PMC_CPU_INTEL_P6;
+ break;
+ case 0x3: case 0x5:
+ cputype = PMC_CPU_INTEL_PII;
+ break;
+ case 0x6:
+ cputype = PMC_CPU_INTEL_CL;
+ break;
+ case 0x7: case 0x8: case 0xA: case 0xB:
+ cputype = PMC_CPU_INTEL_PIII;
+ break;
+ case 0x9: case 0xD:
+ cputype = PMC_CPU_INTEL_PM;
+ break;
+ }
+ break;
+ case 0xF00: /* P4 */
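+		/*
+		 * Merge the extended model field (cpu_id bits 16-19)
+		 * with the basic model field (bits 4-7): e.g.
+		 * (illustrative) cpu_id 0xF29 decodes to model 0x2,
+		 * within the known range checked below.
+		 */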
+ model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
+ if (model >= 0 && model <= 3) /* known models */
+ cputype = PMC_CPU_INTEL_PIV;
+ break;
+ }
+
+ if ((int) cputype == -1) {
+ printf("pmc: Unknown Intel CPU.\n");
+ return NULL;
+ }
+
+ MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
+ M_PMC, M_WAITOK|M_ZERO);
+
+ pmc_mdep->pmd_cputype = cputype;
+ pmc_mdep->pmd_nclass = 2;
+ pmc_mdep->pmd_classes[0] = PMC_CLASS_TSC;
+ pmc_mdep->pmd_nclasspmcs[0] = 1;
+
+ error = 0;
+
+ switch (cputype) {
+
+ /*
+ * Intel Pentium 4 Processors
+ */
+
+ case PMC_CPU_INTEL_PIV:
+ error = pmc_initialize_p4(pmc_mdep);
+ break;
+
+ /*
+ * P6 Family Processors
+ */
+
+ case PMC_CPU_INTEL_P6:
+ case PMC_CPU_INTEL_CL:
+ case PMC_CPU_INTEL_PII:
+ case PMC_CPU_INTEL_PIII:
+ case PMC_CPU_INTEL_PM:
+
+ error = pmc_initialize_p6(pmc_mdep);
+ break;
+
+ /*
+ * Intel Pentium PMCs.
+ */
+
+ case PMC_CPU_INTEL_P5:
+ error = pmc_initialize_p5(pmc_mdep);
+ break;
+
+ default:
+ KASSERT(0,("[intel,%d] Unknown CPU type", __LINE__));
+ }
+
+ if (error) {
+ FREE(pmc_mdep, M_PMC);
+ pmc_mdep = NULL;
+ }
+
+ return pmc_mdep;
+}
diff --git a/sys/hwpmc/hwpmc_mod.c b/sys/hwpmc/hwpmc_mod.c
new file mode 100644
index 0000000..89b2954
--- /dev/null
+++ b/sys/hwpmc/hwpmc_mod.c
@@ -0,0 +1,3671 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/eventhandler.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/sched.h>
+#include <sys/signalvar.h>
+#include <sys/smp.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/systm.h>
+
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * Types
+ */
+
+enum pmc_flags {
+ PMC_FLAG_NONE = 0x00, /* do nothing */
+ PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */
+ PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
+};
+
+/*
+ * The offset in sysent where the syscall is allocated.
+ */
+
+static int pmc_syscall_num = NO_SYSCALL;
+struct pmc_cpu **pmc_pcpu; /* per-cpu state */
+pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */
+
+#define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
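+
+/*
+ * pmc_pcpu_saved[] is a flat [ncpu][npmc] matrix; PMC_PCPU_SAVED(C,R)
+ * names the saved value of the PMC in row 'R' of CPU 'C'.
+ */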
+
+struct mtx_pool *pmc_mtxpool;
+static int *pmc_pmcdisp; /* PMC row dispositions */
+
+#define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0)
+#define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0)
+#define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0)
+
+#define PMC_MARK_ROW_FREE(R) do { \
+ pmc_pmcdisp[(R)] = 0; \
+} while (0)
+
+#define PMC_MARK_ROW_STANDALONE(R) do { \
+ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&pmc_pmcdisp[(R)], -1); \
+ KASSERT(pmc_pmcdisp[(R)] >= (-mp_ncpus), ("[pmc,%d] row " \
+ "disposition error", __LINE__)); \
+} while (0)
+
+#define PMC_UNMARK_ROW_STANDALONE(R) do { \
+ atomic_add_int(&pmc_pmcdisp[(R)], 1); \
+ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+} while (0)
+
+#define PMC_MARK_ROW_THREAD(R) do { \
+ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&pmc_pmcdisp[(R)], 1); \
+} while (0)
+
+#define PMC_UNMARK_ROW_THREAD(R) do { \
+ atomic_add_int(&pmc_pmcdisp[(R)], -1); \
+ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+} while (0)
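+
+/*
+ * A row's disposition is a signed reference count: zero means the
+ * row is free, a positive value means it is in use by process-virtual
+ * PMCs, and a negative value means it is in use by system-wide
+ * (standalone) PMCs; the KASSERTs above ensure the two uses are never
+ * mixed on the same row.
+ */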
+
+
+/* various event handlers */
+static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
+
+/* Module statistics */
+struct pmc_op_getdriverstats pmc_stats;
+
+/* Machine/processor dependent operations */
+struct pmc_mdep *md;
+
+/*
+ * Hash tables mapping owner processes and target threads to PMCs.
+ */
+
+struct mtx pmc_processhash_mtx; /* spin mutex */
+static u_long pmc_processhashmask;
+static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
+
+/*
+ * Hash table of PMC owner descriptors. This table is protected by
+ * the shared PMC "sx" lock.
+ */
+
+static u_long pmc_ownerhashmask;
+static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
+
+/*
+ * Prototypes
+ */
+
+#if DEBUG
+static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
+static int pmc_debugflags_parse(char *newstr, char *fence);
+#endif
+
+static int load(struct module *module, int cmd, void *arg);
+static int pmc_syscall_handler(struct thread *td, void *syscall_args);
+static int pmc_configure_log(struct pmc_owner *po, int logfd);
+static void pmc_log_process_exit(struct pmc *pm, struct pmc_process *pp);
+static struct pmc *pmc_allocate_pmc_descriptor(void);
+static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
+ pmc_id_t pmc);
+static void pmc_release_pmc_descriptor(struct pmc *pmc);
+static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri);
+static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
+ uint32_t mode);
+static void pmc_remove_process_descriptor(struct pmc_process *pp);
+static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
+static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
+static void pmc_remove_owner(struct pmc_owner *po);
+static void pmc_maybe_remove_owner(struct pmc_owner *po);
+static void pmc_unlink_target_process(struct pmc *pmc,
+ struct pmc_process *pp);
+static void pmc_link_target_process(struct pmc *pm,
+ struct pmc_process *pp);
+static void pmc_unlink_owner(struct pmc *pmc);
+static void pmc_cleanup(void);
+static void pmc_save_cpu_binding(struct pmc_binding *pb);
+static void pmc_restore_cpu_binding(struct pmc_binding *pb);
+static void pmc_select_cpu(int cpu);
+static void pmc_process_exit(void *arg, struct proc *p);
+static void pmc_process_fork(void *arg, struct proc *p1,
+ struct proc *p2, int n);
+static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
+static int pmc_attach_process(struct proc *p, struct pmc *pm);
+static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
+ int flags);
+static int pmc_detach_process(struct proc *p, struct pmc *pm);
+static int pmc_start(struct pmc *pm);
+static int pmc_stop(struct pmc *pm);
+static int pmc_can_attach(struct pmc *pm, struct proc *p);
+
+/*
+ * Kernel tunables and sysctl(8) interface.
+ */
+
+#define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."
+
+SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
+
+#if DEBUG
+unsigned int pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
+char pmc_debugstr[PMC_DEBUG_STRSIZE];
+TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
+ sizeof(pmc_debugstr));
+SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
+ CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
+ 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
+#endif
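+
+/*
+ * Example (illustrative): the debug flags are given as a string of
+ * 3-letter keywords, parsed by pmc_debugflags_parse() below, and may
+ * be changed at runtime with sysctl(8), e.g.
+ *
+ *	# sysctl kern.hwpmc.debugflags="mod,pmc,level=f"
+ */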
+
+/*
+ * kern.hwpmc.hashsize -- determines the number of rows in the
+ * hash tables used to look up target and owner processes.
+ */
+
+static int pmc_hashsize = PMC_HASH_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_hashsize, 0, "rows in hash tables");
+
+/*
+ * kern.hwpmc.pcpubuffersize -- the size of each per-cpu
+ * area for collecting PC samples.
+ */
+
+static int pmc_pcpu_buffer_size = PMC_PCPU_BUFFER_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "pcpubuffersize", &pmc_pcpu_buffer_size);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, pcpubuffersize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_pcpu_buffer_size, 0, "size of per-cpu buffer in 4K pages");
+
+/*
+ * kern.hwpmc.mtxpoolsize -- number of mutexes in the spin mutex pool.
+ */
+
+static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_mtxpool_size, 0, "size of spin mutex pool");
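+
+/*
+ * Example (illustrative values): since the tunables above are marked
+ * CTLFLAG_TUN, they may also be set at boot time from loader.conf(5):
+ *
+ *	kern.hwpmc.hashsize=64
+ *	kern.hwpmc.mtxpoolsize=32
+ */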
+
+/*
+ * security.bsd.unprivileged_syspmcs -- allow non-root processes to
+ * allocate system-wide PMCs.
+ *
+ * Allowing unprivileged processes to allocate system PMCs is convenient
+ * if system-wide measurements need to be taken concurrently with other
+ * per-process measurements. This feature is turned off by default.
+ */
+
+SYSCTL_DECL(_security_bsd);
+
+static int pmc_unprivileged_syspmcs = 0;
+TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
+SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
+ &pmc_unprivileged_syspmcs, 0,
+ "allow unprivileged process to allocate system PMCs");
+
+#if PMC_HASH_USE_CRC32
+
+#define PMC_HASH_PTR(P,M) (crc32(&(P), sizeof((P))) & (M))
+
+#else /* integer multiplication */
+
+#if LONG_BIT == 64
+#define _PMC_HM 11400714819323198486u
+#elif LONG_BIT == 32
+#define _PMC_HM 2654435769u
+#else
+#error Must know the size of 'long' to compile
+#endif
+
+/*
+ * Hash function. Discard the lower 2 bits of the pointer since
+ * these are always zero for our uses. The hash multiplier is
+ * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
+ */
+
+#define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
+
+#endif
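+
+/*
+ * Example (illustrative): a lookup computes a bucket index by masking
+ * the hash of a process pointer with the table mask, as done in
+ * pmc_find_process_descriptor() below:
+ *
+ *	hindex = PMC_HASH_PTR(p, pmc_processhashmask);
+ *	pph = &pmc_processhash[hindex];
+ */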
+
+/*
+ * Syscall structures
+ */
+
+/* The `sysent' for the new syscall */
+static struct sysent pmc_sysent = {
+ 2, /* sy_narg */
+ pmc_syscall_handler /* sy_call */
+};
+
+static struct syscall_module_data pmc_syscall_mod = {
+ load,
+ NULL,
+ &pmc_syscall_num,
+ &pmc_sysent,
+ { 0, NULL }
+};
+
+static moduledata_t pmc_mod = {
+ PMC_MODULE_NAME,
+ syscall_module_handler,
+ &pmc_syscall_mod
+};
+
+DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
+MODULE_VERSION(pmc, PMC_VERSION);
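+
+/*
+ * The declarations above register a new syscall when the module is
+ * loaded.  The module itself can be loaded at runtime, e.g.
+ * (illustrative):
+ *
+ *	# kldload hwpmc
+ */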
+
+#if DEBUG
+static int
+pmc_debugflags_parse(char *newstr, char *fence)
+{
+	char c, *p, *q;
+ unsigned int tmpflags;
+ int level;
+ char tmpbuf[4]; /* 3 character keyword + '\0' */
+
+ tmpflags = 0;
+ level = 0xF; /* max verbosity */
+
+ p = newstr;
+
+ for (; p < fence && (c = *p);) {
+
+ /* skip separators */
+ if (c == ' ' || c == '\t' || c == ',') {
+ p++; continue;
+ }
+
+ (void) strlcpy(tmpbuf, p, sizeof(tmpbuf));
+
+#define CMP_SET_FLAG_MAJ(S,F) \
+ else if (strncmp(tmpbuf, S, 3) == 0) \
+ tmpflags |= __PMCDFMAJ(F)
+
+#define CMP_SET_FLAG_MIN(S,F) \
+ else if (strncmp(tmpbuf, S, 3) == 0) \
+ tmpflags |= __PMCDFMIN(F)
+
+		if (fence - p > 6 && strncmp(p, "level=", 6) == 0) {
+ p += 6; /* skip over keyword */
+ level = strtoul(p, &q, 16);
+ }
+ CMP_SET_FLAG_MAJ("mod", MOD);
+ CMP_SET_FLAG_MAJ("pmc", PMC);
+ CMP_SET_FLAG_MAJ("ctx", CTX);
+ CMP_SET_FLAG_MAJ("own", OWN);
+ CMP_SET_FLAG_MAJ("prc", PRC);
+ CMP_SET_FLAG_MAJ("mdp", MDP);
+ CMP_SET_FLAG_MAJ("cpu", CPU);
+
+ CMP_SET_FLAG_MIN("all", ALL);
+ CMP_SET_FLAG_MIN("rel", REL);
+ CMP_SET_FLAG_MIN("ops", OPS);
+ CMP_SET_FLAG_MIN("ini", INI);
+ CMP_SET_FLAG_MIN("fnd", FND);
+ CMP_SET_FLAG_MIN("pmh", PMH);
+ CMP_SET_FLAG_MIN("pms", PMS);
+ CMP_SET_FLAG_MIN("orm", ORM);
+ CMP_SET_FLAG_MIN("omr", OMR);
+ CMP_SET_FLAG_MIN("tlk", TLK);
+ CMP_SET_FLAG_MIN("tul", TUL);
+ CMP_SET_FLAG_MIN("ext", EXT);
+ CMP_SET_FLAG_MIN("exc", EXC);
+ CMP_SET_FLAG_MIN("frk", FRK);
+ CMP_SET_FLAG_MIN("att", ATT);
+ CMP_SET_FLAG_MIN("swi", SWI);
+ CMP_SET_FLAG_MIN("swo", SWO);
+ CMP_SET_FLAG_MIN("reg", REG);
+ CMP_SET_FLAG_MIN("alr", ALR);
+ CMP_SET_FLAG_MIN("rea", REA);
+ CMP_SET_FLAG_MIN("wri", WRI);
+ CMP_SET_FLAG_MIN("cfg", CFG);
+ CMP_SET_FLAG_MIN("sta", STA);
+ CMP_SET_FLAG_MIN("sto", STO);
+ CMP_SET_FLAG_MIN("bnd", BND);
+ CMP_SET_FLAG_MIN("sel", SEL);
+ else /* unrecognized keyword */
+ return EINVAL;
+
+ p += 4; /* skip keyword and separator */
+ }
+
+ pmc_debugflags = (tmpflags|level);
+
+ return 0;
+}
+
+static int
+pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ char *fence, *newstr;
+ int error;
+ unsigned int n;
+
+ (void) arg1; (void) arg2; /* unused parameters */
+
+ n = sizeof(pmc_debugstr);
+ MALLOC(newstr, char *, n, M_PMC, M_ZERO|M_WAITOK);
+ (void) strlcpy(newstr, pmc_debugstr, sizeof(pmc_debugstr));
+
+ error = sysctl_handle_string(oidp, newstr, n, req);
+
+ /* if there is a new string, parse and copy it */
+ if (error == 0 && req->newptr != NULL) {
+ fence = newstr + (n < req->newlen ? n : req->newlen);
+ if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
+ (void) strlcpy(pmc_debugstr, newstr,
+ sizeof(pmc_debugstr));
+ }
+
+ FREE(newstr, M_PMC);
+
+ return error;
+}
+#endif
+
+/*
+ * Concurrency Control
+ *
+ * The driver manages the following data structures:
+ *
+ * - target process descriptors, one per target process
+ * - owner process descriptors (and attached lists), one per owner process
+ * - lookup hash tables for owner and target processes
+ * - PMC descriptors (and attached lists)
+ * - per-cpu hardware state
+ * - the 'hook' variable through which the kernel calls into
+ * this module
+ * - the machine hardware state (managed by the MD layer)
+ *
+ * These data structures are accessed from:
+ *
+ * - thread context-switch code
+ * - interrupt handlers (possibly on multiple cpus)
+ * - kernel threads on multiple cpus running on behalf of user
+ * processes doing system calls
+ * - this driver's private kernel threads
+ *
+ * = Locks and Locking strategy =
+ *
+ * The driver uses four locking strategies for its operation:
+ *
+ * - There is a 'global' SX lock "pmc_sx" that is used to protect
+ * its 'meta-data'.
+ *
+ * Calls into the module (via syscall() or by the kernel) start with
+ * this lock being held in exclusive mode. Depending on the requested
+ * operation, the lock may be downgraded to 'shared' mode to allow
+ * more concurrent readers into the module.
+ *
+ * This SX lock is held in exclusive mode for any operations that
+ * modify the linkages between the driver's internal data structures.
+ *
+ * The 'pmc_hook' function pointer is also protected by this lock.
+ * It is only examined with the sx lock held in exclusive mode. The
+ * kernel module is allowed to be unloaded only with the sx lock
+ * held in exclusive mode. In normal syscall handling, after
+ * acquiring the pmc_sx lock we first check that 'pmc_hook' is
+ * non-null before proceeding. This prevents races between the
+ * thread unloading the module and other threads seeking to use the
+ * module.
+ *
+ * - Lookups of target process structures cannot use the global
+ * "pmc_sx" SX lock because these lookups need to happen during
+ * context switches and in other critical sections where sleeping
+ * is not allowed. We protect the target process hash table with
+ * its own private spin-mutex, "pmc_processhash_mtx". This is a
+ * 'leaf' mutex, in that no other lock is acquired while it is
+ * held. (The owner hash table is only accessed with the "pmc_sx"
+ * lock held and needs no spin-mutex of its own.)
+ *
+ * - Interrupt handlers work in a lock free manner. At interrupt
+ * time, handlers look at the PMC pointer (phw->phw_pmc) configured
+ * when the PMC was started. If this pointer is NULL, the interrupt
+ * is ignored after updating driver statistics. We ensure that this
+ * pointer is set (using an atomic operation if necessary) before the
+ * PMC hardware is started. Conversely, this pointer is unset atomically
+ * only after the PMC hardware is stopped.
+ *
+ * We ensure that everything needed for the operation of an
+ * interrupt handler is available without it needing to acquire any
+ * locks. We also ensure that a PMC's software state is destroyed only
+ * after the PMC is taken off hardware (on all CPUs).
+ *
+ * - Context-switch handling with process-private PMCs needs more
+ * care.
+ *
+ * A given process may be the target of multiple PMCs. For example,
+ * PMCATTACH and PMCDETACH may be requested by a process on one CPU
+ * while the target process is running on another. A PMC could also
+ * be getting released because its owner is exiting. We tackle
+ * these situations in the following manner:
+ *
+ * - each target process structure 'pmc_process' has an array
+ * of 'struct pmc *' pointers, one for each hardware PMC.
+ *
+ * - At context switch IN time, each "target" PMC in RUNNING state
+ * gets started on hardware and a pointer to each PMC is copied into
+ * the per-cpu phw array. The 'runcount' for the PMC is
+ * incremented.
+ *
+ * - At context switch OUT time, all process-virtual PMCs are stopped
+ * on hardware. The saved value is added to the PMC's value field
+ * only if the PMC is in a non-deleted state (the PMC's state could
+ * have changed during the current time slice).
+ *
+ * Note that in between a switch IN on a processor and the matching
+ * switch OUT, the PMC could have been released on another CPU.
+ * Therefore context switch OUT always looks at the hardware state
+ * to turn OFF PMCs, and will update a PMC's saved value only if it
+ * is reachable from the target process record.
+ *
+ * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
+ * be attached to many processes at the time of the call and could
+ * be active on multiple CPUs).
+ *
+ * We prevent further scheduling of the PMC by marking it as in
+ * state 'DELETED'. If the runcount of the PMC is non-zero then
+ * this PMC is currently running on a CPU somewhere. The thread
+ * doing the PMCRELEASE operation waits by repeatedly doing a
+ * tsleep() until the runcount comes to zero.
+ *
+ */
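+
+/*
+ * A minimal sketch of the syscall entry check described above; the
+ * actual code is the PMC_GET_SX_XLOCK() macro defined later in this
+ * file:
+ *
+ *	sx_xlock(&pmc_sx);
+ *	if (pmc_hook == NULL) {		-- module being unloaded --
+ *		sx_xunlock(&pmc_sx);
+ *		return ENOSYS;
+ *	}
+ */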
+
+/*
+ * save the cpu binding of the current kthread
+ */
+
+static void
+pmc_save_cpu_binding(struct pmc_binding *pb)
+{
+ PMCDBG(CPU,BND,2, "%s", "save-cpu");
+ mtx_lock_spin(&sched_lock);
+ pb->pb_bound = sched_is_bound(curthread);
+ pb->pb_cpu = curthread->td_oncpu;
+ mtx_unlock_spin(&sched_lock);
+ PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
+}
+
+/*
+ * restore the cpu binding of the current thread
+ */
+
+static void
+pmc_restore_cpu_binding(struct pmc_binding *pb)
+{
+ PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
+ curthread->td_oncpu, pb->pb_cpu);
+ mtx_lock_spin(&sched_lock);
+ if (pb->pb_bound)
+ sched_bind(curthread, pb->pb_cpu);
+ else
+ sched_unbind(curthread);
+ mtx_unlock_spin(&sched_lock);
+ PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
+}
+
+/*
+ * move execution to the specified cpu and bind the current thread there.
+ */
+
+static void
+pmc_select_cpu(int cpu)
+{
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
+
+ /* never move to a disabled CPU */
+ KASSERT(pmc_cpu_is_disabled(cpu) == 0, ("[pmc,%d] selecting "
+ "disabled CPU %d", __LINE__, cpu));
+
+ PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
+ mtx_lock_spin(&sched_lock);
+ sched_bind(curthread, cpu);
+ mtx_unlock_spin(&sched_lock);
+
+ KASSERT(curthread->td_oncpu == cpu,
+ ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
+ cpu, curthread->td_oncpu));
+
+ PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
+}
+
+/*
+ * Update the per-pmc histogram
+ */
+
+void
+pmc_update_histogram(struct pmc_hw *phw, uintptr_t pc)
+{
+ (void) phw;
+ (void) pc;
+}
+
+/*
+ * Send a signal to a process. This is meant to be invoked from an
+ * interrupt handler.
+ */
+
+void
+pmc_send_signal(struct pmc *pmc)
+{
+	(void) pmc;	/* shut up gcc */
+
+#if 0
+ struct proc *proc;
+ struct thread *td;
+
+ KASSERT(pmc->pm_owner != NULL,
+ ("[pmc,%d] No owner for PMC", __LINE__));
+
+ KASSERT((pmc->pm_owner->po_flags & PMC_FLAG_IS_OWNER) &&
+ (pmc->pm_owner->po_flags & PMC_FLAG_HAS_TS_PMC),
+ ("[pmc,%d] interrupting PMC owner has wrong flags 0x%x",
+ __LINE__, pmc->pm_owner->po_flags));
+
+ proc = pmc->pm_owner->po_owner;
+
+ KASSERT(curthread->td_proc == proc,
+	    ("[pmc,%d] interrupting the wrong thread (owner %p, "
+ "cur %p)", __LINE__, (void *) proc, curthread->td_proc));
+
+ mtx_lock_spin(&sched_lock);
+ td = TAILQ_FIRST(&proc->p_threads);
+ mtx_unlock_spin(&sched_lock);
+ /* XXX RACE HERE: can 'td' disappear now? */
+ trapsignal(td, SIGPROF, 0);
+ /* XXX rework this to use the regular 'psignal' interface from a
+ helper thread */
+#endif
+
+}
+
+/*
+ * remove a process owning PMCs
+ */
+
+void
+pmc_remove_owner(struct pmc_owner *po)
+{
+ struct pmc_list *pl, *tmp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
+
+ /* Remove descriptor from the owner hash table */
+ LIST_REMOVE(po, po_next);
+
+ /* pass 1: release all owned PMC descriptors */
+ LIST_FOREACH_SAFE(pl, &po->po_pmcs, pl_next, tmp) {
+
+ PMCDBG(OWN,ORM,2, "pl=%p pmc=%p", pl, pl->pl_pmc);
+
+ /* remove the associated PMC descriptor, if present */
+ if (pl->pl_pmc)
+ pmc_release_pmc_descriptor(pl->pl_pmc);
+
+ /* remove the linked list entry */
+ LIST_REMOVE(pl, pl_next);
+ FREE(pl, M_PMC);
+ }
+
+ /* pass 2: delete the pmc_list chain */
+ LIST_FOREACH_SAFE(pl, &po->po_pmcs, pl_next, tmp) {
+ KASSERT(pl->pl_pmc == NULL,
+ ("[pmc,%d] non-null pmc pointer", __LINE__));
+ LIST_REMOVE(pl, pl_next);
+ FREE(pl, M_PMC);
+ }
+
+ KASSERT(LIST_EMPTY(&po->po_pmcs),
+ ("[pmc,%d] PMC list not empty", __LINE__));
+
+ /*
+ * If this process owns a log file used for system wide logging,
+ * remove the log file.
+ *
+ * XXX rework needed.
+ */
+
+ if (po->po_flags & PMC_FLAG_OWNS_LOGFILE)
+ pmc_configure_log(po, -1);
+
+}
+
+/*
+ * remove an owner process record if all conditions are met.
+ */
+
+static void
+pmc_maybe_remove_owner(struct pmc_owner *po)
+{
+
+ PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
+
+ /*
+ * Remove owner record if
+ * - this process does not own any PMCs
+	 * - this process does not own a log file for system-wide logging
+ */
+
+ if (LIST_EMPTY(&po->po_pmcs) &&
+ ((po->po_flags & PMC_FLAG_OWNS_LOGFILE) == 0)) {
+ pmc_remove_owner(po);
+ FREE(po, M_PMC);
+ }
+}
+
+/*
+ * Add an association between a target process and a PMC.
+ */
+
+static void
+pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
+{
+ int ri;
+ struct pmc_target *pt;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL && pp != NULL,
+ ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
+
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < ((int) md->pmd_npmc - 1),
+ ("[pmc,%d] Illegal reference count %d for process record %p",
+ __LINE__, pp->pp_refcnt, (void *) pp));
+
+ ri = pm->pm_rowindex;
+
+ PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
+ pm, ri, pp);
+
+#if DEBUG
+ LIST_FOREACH(pt, &pm->pm_targets, pt_next)
+ if (pt->pt_process == pp)
+ KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
+ __LINE__, pp, pm));
+#endif
+
+ MALLOC(pt, struct pmc_target *, sizeof(struct pmc_target),
+ M_PMC, M_ZERO|M_WAITOK);
+
+ pt->pt_process = pp;
+
+ LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
+
+ atomic_store_rel_ptr(&pp->pp_pmcs[ri].pp_pmc, pm);
+
+ pp->pp_refcnt++;
+
+}
+
+/*
+ * Removes the association between a target process and a PMC.
+ */
+
+static void
+pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
+{
+ int ri;
+ struct pmc_target *ptgt;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL && pp != NULL,
+ ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
+
+ KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt < (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal ref count %d on process record %p",
+ __LINE__, pp->pp_refcnt, (void *) pp));
+
+ ri = pm->pm_rowindex;
+
+ PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
+ pm, ri, pp);
+
+ KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
+ ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
+ ri, pm, pp->pp_pmcs[ri].pp_pmc));
+
+ pp->pp_pmcs[ri].pp_pmc = NULL;
+ pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
+
+ pp->pp_refcnt--;
+
+ /* Remove the target process from the PMC structure */
+ LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
+ if (ptgt->pt_process == pp)
+ break;
+
+ KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
+ "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
+
+ PMCDBG(PRC,TUL,4, "unlink ptgt=%p", ptgt);
+
+ LIST_REMOVE(ptgt, pt_next);
+ FREE(ptgt, M_PMC);
+}
+
+/*
+ * Remove PMC descriptor 'pmc' from the owner descriptor.
+ */
+
+void
+pmc_unlink_owner(struct pmc *pm)
+{
+ struct pmc_list *pl, *tmp;
+ struct pmc_owner *po;
+
+#if DEBUG
+ KASSERT(LIST_EMPTY(&pm->pm_targets),
+ ("[pmc,%d] unlinking PMC with targets", __LINE__));
+#endif
+
+ po = pm->pm_owner;
+
+ KASSERT(po != NULL, ("[pmc,%d] No owner for PMC", __LINE__));
+
+ LIST_FOREACH_SAFE(pl, &po->po_pmcs, pl_next, tmp) {
+ if (pl->pl_pmc == pm) {
+ pl->pl_pmc = NULL;
+ pm->pm_owner = NULL;
+ return;
+ }
+ }
+
+ KASSERT(0, ("[pmc,%d] couldn't find pmc in owner list", __LINE__));
+}
+
+/*
+ * Check if PMC 'pm' may be attached to target process 't'.
+ */
+
+static int
+pmc_can_attach(struct pmc *pm, struct proc *t)
+{
+ struct proc *o; /* pmc owner */
+ struct ucred *oc, *tc; /* owner, target credentials */
+ int decline_attach, i;
+
+ /*
+ * A PMC's owner can always attach that PMC to itself.
+ */
+
+ if ((o = pm->pm_owner->po_owner) == t)
+ return 0;
+
+ PROC_LOCK(o);
+ oc = o->p_ucred;
+ crhold(oc);
+ PROC_UNLOCK(o);
+
+ PROC_LOCK(t);
+ tc = t->p_ucred;
+ crhold(tc);
+ PROC_UNLOCK(t);
+
+ /*
+ * The effective uid of the PMC owner should match at least one
+ * of the {effective,real,saved} uids of the target process.
+ */
+
+ decline_attach = oc->cr_uid != tc->cr_uid &&
+ oc->cr_uid != tc->cr_svuid &&
+ oc->cr_uid != tc->cr_ruid;
+
+ /*
+ * Every one of the target's group ids, must be in the owner's
+	 * Every one of the target's group ids must be in the owner's
+ */
+ for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
+ decline_attach = !groupmember(tc->cr_groups[i], oc);
+
+	/* check the real and saved gids too */
+ if (decline_attach == 0)
+ decline_attach = !groupmember(tc->cr_rgid, oc) ||
+ !groupmember(tc->cr_svgid, oc);
+
+ crfree(tc);
+ crfree(oc);
+
+ return !decline_attach;
+}
+
+/*
+ * Attach a process to a PMC.
+ */
+
+static int
+pmc_attach_one_process(struct proc *p, struct pmc *pm)
+{
+ int ri;
+ struct pmc_process *pp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
+ pm->pm_rowindex, p, p->p_pid, p->p_comm);
+
+ /*
+ * Locate the process descriptor corresponding to process 'p',
+ * allocating space as needed.
+ *
+ * Verify that rowindex 'pm_rowindex' is free in the process
+ * descriptor.
+ *
+ * If not, allocate space for a descriptor and link the
+ * process descriptor and PMC.
+ */
+
+ ri = pm->pm_rowindex;
+
+ if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
+ return ENOMEM;
+
+ if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
+ return EEXIST;
+
+ if (pp->pp_pmcs[ri].pp_pmc != NULL)
+ return EBUSY;
+
+ pmc_link_target_process(pm, pp);
+
+ /* mark process as using HWPMCs */
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+
+ return 0;
+}
+
+/*
+ * Attach a process and optionally its children
+ */
+
+static int
+pmc_attach_process(struct proc *p, struct pmc *pm)
+{
+ int error;
+ struct proc *top;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
+ pm->pm_rowindex, p, p->p_pid, p->p_comm);
+
+ if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
+ return pmc_attach_one_process(p, pm);
+
+ /*
+ * Traverse all child processes, attaching them to
+ * this PMC.
+ */
+
+ sx_slock(&proctree_lock);
+
+ top = p;
+
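+	/*
+	 * Iterative pre-order walk of the process tree rooted at 'top':
+	 * descend into a first child when one exists, otherwise step to
+	 * the next sibling, backing up through parents until a sibling
+	 * (or 'top' itself) is found.
+	 */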
+ for (;;) {
+ if ((error = pmc_attach_one_process(p, pm)) != 0)
+ break;
+ if (!LIST_EMPTY(&p->p_children))
+ p = LIST_FIRST(&p->p_children);
+ else for (;;) {
+ if (p == top)
+ goto done;
+ if (LIST_NEXT(p, p_sibling)) {
+ p = LIST_NEXT(p, p_sibling);
+ break;
+ }
+ p = p->p_pptr;
+ }
+ }
+
+ if (error)
+ (void) pmc_detach_process(top, pm);
+
+ done:
+ sx_sunlock(&proctree_lock);
+ return error;
+}
+
+/*
+ * Detach a process from a PMC. If there are no other PMCs tracking
+ * this process, remove the process structure from its hash table. If
+ * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
+ */
+
+static int
+pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
+{
+ int ri;
+ struct pmc_process *pp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL,
+ ("[pmc,%d] null pm pointer", __LINE__));
+
+ PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
+ pm, pm->pm_rowindex, p, p->p_pid, p->p_comm, flags);
+
+ ri = pm->pm_rowindex;
+
+ if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
+ return ESRCH;
+
+ if (pp->pp_pmcs[ri].pp_pmc != pm)
+ return EINVAL;
+
+ pmc_unlink_target_process(pm, pp);
+
+ /*
+	 * If there are no PMCs targeting this process, we remove its
+ * descriptor from the target hash table and unset the P_HWPMC
+ * flag in the struct proc.
+ */
+
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal refcnt %d for process struct %p",
+ __LINE__, pp->pp_refcnt, pp));
+
+ if (pp->pp_refcnt != 0) /* still a target of some PMC */
+ return 0;
+
+ pmc_remove_process_descriptor(pp);
+
+ if (flags & PMC_FLAG_REMOVE)
+ FREE(pp, M_PMC);
+
+ PROC_LOCK(p);
+ p->p_flag &= ~P_HWPMC;
+ PROC_UNLOCK(p);
+
+ return 0;
+}
+
+/*
+ * Detach a process and optionally its descendants from a PMC.
+ */
+
+static int
+pmc_detach_process(struct proc *p, struct pmc *pm)
+{
+ struct proc *top;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
+ pm->pm_rowindex, p, p->p_pid, p->p_comm);
+
+ if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
+ return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
+
+ /*
+ * Traverse all children, detaching them from this PMC. We
+ * ignore errors since we could be detaching a PMC from a
+ * partially attached proc tree.
+ */
+
+ sx_slock(&proctree_lock);
+
+ top = p;
+
+ for (;;) {
+ (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
+
+ if (!LIST_EMPTY(&p->p_children))
+ p = LIST_FIRST(&p->p_children);
+ else for (;;) {
+ if (p == top)
+ goto done;
+ if (LIST_NEXT(p, p_sibling)) {
+ p = LIST_NEXT(p, p_sibling);
+ break;
+ }
+ p = p->p_pptr;
+ }
+ }
+
+ done:
+ sx_sunlock(&proctree_lock);
+ return 0;
+}
+
+/*
+ * The 'hook' invoked from the kernel proper
+ */
+
+#if DEBUG
+const char *pmc_hooknames[] = {
+ "",
+ "EXIT",
+ "EXEC",
+ "FORK",
+ "CSW-IN",
+ "CSW-OUT"
+};
+#endif
+
+static int
+pmc_hook_handler(struct thread *td, int function, void *arg)
+{
+
+ KASSERT(td->td_proc->p_flag & P_HWPMC,
+ ("[pmc,%d] unregistered thread called pmc_hook()", __LINE__));
+
+ PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
+ pmc_hooknames[function], arg);
+
+ switch (function)
+ {
+
+ /*
+ * Process exit.
+ *
+ * Remove this process from all hash tables. If this process
+ * owned any PMCs, turn off those PMCs and deallocate them,
+ * removing any associations with target processes.
+ *
+ * This function will be called by the last 'thread' of a
+ * process.
+ *
+ */
+
+ case PMC_FN_PROCESS_EXIT: /* release PMCs */
+ {
+ int cpu;
+ unsigned int ri;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ struct pmc_process *pp;
+ struct pmc_owner *po;
+ struct proc *p;
+ pmc_value_t newvalue, tmp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ p = (struct proc *) arg;
+
+ /*
+ * Since this code is invoked by the last thread in an
+ * exiting process, we would have context switched IN
+ * at some prior point. Kernel mode context switches
+ * may happen any time, so we want to disable a context
+		 * switch OUT until we get any PMCs targeting this
+ * process off the hardware.
+ *
+ * We also need to atomically remove this process'
+ * entry from our target process hash table, using
+ * PMC_FLAG_REMOVE.
+ */
+
+ PMCDBG(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
+ p->p_comm);
+
+ critical_enter(); /* no preemption */
+
+ cpu = curthread->td_oncpu;
+
+ if ((pp = pmc_find_process_descriptor(p,
+ PMC_FLAG_REMOVE)) != NULL) {
+
+ PMCDBG(PRC,EXT,2,
+ "process-exit proc=%p pmc-process=%p", p, pp);
+
+ /*
+			 * This process could be the target of some PMCs.
+			 * Such PMCs will thus be running on the currently
+			 * executing CPU at this point in the code, since
+			 * we've disallowed context switches.
+ * We need to turn these PMCs off like we
+ * would do at context switch OUT time.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ /*
+ * Pick up the pmc pointer from hardware
+ * state similar to the CSW_OUT code.
+ */
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+
+ PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
+
+ if (pm == NULL ||
+ !PMC_IS_VIRTUAL_MODE(pm->pm_mode))
+ continue;
+
+ PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
+ "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
+ pm, pm->pm_state);
+
+ KASSERT(pm->pm_rowindex == ri,
+ ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
+ __LINE__, pm->pm_rowindex, ri));
+
+ KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
+ ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
+ __LINE__, pm, ri,
+ pp->pp_pmcs[ri].pp_pmc));
+
+ (void) md->pmd_stop_pmc(cpu, ri);
+
+ KASSERT(pm->pm_runcount > 0,
+ ("[pmc,%d] bad runcount ri %d rc %d",
+ __LINE__, ri, pm->pm_runcount));
+
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+ md->pmd_read_pmc(cpu, ri, &newvalue);
+ tmp = newvalue -
+ PMC_PCPU_SAVED(cpu,ri);
+
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ pm->pm_gv.pm_savedvalue += tmp;
+ pp->pp_pmcs[ri].pp_pmcval += tmp;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+ }
+
+ KASSERT((int) pm->pm_runcount >= 0,
+				    ("[pmc,%d] bad runcount ri %d rc %d",
+				    __LINE__, ri, pm->pm_runcount));
+
+ atomic_subtract_rel_32(&pm->pm_runcount,1);
+ (void) md->pmd_config_pmc(cpu, ri, NULL);
+ }
+ critical_exit(); /* ok to be pre-empted now */
+
+ /*
+ * Unlink this process from the PMCs that are
+ * targetting it. Log value at exit() time if
+			 * targeting it. Log the value at exit() time if
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
+ if (pm->pm_flags &
+ PMC_F_LOG_TC_PROCEXIT)
+ pmc_log_process_exit(pm, pp);
+ pmc_unlink_target_process(pm, pp);
+ }
+
+ FREE(pp, M_PMC);
+
+ } else
+ critical_exit(); /* pp == NULL */
+
+ /*
+		 * If the process owned PMCs, release them and free up
+		 * their memory.
+ */
+
+ if ((po = pmc_find_owner_descriptor(p)) != NULL) {
+ pmc_remove_owner(po);
+ FREE(po, M_PMC);
+ }
+
+ }
+ break;
+
+ /*
+ * Process exec()
+ */
+
+ case PMC_FN_PROCESS_EXEC:
+ {
+ int *credentials_changed;
+ unsigned int ri;
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_owner *po;
+ struct pmc_process *pp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ /*
+ * PMCs are not inherited across an exec(): remove any
+ * PMCs that this process is the owner of.
+ */
+
+ p = td->td_proc;
+
+ if ((po = pmc_find_owner_descriptor(p)) != NULL) {
+ pmc_remove_owner(po);
+ FREE(po, M_PMC);
+ }
+
+ /*
+ * If this process is the target of a PMC, check if the new
+ * credentials are compatible with the owner's permissions.
+ */
+
+ if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
+ break;
+
+ credentials_changed = arg;
+
+ PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
+ p, p->p_pid, p->p_comm, *credentials_changed);
+
+ if (*credentials_changed == 0) /* credentials didn't change */
+ break;
+
+ /*
+ * If the newly exec()'ed process has a different credential
+ * than before, allow it to be the target of a PMC only if
+		 * the PMC's owner has sufficient privilege.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
+ if (pmc_can_attach(pm, td->td_proc) != 0)
+ pmc_detach_one_process(td->td_proc,
+ pm, PMC_FLAG_NONE);
+
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
+ pp->pp_refcnt, pp));
+
+ /*
+ * If this process is no longer the target of any
+ * PMCs, we can remove the process entry and free
+ * up space.
+ */
+
+ if (pp->pp_refcnt == 0) {
+ pmc_remove_process_descriptor(pp);
+ FREE(pp, M_PMC);
+ }
+ }
+ break;
+
+ /*
+ * Process fork()
+ */
+
+ case PMC_FN_PROCESS_FORK:
+ {
+ unsigned int ri;
+ uint32_t do_descendants;
+ struct pmc *pm;
+ struct pmc_process *ppnew, *ppold;
+ struct proc *newproc;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ newproc = (struct proc *) arg;
+
+ PMCDBG(PMC,FRK,2, "process-fork p1=%p p2=%p",
+ curthread->td_proc, newproc);
+ /*
+ * If the parent process (curthread->td_proc) is a
+ * target of any PMCs, look for PMCs that are to be
+ * inherited, and link these into the new process
+ * descriptor.
+ */
+
+ if ((ppold = pmc_find_process_descriptor(
+ curthread->td_proc, PMC_FLAG_NONE)) == NULL)
+ break;
+
+ do_descendants = 0;
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
+ do_descendants |=
+ pm->pm_flags & PMC_F_DESCENDANTS;
+ if (do_descendants == 0) /* nothing to do */
+ break;
+
+ if ((ppnew = pmc_find_process_descriptor(newproc,
+ PMC_FLAG_ALLOCATE)) == NULL)
+ return ENOMEM;
+
+ /*
+ * Run through all PMCs targeting the old process and
+ * attach them to the new process.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
+ pm->pm_flags & PMC_F_DESCENDANTS)
+ pmc_link_target_process(pm, ppnew);
+
+ /*
+ * Now mark the new process as being tracked by this
+ * driver.
+ */
+
+ PROC_LOCK(newproc);
+ newproc->p_flag |= P_HWPMC;
+ PROC_UNLOCK(newproc);
+
+ }
+ break;
+
+ /*
+ * Thread context switch IN
+ */
+
+ case PMC_FN_CSW_IN:
+ {
+ int cpu;
+ unsigned int ri;
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ struct pmc_process *pp;
+ pmc_value_t newvalue;
+
+ p = td->td_proc;
+
+ if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
+ break;
+
+ KASSERT(pp->pp_proc == td->td_proc,
+ ("[pmc,%d] not my thread state", __LINE__));
+
+ critical_enter(); /* no preemption on this CPU */
+
+ cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
+
+ PMCDBG(CTX,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
+ p->p_pid, p->p_comm, pp);
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+		    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));
+
+ pc = pmc_pcpu[cpu];
+
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
+ continue;
+
+ KASSERT(PMC_IS_VIRTUAL_MODE(pm->pm_mode),
+ ("[pmc,%d] Target PMC in non-virtual mode (%d)",
+ __LINE__, pm->pm_mode));
+
+ KASSERT(pm->pm_rowindex == ri,
+ ("[pmc,%d] Row index mismatch pmc %d != ri %d",
+ __LINE__, pm->pm_rowindex, ri));
+
+ /*
+ * Only PMCs that are marked as 'RUNNING' need
+ * be placed on hardware.
+ */
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ /* increment PMC runcount */
+ atomic_add_rel_32(&pm->pm_runcount, 1);
+
+ /* configure the HWPMC we are going to use. */
+ md->pmd_config_pmc(cpu, ri, pm);
+
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(phw != NULL,
+ ("[pmc,%d] null hw pointer", __LINE__));
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
+ phw->phw_pmc, pm));
+
+ /* write out saved value and start the PMC */
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ newvalue = PMC_PCPU_SAVED(cpu, ri) =
+ pm->pm_gv.pm_savedvalue;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ md->pmd_write_pmc(cpu, ri, newvalue);
+ md->pmd_start_pmc(cpu, ri);
+
+ }
+
+ /*
+ * perform any other architecture/cpu dependent thread
+ * switch-in actions.
+ */
+
+ (void) (*md->pmd_switch_in)(pc);
+
+ critical_exit();
+
+ }
+ break;
+
+ /*
+ * Thread context switch OUT.
+ */
+
+ case PMC_FN_CSW_OUT:
+ {
+ int cpu;
+ unsigned int ri;
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ struct pmc_process *pp;
+ pmc_value_t newvalue, tmp;
+
+ /*
+ * Locate our process descriptor; this may be NULL if
+ * this process is exiting and we have already removed
+ * the process from the target process table.
+ *
+ * Note that due to kernel preemption, multiple
+ * context switches may happen while the process is
+ * exiting.
+ *
+ * Note also that if the target process cannot be
+ * found we still need to deconfigure any PMCs that
+ * are currently running on hardware.
+ */
+
+ p = td->td_proc;
+ pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
+
+ /*
+ * save PMCs
+ */
+
+ critical_enter();
+
+ cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
+
+ PMCDBG(CTX,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
+ p->p_pid, p->p_comm, pp);
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+		    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));
+
+ pc = pmc_pcpu[cpu];
+
+ /*
+		 * When a PMC gets unlinked from a target process, it is
+		 * removed from the target's pp_pmcs[] array.
+ *
+ * However, on a MP system, the target could have been
+ * executing on another CPU at the time of the unlink.
+ * So, at context switch OUT time, we need to look at
+ * the hardware to determine if a PMC is scheduled on
+ * it.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ phw = pc->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+
+ if (pm == NULL) /* nothing at this row index */
+ continue;
+
+ if (!PMC_IS_VIRTUAL_MODE(pm->pm_mode))
+ continue; /* not a process virtual PMC */
+
+ KASSERT(pm->pm_rowindex == ri,
+ ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
+ __LINE__, pm->pm_rowindex, ri));
+
+ /* Stop hardware */
+ md->pmd_stop_pmc(cpu, ri);
+
+ /* reduce this PMC's runcount */
+ atomic_subtract_rel_32(&pm->pm_runcount, 1);
+
+ /*
+ * If this PMC is associated with this process,
+ * save the reading.
+ */
+
+ if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {
+
+ KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
+ ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
+ __LINE__, pm, ri,
+ pp->pp_pmcs[ri].pp_pmc));
+
+ KASSERT(pp->pp_refcnt > 0,
+ ("[pmc,%d] pp refcnt = %d", __LINE__,
+ pp->pp_refcnt));
+
+ md->pmd_read_pmc(cpu, ri, &newvalue);
+
+ tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
+
+ KASSERT((int64_t) tmp >= 0,
+ ("[pmc,%d] negative increment cpu=%d "
+ "ri=%d newvalue=%jx saved=%jx "
+ "incr=%jx", __LINE__, cpu, ri,
+ newvalue, PMC_PCPU_SAVED(cpu,ri),
+ tmp));
+
+ /*
+ * Increment the PMC's count and this
+ * target process's count by the difference
+ * between the current reading and the
+ * saved value at context switch in time.
+ */
+
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+
+ pm->pm_gv.pm_savedvalue += tmp;
+ pp->pp_pmcs[ri].pp_pmcval += tmp;
+
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ }
+
+ /* mark hardware as free */
+ md->pmd_config_pmc(cpu, ri, NULL);
+ }
+
+ /*
+ * perform any other architecture/cpu dependent thread
+ * switch out functions.
+ */
+
+ (void) (*md->pmd_switch_out)(pc);
+
+ critical_exit();
+
+ }
+ break;
+
+ default:
+#if DEBUG
+ KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
+#endif
+ break;
+
+ }
+
+ return 0;
+}
+
+/*
+ * allocate a 'struct pmc_owner' descriptor in the owner hash table.
+ */
+
+static struct pmc_owner *
+pmc_allocate_owner_descriptor(struct proc *p)
+{
+ uint32_t hindex;
+ struct pmc_owner *po;
+ struct pmc_ownerhash *poh;
+
+ hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
+ poh = &pmc_ownerhash[hindex];
+
+	/* allocate and initialize an owner descriptor */
+ MALLOC(po, struct pmc_owner *, sizeof(struct pmc_owner),
+ M_PMC, M_WAITOK);
+
+ po->po_flags = 0;
+ po->po_owner = p;
+ LIST_INIT(&po->po_pmcs);
+ LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
+
+ PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
+ p, p->p_pid, p->p_comm, po);
+
+ return po;
+}
+
+/*
+ * find the descriptor corresponding to process 'p', adding or removing it
+ * as specified by 'mode'.
+ */
+
+static struct pmc_process *
+pmc_find_process_descriptor(struct proc *p, uint32_t mode)
+{
+ uint32_t hindex;
+ struct pmc_process *pp, *ppnew;
+ struct pmc_processhash *pph;
+
+ hindex = PMC_HASH_PTR(p, pmc_processhashmask);
+ pph = &pmc_processhash[hindex];
+
+ ppnew = NULL;
+
+ /*
+	 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
+ * cannot call malloc(9) once we hold a spin lock.
+ */
+
+ if (mode & PMC_FLAG_ALLOCATE) {
+		/* allocate additional space for 'npmc' target state entries */
+ MALLOC(ppnew, struct pmc_process *,
+ sizeof(struct pmc_process) + md->pmd_npmc *
+ sizeof(struct pmc_targetstate), M_PMC, M_ZERO|M_WAITOK);
+ }
+
+ mtx_lock_spin(&pmc_processhash_mtx);
+ LIST_FOREACH(pp, pph, pp_next)
+ if (pp->pp_proc == p)
+ break;
+
+ if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
+ LIST_REMOVE(pp, pp_next);
+
+ if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
+ ppnew != NULL) {
+ ppnew->pp_proc = p;
+ LIST_INSERT_HEAD(pph, ppnew, pp_next);
+ pp = ppnew;
+ ppnew = NULL;
+ }
+ mtx_unlock_spin(&pmc_processhash_mtx);
+
+ if (pp != NULL && ppnew != NULL)
+ FREE(ppnew, M_PMC);
+
+ return pp;
+}
+
+/*
+ * remove a process descriptor from the process hash table.
+ */
+
+static void
+pmc_remove_process_descriptor(struct pmc_process *pp)
+{
+ KASSERT(pp->pp_refcnt == 0,
+ ("[pmc,%d] Removing process descriptor %p with count %d",
+ __LINE__, pp, pp->pp_refcnt));
+
+ mtx_lock_spin(&pmc_processhash_mtx);
+ LIST_REMOVE(pp, pp_next);
+ mtx_unlock_spin(&pmc_processhash_mtx);
+}
+
+
+/*
+ * find an owner descriptor corresponding to proc 'p'
+ */
+
+static struct pmc_owner *
+pmc_find_owner_descriptor(struct proc *p)
+{
+ uint32_t hindex;
+ struct pmc_owner *po;
+ struct pmc_ownerhash *poh;
+
+ hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
+ poh = &pmc_ownerhash[hindex];
+
+ po = NULL;
+ LIST_FOREACH(po, poh, po_next)
+ if (po->po_owner == p)
+ break;
+
+ PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
+ "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
+
+ return po;
+}
+
+/*
+ * pmc_allocate_pmc_descriptor
+ *
+ * Allocate a pmc descriptor and initialize its
+ * fields.
+ */
+
+static struct pmc *
+pmc_allocate_pmc_descriptor(void)
+{
+ struct pmc *pmc;
+
+ MALLOC(pmc, struct pmc *, sizeof(struct pmc), M_PMC, M_ZERO|M_WAITOK);
+
+ if (pmc != NULL) {
+ pmc->pm_owner = NULL;
+ LIST_INIT(&pmc->pm_targets);
+ }
+
+ PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
+
+ return pmc;
+}
+
+/*
+ * Destroy a pmc descriptor.
+ */
+
+static void
+pmc_destroy_pmc_descriptor(struct pmc *pm)
+{
+ (void) pm;
+
+#if DEBUG
+ KASSERT(pm->pm_state == PMC_STATE_DELETED ||
+ pm->pm_state == PMC_STATE_FREE,
+ ("[pmc,%d] destroying non-deleted PMC", __LINE__));
+ KASSERT(LIST_EMPTY(&pm->pm_targets),
+ ("[pmc,%d] destroying pmc with targets", __LINE__));
+ KASSERT(pm->pm_owner == NULL,
+ ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
+ KASSERT(pm->pm_runcount == 0,
+ ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
+ pm->pm_runcount));
+#endif
+}
+
+/*
+ * This function does the following things:
+ *
+ * - detaches the PMC from hardware
+ * - unlinks all target processes that were attached to it
+ * - removes the PMC from its owner's list
+ * - destroys the PMC private mutex
+ *
+ * Once this function completes, the given pmc pointer can be safely
+ * FREE'd by the caller.
+ */
+
+static void
+pmc_release_pmc_descriptor(struct pmc *pm)
+{
+#if DEBUG
+ volatile int maxloop;
+#endif
+ u_int ri, cpu;
+ u_char curpri;
+ struct pmc_hw *phw;
+ struct pmc_process *pp;
+ struct pmc_target *ptgt, *tmp;
+ struct pmc_binding pb;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
+
+ ri = pm->pm_rowindex;
+
+ PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
+ pm->pm_mode);
+
+ /*
+ * First, we take the PMC off hardware.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+
+ /*
+ * A system mode PMC runs on a specific CPU. Switch
+ * to this CPU and turn hardware off.
+ */
+
+ pmc_save_cpu_binding(&pb);
+
+ cpu = pm->pm_gv.pm_cpu;
+
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+
+ pmc_select_cpu(cpu);
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
+ __LINE__, ri, phw->phw_pmc, pm));
+
+ PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
+
+ critical_enter();
+ md->pmd_stop_pmc(cpu, ri);
+ critical_exit();
+ }
+
+ PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
+
+ critical_enter();
+ md->pmd_config_pmc(cpu, ri, NULL);
+ critical_exit();
+
+ pm->pm_state = PMC_STATE_DELETED;
+
+ pmc_restore_cpu_binding(&pb);
+
+ } else if (PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+
+ /*
+ * A virtual PMC could be running on multiple CPUs at
+ * a given instant.
+ *
+ * By marking its state as DELETED, we ensure that
+ * this PMC is never further scheduled on hardware.
+ *
+ * Then we wait till all CPUs are done with this PMC.
+ */
+
+ pm->pm_state = PMC_STATE_DELETED;
+
+ /*
+ * Wait for the PMCs runcount to come to zero.
+ */
+
+#if DEBUG
+ maxloop = 100 * mp_ncpus;
+#endif
+
+ while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
+
+#if DEBUG
+ maxloop--;
+ KASSERT(maxloop > 0,
+ ("[pmc,%d] (ri%d, rc%d) waiting too long for "
+ "pmc to be free", __LINE__, pm->pm_rowindex,
+ pm->pm_runcount));
+#endif
+
+ mtx_lock_spin(&sched_lock);
+ curpri = curthread->td_priority;
+ mtx_unlock_spin(&sched_lock);
+
+ (void) tsleep((void *) pmc_release_pmc_descriptor,
+ curpri, "pmcrel", 1);
+
+ }
+
+ /*
+ * At this point the PMC is off all CPUs and cannot be
+ * freshly scheduled onto a CPU. It is now safe to
+ * unlink all targets from this PMC. If a
+ * process-record's refcount falls to zero, we remove
+ * it from the hash table. The module-wide SX lock
+ * protects us from races.
+ */
+
+ LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
+ pp = ptgt->pt_process;
+ pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
+
+ PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
+
+ /*
+ * If the target process record shows that no
+ * PMCs are attached to it, reclaim its space.
+ */
+
+ if (pp->pp_refcnt == 0) {
+ pmc_remove_process_descriptor(pp);
+ FREE(pp, M_PMC);
+ }
+ }
+
+ cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
+
+ }
+
+ /*
+ * Release any MD resources
+ */
+
+ (void) md->pmd_release_pmc(cpu, ri, pm);
+
+ /*
+ * Update row disposition
+ */
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode))
+ PMC_UNMARK_ROW_STANDALONE(ri);
+ else
+ PMC_UNMARK_ROW_THREAD(ri);
+
+ /* unlink from the owner's list */
+ if (pm->pm_owner)
+ pmc_unlink_owner(pm);
+
+ pmc_destroy_pmc_descriptor(pm);
+}
+
+/*
+ * Register an owner and a pmc.
+ */
+
+static int
+pmc_register_owner(struct proc *p, struct pmc *pmc)
+{
+ struct pmc_list *pl;
+ struct pmc_owner *po;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ MALLOC(pl, struct pmc_list *, sizeof(struct pmc_list), M_PMC,
+ M_WAITOK);
+
+ if (pl == NULL)
+ return ENOMEM;
+
+ if ((po = pmc_find_owner_descriptor(p)) == NULL) {
+ if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
+ FREE(pl, M_PMC);
+ return ENOMEM;
+ }
+ po->po_flags |= PMC_FLAG_IS_OWNER; /* real owner */
+ }
+
+ if (pmc->pm_mode == PMC_MODE_TS) {
+ /* can have only one TS mode PMC per process */
+ if (po->po_flags & PMC_FLAG_HAS_TS_PMC) {
+ FREE(pl, M_PMC);
+ return EINVAL;
+ }
+ po->po_flags |= PMC_FLAG_HAS_TS_PMC;
+ }
+
+ KASSERT(pmc->pm_owner == NULL,
+ ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
+ pmc->pm_owner = po;
+
+ pl->pl_pmc = pmc;
+
+ LIST_INSERT_HEAD(&po->po_pmcs, pl, pl_next);
+
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+
+ PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pl=%p pmc=%p",
+ po, pl, pmc);
+
+ return 0;
+}
+
+/*
+ * Return the current row disposition:
+ * == 0 => FREE
+ * > 0 => PROCESS MODE
+ * < 0 => SYSTEM MODE
+ */
+
+int
+pmc_getrowdisp(int ri)
+{
+ return pmc_pmcdisp[ri];
+}
+
+/*
+ * Check if a PMC at row index 'ri' can be allocated to the current
+ * process.
+ *
+ * Allocation can fail if:
+ * - the current process is already being profiled by a PMC at index 'ri',
+ * attached to it via OP_PMCATTACH.
+ * - the current process has already allocated a PMC at index 'ri'
+ *   via OP_PMCALLOCATE.
+ */
+
+static int
+pmc_can_allocate_rowindex(struct proc *p, unsigned int ri)
+{
+ struct pmc_list *pl;
+ struct pmc_owner *po;
+ struct pmc_process *pp;
+
+ PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d",
+ p, p->p_pid, p->p_comm, ri);
+
+ /* we shouldn't have allocated a PMC at row index 'ri' */
+ if ((po = pmc_find_owner_descriptor(p)) != NULL)
+ LIST_FOREACH(pl, &po->po_pmcs, pl_next)
+ if (pl->pl_pmc->pm_rowindex == ri)
+ return EEXIST;
+
+ /* we shouldn't be the target of any PMC ourselves at this index */
+ if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
+ if (pp->pp_pmcs[ri].pp_pmc)
+ return EEXIST;
+
+ PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
+ p, p->p_pid, p->p_comm, ri);
+
+ return 0;
+}
+
+/*
+ * Check if a given PMC at row index 'ri' can be currently used in
+ * mode 'mode'.
+ */
+
+static int
+pmc_can_allocate_row(int ri, enum pmc_mode mode)
+{
+ enum pmc_disp disp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ disp = PMC_DISP_STANDALONE;
+ else
+ disp = PMC_DISP_THREAD;
+
+ /*
+ * check disposition for PMC row 'ri':
+ *
+ * Expected disposition Row-disposition Result
+ *
+ * STANDALONE STANDALONE or FREE proceed
+ * STANDALONE THREAD fail
+ * THREAD THREAD or FREE proceed
+ * THREAD STANDALONE fail
+ */
+
+ if (!PMC_ROW_DISP_IS_FREE(ri) &&
+ !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
+ !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
+ return EBUSY;
+
+ /*
+ * All OK
+ */
+
+ PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
+
+ return 0;
+
+}
+
+/*
+ * Find a PMC descriptor with user handle 'pmcid' in owner 'po'.
+ */
+
+static struct pmc *
+pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
+{
+ struct pmc_list *pl;
+
+ KASSERT(pmcid < md->pmd_npmc,
+ ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__, pmcid,
+ md->pmd_npmc));
+
+ LIST_FOREACH(pl, &po->po_pmcs, pl_next)
+ if (pl->pl_pmc->pm_rowindex == pmcid)
+ return pl->pl_pmc;
+
+ return NULL;
+}
+
+static int
+pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
+{
+
+ struct pmc *pm;
+ struct pmc_owner *po;
+
+ PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
+
+ if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
+ return ESRCH;
+
+ if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
+ return EINVAL;
+
+ PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
+
+ *pmc = pm;
+ return 0;
+}
+
+/*
+ * Start a PMC.
+ */
+
+static int
+pmc_start(struct pmc *pm)
+{
+ int error, cpu, ri;
+ struct pmc_binding pb;
+
+ KASSERT(pm != NULL,
+ ("[pmc,%d] null pm", __LINE__));
+
+ PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, pm->pm_mode,
+ pm->pm_rowindex);
+
+ pm->pm_state = PMC_STATE_RUNNING;
+
+ if (PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+
+ /*
+ * If a PMCATTACH hadn't been done on this
+ * PMC, attach this PMC to its owner process.
+ */
+
+ if (LIST_EMPTY(&pm->pm_targets))
+ return pmc_attach_process(pm->pm_owner->po_owner, pm);
+
+ /*
+ * Nothing further to be done; thread context switch code
+ * will start/stop the PMC as appropriate.
+ */
+
+ return 0;
+
+ }
+
+ /*
+ * A system-mode PMC. Move to the CPU associated with this
+ * PMC, and start the hardware.
+ */
+
+ pmc_save_cpu_binding(&pb);
+
+ cpu = pm->pm_gv.pm_cpu;
+
+ if (pmc_cpu_is_disabled(cpu))
+ return ENXIO;
+
+ ri = pm->pm_rowindex;
+
+ pmc_select_cpu(cpu);
+
+ /*
+ * global PMCs are configured at allocation time
+ * so write out the initial value and start the PMC.
+ */
+
+ if ((error = md->pmd_write_pmc(cpu, ri,
+ PMC_IS_SAMPLING_MODE(pm->pm_mode) ?
+ pm->pm_sc.pm_reloadcount :
+ pm->pm_sc.pm_initial)) == 0)
+ error = md->pmd_start_pmc(cpu, ri);
+
+ pmc_restore_cpu_binding(&pb);
+
+ return error;
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+pmc_stop(struct pmc *pm)
+{
+ int error, cpu;
+ struct pmc_binding pb;
+
+ KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
+
+ PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm, pm->pm_mode,
+ pm->pm_rowindex);
+
+ pm->pm_state = PMC_STATE_STOPPED;
+
+ /*
+ * If the PMC is a virtual mode one, changing the state to
+ * non-RUNNING is enough to ensure that the PMC never gets
+ * scheduled.
+ *
+	 * If this PMC is currently running on a CPU, then it will be
+	 * handled correctly at the time its target process is context
+ * switched out.
+ */
+
+ if (PMC_IS_VIRTUAL_MODE(pm->pm_mode))
+ return 0;
+
+ /*
+ * A system-mode PMC. Move to the CPU associated with
+ * this PMC, and stop the hardware. We update the
+ * 'initial count' so that a subsequent PMCSTART will
+ * resume counting from the current hardware count.
+ */
+
+ pmc_save_cpu_binding(&pb);
+
+ cpu = pm->pm_gv.pm_cpu;
+
+ if (pmc_cpu_is_disabled(cpu))
+ return ENXIO;
+
+ pmc_select_cpu(cpu);
+
+ if ((error = md->pmd_stop_pmc(cpu, pm->pm_rowindex)) == 0)
+ error = md->pmd_read_pmc(cpu, pm->pm_rowindex,
+ &pm->pm_sc.pm_initial);
+
+ pmc_restore_cpu_binding(&pb);
+
+ return error;
+}
+
+
+#if DEBUG
+static const char *pmc_op_to_name[] = {
+#undef __PMC_OP
+#define __PMC_OP(N, D) #N ,
+ __PMC_OPS()
+ NULL
+};
+#endif
+
+/*
+ * The syscall interface
+ */
+
+#define PMC_GET_SX_XLOCK(...) do { \
+ sx_xlock(&pmc_sx); \
+ if (pmc_hook == NULL) { \
+ sx_xunlock(&pmc_sx); \
+ return __VA_ARGS__; \
+ } \
+} while (0)
+
+#define PMC_DOWNGRADE_SX() do { \
+ sx_downgrade(&pmc_sx); \
+ is_sx_downgraded = 1; \
+} while (0)
+
+static int
+pmc_syscall_handler(struct thread *td, void *syscall_args)
+{
+ int error, is_sx_downgraded, op;
+ struct pmc_syscall_args *c;
+ void *arg;
+
+ PMC_GET_SX_XLOCK(ENOSYS);
+
+ is_sx_downgraded = 0;
+
+ c = (struct pmc_syscall_args *) syscall_args;
+
+ op = c->pmop_code;
+ arg = c->pmop_data;
+
+ PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
+ pmc_op_to_name[op], arg);
+
+ error = 0;
+ atomic_add_int(&pmc_stats.pm_syscalls, 1);
+
+	switch (op)
+ {
+
+ /*
+ * Configure a log file.
+ *
+ * XXX This OP will be reworked.
+ */
+
+ case PMC_OP_CONFIGURELOG:
+ {
+ struct pmc_owner *po;
+ struct pmc_op_configurelog cl;
+ struct proc *p;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
+ break;
+
+ /* mark this process as owning a log file */
+ p = td->td_proc;
+ if ((po = pmc_find_owner_descriptor(p)) == NULL)
+ if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
+ return ENOMEM;
+
+ if ((error = pmc_configure_log(po, cl.pm_logfd)) != 0)
+ break;
+
+ }
+ break;
+
+
+ /*
+ * Retrieve hardware configuration.
+ */
+
+ case PMC_OP_GETCPUINFO: /* CPU information */
+ {
+ struct pmc_op_getcpuinfo gci;
+
+ gci.pm_cputype = md->pmd_cputype;
+ gci.pm_npmc = md->pmd_npmc;
+ gci.pm_nclass = md->pmd_nclass;
+ bcopy(md->pmd_classes, &gci.pm_classes,
+ sizeof(gci.pm_classes));
+ gci.pm_ncpu = mp_ncpus;
+ error = copyout(&gci, arg, sizeof(gci));
+ }
+ break;
+
+
+ /*
+ * Get module statistics
+ */
+
+ case PMC_OP_GETDRIVERSTATS:
+ {
+ struct pmc_op_getdriverstats gms;
+
+ bcopy(&pmc_stats, &gms, sizeof(gms));
+ error = copyout(&gms, arg, sizeof(gms));
+ }
+ break;
+
+
+ /*
+ * Retrieve module version number
+ */
+
+ case PMC_OP_GETMODULEVERSION:
+ {
+ error = copyout(&_pmc_version.mv_version, arg, sizeof(int));
+ }
+ break;
+
+
+ /*
+ * Retrieve the state of all the PMCs on a given
+ * CPU.
+ */
+
+ case PMC_OP_GETPMCINFO:
+ {
+ uint32_t cpu, n, npmc;
+ size_t pmcinfo_size;
+ struct pmc *pm;
+ struct pmc_info *p, *pmcinfo;
+ struct pmc_op_getpmcinfo *gpi;
+ struct pmc_owner *po;
+ struct pmc_binding pb;
+
+ PMC_DOWNGRADE_SX();
+
+ gpi = (struct pmc_op_getpmcinfo *) arg;
+
+ if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
+ break;
+
+ if (cpu >= (unsigned int) mp_ncpus) {
+ error = EINVAL;
+ break;
+ }
+
+ if (pmc_cpu_is_disabled(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /* switch to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ npmc = md->pmd_npmc;
+
+ pmcinfo_size = npmc * sizeof(struct pmc_info);
+ MALLOC(pmcinfo, struct pmc_info *, pmcinfo_size, M_PMC,
+ M_WAITOK);
+
+ p = pmcinfo;
+
+ for (n = 0; n < md->pmd_npmc; n++, p++) {
+
+ if ((error = md->pmd_describe(cpu, n, p, &pm)) != 0)
+ break;
+
+ if (PMC_ROW_DISP_IS_STANDALONE(n))
+ p->pm_rowdisp = PMC_DISP_STANDALONE;
+ else if (PMC_ROW_DISP_IS_THREAD(n))
+ p->pm_rowdisp = PMC_DISP_THREAD;
+ else
+ p->pm_rowdisp = PMC_DISP_FREE;
+
+ p->pm_ownerpid = -1;
+
+ if (pm == NULL) /* no PMC associated */
+ continue;
+
+ po = pm->pm_owner;
+
+ KASSERT(po->po_owner != NULL,
+ ("[pmc,%d] pmc_owner had a null proc pointer",
+ __LINE__));
+
+ p->pm_ownerpid = po->po_owner->p_pid;
+ p->pm_mode = pm->pm_mode;
+ p->pm_event = pm->pm_event;
+ p->pm_flags = pm->pm_flags;
+
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ p->pm_reloadcount =
+ pm->pm_sc.pm_reloadcount;
+ }
+
+ pmc_restore_cpu_binding(&pb);
+
+ /* now copy out the PMC info collected */
+ if (error == 0)
+ error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
+
+ FREE(pmcinfo, M_PMC);
+ }
+ break;
+
+
+ /*
+ * Set the administrative state of a PMC. I.e. whether
+ * the PMC is to be used or not.
+ */
+
+ case PMC_OP_PMCADMIN:
+ {
+ int cpu, ri;
+ enum pmc_state request;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ struct pmc_op_pmcadmin pma;
+ struct pmc_binding pb;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(td == curthread,
+ ("[pmc,%d] td != curthread", __LINE__));
+
+ if (suser(td) || jailed(td->td_ucred)) {
+ error = EPERM;
+ break;
+ }
+
+ if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
+ break;
+
+ cpu = pma.pm_cpu;
+
+ if (cpu < 0 || cpu >= mp_ncpus) {
+ error = EINVAL;
+ break;
+ }
+
+ if (pmc_cpu_is_disabled(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ request = pma.pm_state;
+
+ if (request != PMC_STATE_DISABLED &&
+ request != PMC_STATE_FREE) {
+ error = EINVAL;
+ break;
+ }
+
+ ri = pma.pm_pmc; /* pmc id == row index */
+ if (ri < 0 || ri >= (int) md->pmd_npmc) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * We can't disable a PMC with a row-index allocated
+ * for process virtual PMCs.
+ */
+
+ if (PMC_ROW_DISP_IS_THREAD(ri) &&
+ request == PMC_STATE_DISABLED) {
+ error = EBUSY;
+ break;
+ }
+
+ /*
+ * otherwise, this PMC on this CPU is either free or
+ * in system-wide mode.
+ */
+
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ pc = pmc_pcpu[cpu];
+ phw = pc->pc_hwpmcs[ri];
+
+ /*
+ * XXX do we need some kind of 'forced' disable?
+ */
+
+ if (phw->phw_pmc == NULL) {
+ if (request == PMC_STATE_DISABLED &&
+ (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
+ phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
+ PMC_MARK_ROW_STANDALONE(ri);
+ } else if (request == PMC_STATE_FREE &&
+ (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
+ phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
+ PMC_UNMARK_ROW_STANDALONE(ri);
+ }
+ /* other cases are a no-op */
+ } else
+ error = EBUSY;
+
+ pmc_restore_cpu_binding(&pb);
+ }
+ break;
+
+
+ /*
+ * Allocate a PMC.
+ */
+
+ case PMC_OP_PMCALLOCATE:
+ {
+ uint32_t caps;
+ u_int cpu;
+ int n;
+ enum pmc_mode mode;
+ struct pmc *pmc;
+ struct pmc_op_pmcallocate pa;
+ struct pmc_binding pb;
+
+ if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
+ break;
+
+ caps = pa.pm_caps;
+ mode = pa.pm_mode;
+ cpu = pa.pm_cpu;
+
+ if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
+ mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
+ (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) mp_ncpus)) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * Virtual PMCs should only ask for a default CPU.
+ * System mode PMCs need to specify a non-default CPU.
+ */
+
+ if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
+ (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * Check that a disabled CPU is not being asked for.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode) && pmc_cpu_is_disabled(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /*
+ * Refuse an allocation for a system-wide PMC if this
+ * process has been jailed, or if this process lacks
+ * super-user credentials and the sysctl tunable
+ * 'security.bsd.unprivileged_syspmcs' is zero.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode)) {
+ if (jailed(curthread->td_ucred))
+ error = EPERM;
+ else if (suser(curthread) &&
+ (pmc_unprivileged_syspmcs == 0))
+ error = EPERM;
+ }
+
+ if (error)
+ break;
+
+ /*
+	 * Check that 'pm_flags' contains only valid bits.
+ */
+
+ if ((pa.pm_flags & ~(PMC_F_DESCENDANTS|PMC_F_LOG_TC_CSW))
+ != 0) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * All sampling mode PMCs need to be able to interrupt the
+ * CPU.
+ */
+
+ if (PMC_IS_SAMPLING_MODE(mode)) {
+ caps |= PMC_CAP_INTERRUPT;
+ error = ENOSYS; /* for snapshot 6 */
+ break;
+ }
+
+ PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
+ pa.pm_ev, caps, mode, cpu);
+
+ pmc = pmc_allocate_pmc_descriptor();
+ pmc->pm_event = pa.pm_ev;
+ pmc->pm_class = pa.pm_class;
+ pmc->pm_state = PMC_STATE_FREE;
+ pmc->pm_mode = mode;
+ pmc->pm_caps = caps;
+ pmc->pm_flags = pa.pm_flags;
+
+ /* switch thread to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+
+#define PMC_IS_SHAREABLE_PMC(cpu, n) \
+ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
+ PMC_PHW_FLAG_IS_SHAREABLE)
+#define PMC_IS_UNALLOCATED(cpu, n) \
+ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
+
+ if (PMC_IS_SYSTEM_MODE(mode)) {
+ pmc_select_cpu(cpu);
+ for (n = 0; n < (int) md->pmd_npmc; n++)
+ if (pmc_can_allocate_row(n, mode) == 0 &&
+ pmc_can_allocate_rowindex(
+ curthread->td_proc, n) == 0 &&
+ (PMC_IS_UNALLOCATED(cpu, n) ||
+ PMC_IS_SHAREABLE_PMC(cpu, n)) &&
+ md->pmd_allocate_pmc(cpu, n, pmc,
+ &pa) == 0)
+ break;
+ } else {
+ /* Process virtual mode */
+ for (n = 0; n < (int) md->pmd_npmc; n++) {
+ if (pmc_can_allocate_row(n, mode) == 0 &&
+ pmc_can_allocate_rowindex(
+ curthread->td_proc, n) == 0 &&
+ md->pmd_allocate_pmc(curthread->td_oncpu,
+ n, pmc, &pa) == 0)
+ break;
+ }
+ }
+
+#undef PMC_IS_UNALLOCATED
+#undef PMC_IS_SHAREABLE_PMC
+
+ pmc_restore_cpu_binding(&pb);
+
+ if (n == (int) md->pmd_npmc) {
+ pmc_destroy_pmc_descriptor(pmc);
+ FREE(pmc, M_PMC);
+ pmc = NULL;
+ error = EINVAL;
+ break;
+ }
+
+ PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d -> n=%d",
+ pmc->pm_event, pmc->pm_class, pmc->pm_mode, n);
+
+ /*
+	 * Configure system-wide ('global') PMCs immediately.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(pmc->pm_mode))
+ if ((error = md->pmd_config_pmc(cpu, n, pmc)) != 0) {
+ (void) md->pmd_release_pmc(cpu, n, pmc);
+ pmc_destroy_pmc_descriptor(pmc);
+ FREE(pmc, M_PMC);
+ pmc = NULL;
+ break;
+ }
+
+ /*
+ * Mark the row index allocated.
+ */
+
+ pmc->pm_rowindex = n;
+ pmc->pm_state = PMC_STATE_ALLOCATED;
+
+ /*
+ * mark row disposition
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ PMC_MARK_ROW_STANDALONE(n);
+ else
+ PMC_MARK_ROW_THREAD(n);
+
+ /*
+	 * If this is a system-wide PMC, record the CPU it
+	 * was allocated on.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ pmc->pm_gv.pm_cpu = cpu;
+
+ /*
+ * Register this PMC with the current thread as its owner.
+ */
+
+ if ((error =
+ pmc_register_owner(curthread->td_proc, pmc)) != 0) {
+ pmc_release_pmc_descriptor(pmc);
+ FREE(pmc, M_PMC);
+ pmc = NULL;
+ break;
+ }
+
+ /*
+ * Return the allocated index.
+ */
+
+ pa.pm_pmcid = n;
+
+ error = copyout(&pa, arg, sizeof(pa));
+ }
+ break;
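+
+	/*
+	 * A hedged userland sketch of driving the allocation path
+	 * above via libpmc; the event name is only an example and
+	 * error handling is minimal (illustrative, not driver code):
+	 */
+#if 0
+	pmc_id_t pmcid;
+
+	if (pmc_init() < 0)		/* check for kernel support */
+		err(EX_UNAVAILABLE, "pmc_init");
+	if (pmc_allocate("k7-dc-accesses", PMC_MODE_TC, 0, PMC_CPU_ANY,
+	    &pmcid) < 0)		/* process-virtual counting mode */
+		err(EX_OSERR, "pmc_allocate");
+#endif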
+
+
+ /*
+ * Attach a PMC to a process.
+ */
+
+ case PMC_OP_PMCATTACH:
+ {
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_op_pmcattach a;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &a, sizeof(a))) != 0)
+ break;
+
+ if (a.pm_pid < 0) {
+ error = EINVAL;
+ break;
+ } else if (a.pm_pid == 0)
+ a.pm_pid = td->td_proc->p_pid;
+
+ if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
+ break;
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ error = EINVAL;
+ break;
+ }
+
+ /* PMCs may be (re)attached only when allocated or stopped */
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+ error = EBUSY;
+ break;
+ } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
+ pm->pm_state != PMC_STATE_STOPPED) {
+ error = EINVAL;
+ break;
+ }
+
+ /* lookup pid */
+ if ((p = pfind(a.pm_pid)) == NULL) {
+ error = ESRCH;
+ break;
+ }
+
+ /*
+		 * Ignore processes that are in the middle of exiting.
+ */
+ if (p->p_flag & P_WEXIT) {
+ error = ESRCH;
+ PROC_UNLOCK(p); /* pfind() returns a locked process */
+ break;
+ }
+
+ /*
+ * we are allowed to attach a PMC to a process if
+ * we can debug it.
+ */
+ error = p_candebug(curthread, p);
+
+ PROC_UNLOCK(p);
+
+ if (error == 0)
+ error = pmc_attach_process(p, pm);
+ }
+ break;
+
+
+ /*
+ * Detach an attached PMC from a process.
+ */
+
+ case PMC_OP_PMCDETACH:
+ {
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_op_pmcattach a;
+
+ if ((error = copyin(arg, &a, sizeof(a))) != 0)
+ break;
+
+ if (a.pm_pid < 0) {
+ error = EINVAL;
+ break;
+ } else if (a.pm_pid == 0)
+ a.pm_pid = td->td_proc->p_pid;
+
+ if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
+ break;
+
+ if ((p = pfind(a.pm_pid)) == NULL) {
+ error = ESRCH;
+ break;
+ }
+
+ /*
+ * Treat processes that are in the process of exiting
+ * as if they were not present.
+ */
+
+ if (p->p_flag & P_WEXIT)
+ error = ESRCH;
+
+ PROC_UNLOCK(p); /* pfind() returns a locked process */
+
+ if (error == 0)
+ error = pmc_detach_process(p, pm);
+ }
+ break;
+
+
+ /*
+ * Release an allocated PMC
+ */
+
+ case PMC_OP_PMCRELEASE:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_owner *po;
+ struct pmc_op_simple sp;
+
+ /*
+ * Find PMC pointer for the named PMC.
+ *
+ * Use pmc_release_pmc_descriptor() to switch off the
+ * PMC, remove all its target threads, and remove the
+ * PMC from its owner's list.
+ *
+ * Remove the owner record if this is the last PMC
+ * owned.
+ *
+ * Free up space.
+ */
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ po = pm->pm_owner;
+ pmc_release_pmc_descriptor(pm);
+ pmc_maybe_remove_owner(po);
+
+ FREE(pm, M_PMC);
+ }
+ break;
+
+
+ /*
+ * Read and/or write a PMC.
+ */
+
+ case PMC_OP_PMCRW:
+ {
+ uint32_t cpu, ri;
+ struct pmc *pm;
+ struct pmc_op_pmcrw *pprw;
+ struct pmc_op_pmcrw prw;
+ struct pmc_binding pb;
+ pmc_value_t oldvalue;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
+ break;
+
+ PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
+ prw.pm_flags);
+
+ /* must have at least one flag set */
+ if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
+ error = EINVAL;
+ break;
+ }
+
+ /* locate pmc descriptor */
+ if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
+ break;
+
+		/* only allocated, stopped or running PMCs may be read */
+ if (pm->pm_state != PMC_STATE_ALLOCATED &&
+ pm->pm_state != PMC_STATE_STOPPED &&
+ pm->pm_state != PMC_STATE_RUNNING) {
+ error = EINVAL;
+ break;
+ }
+
+		/* a new value may not be written to a running PMC */
+ if (pm->pm_state == PMC_STATE_RUNNING &&
+ (prw.pm_flags & PMC_F_NEWVALUE)) {
+ error = EBUSY;
+ break;
+ }
+
+ if (PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+
+ /* read/write the saved value in the PMC record */
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ if (prw.pm_flags & PMC_F_OLDVALUE)
+ oldvalue = pm->pm_gv.pm_savedvalue;
+ if (prw.pm_flags & PMC_F_NEWVALUE)
+ pm->pm_gv.pm_savedvalue = prw.pm_value;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ } else { /* System mode PMCs */
+ cpu = pm->pm_gv.pm_cpu;
+ ri = pm->pm_rowindex;
+
+ if (pmc_cpu_is_disabled(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /* move this thread to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ /* save old value */
+ if (prw.pm_flags & PMC_F_OLDVALUE)
+ if ((error = (*md->pmd_read_pmc)(cpu, ri,
+ &oldvalue)))
+ goto error;
+ /* write out new value */
+ if (prw.pm_flags & PMC_F_NEWVALUE)
+ error = (*md->pmd_write_pmc)(cpu, ri,
+ prw.pm_value);
+ error:
+ pmc_restore_cpu_binding(&pb);
+ if (error)
+ break;
+ }
+
+ pprw = (struct pmc_op_pmcrw *) arg;
+
+#if DEBUG
+		/* use prw.pm_pmcid: 'ri' is only set on the system-mode path */
+		if (prw.pm_flags & PMC_F_NEWVALUE)
+			PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
+			    prw.pm_pmcid, prw.pm_value, oldvalue);
+		else
+			PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx",
+			    prw.pm_pmcid, oldvalue);
+#endif
+
+ /* return old value if requested */
+ if (prw.pm_flags & PMC_F_OLDVALUE)
+ if ((error = copyout(&oldvalue, &pprw->pm_value,
+ sizeof(prw.pm_value))))
+ break;
+
+ /*
+ * send a signal (SIGIO) to the owner if it is trying to read
+ * a PMC with no target processes attached.
+ */
+
+ if (LIST_EMPTY(&pm->pm_targets) &&
+ (prw.pm_flags & PMC_F_OLDVALUE)) {
+ PROC_LOCK(curthread->td_proc);
+ psignal(curthread->td_proc, SIGIO);
+ PROC_UNLOCK(curthread->td_proc);
+ }
+ }
+ break;
+
+
+ /*
+ * Set the sampling rate for a sampling mode PMC and the
+ * initial count for a counting mode PMC.
+ */
+
+ case PMC_OP_PMCSETCOUNT:
+ {
+ struct pmc *pm;
+ struct pmc_op_pmcsetcount sc;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
+ break;
+
+ if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
+ break;
+
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+ error = EBUSY;
+ break;
+ }
+
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ pm->pm_sc.pm_reloadcount = sc.pm_count;
+ else
+ pm->pm_sc.pm_initial = sc.pm_count;
+ }
+ break;
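+
+	/*
+	 * Userland counterpart (sketch): pmc_set(3) supplies the
+	 * initial count of a counting PMC or the reload count of a
+	 * sampling PMC; the PMC must not be running.  Illustrative
+	 * only:
+	 */
+#if 0
+	/* 'pmcid' comes from a prior pmc_allocate() call */
+	if (pmc_set(pmcid, 0) < 0)
+		err(EX_OSERR, "pmc_set");
+#endif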
+
+
+ /*
+ * Start a PMC.
+ */
+
+ case PMC_OP_PMCSTART:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_op_simple sp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ KASSERT(pmcid == pm->pm_rowindex,
+ ("[pmc,%d] row index %d != id %d", __LINE__,
+ pm->pm_rowindex, pmcid));
+
+ if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
+ break;
+ else if (pm->pm_state != PMC_STATE_STOPPED &&
+ pm->pm_state != PMC_STATE_ALLOCATED) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmc_start(pm);
+ }
+ break;
+
+
+ /*
+ * Stop a PMC.
+ */
+
+ case PMC_OP_PMCSTOP:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_op_simple sp;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ /*
+ * Mark the PMC as inactive and invoke the MD stop
+ * routines if needed.
+ */
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ KASSERT(pmcid == pm->pm_rowindex,
+ ("[pmc,%d] row index %d != pmcid %d", __LINE__,
+ pm->pm_rowindex, pmcid));
+
+ if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
+ break;
+ else if (pm->pm_state != PMC_STATE_RUNNING) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmc_stop(pm);
+ }
+ break;
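+
+	/*
+	 * Putting the attach/start/read/stop operations together from
+	 * userland (sketch; 'do_work' is a hypothetical workload and
+	 * error handling is elided for brevity):
+	 */
+#if 0
+	pmc_value_t v;
+
+	/* 'pmcid' comes from a prior pmc_allocate() call */
+	pmc_attach(pmcid, 0);		/* pid 0: attach to self */
+	pmc_start(pmcid);
+	do_work();			/* hypothetical workload */
+	pmc_stop(pmcid);
+	pmc_read(pmcid, &v);
+	printf("count: %ju\n", (uintmax_t) v);
+	pmc_release(pmcid);
+#endif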
+
+
+ /*
+ * Write a user-entry to the log file.
+ */
+
+ case PMC_OP_WRITELOG:
+ {
+
+ PMC_DOWNGRADE_SX();
+
+ /*
+ * flush all per-cpu hash tables
+ * append user-log entry
+ */
+
+ error = ENOSYS;
+ }
+ break;
+
+
+#if defined(__i386__) || defined(__amd64__)
+
+	 * Machine-dependent operation for i386 and amd64 processors.
+ * Machine dependent operation for i386-class processors.
+ *
+ * Retrieve the MSR number associated with the counter
+ * 'pmc_id'. This allows processes to directly use RDPMC
+ * instructions to read their PMCs, without the overhead of a
+ * system call.
+ */
+
+ case PMC_OP_PMCX86GETMSR:
+ {
+ int ri;
+ struct pmc *pm;
+ struct pmc_op_x86_getmsr gm;
+
+ PMC_DOWNGRADE_SX();
+
+ /* CPU has no 'GETMSR' support */
+ if (md->pmd_get_msr == NULL) {
+ error = ENOSYS;
+ break;
+ }
+
+ if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
+ break;
+
+ if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
+ break;
+
+ /*
+ * The allocated PMC needs to be a process virtual PMC,
+ * i.e., of type T[CS].
+ *
+ * Global PMCs can only be read using the PMCREAD
+ * operation since they may be allocated on a
+ * different CPU than the one we could be running on
+ * at the time of the read.
+ */
+
+ if (!PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+ error = EINVAL;
+ break;
+ }
+
+ ri = pm->pm_rowindex;
+
+		/* errno values are positive, so test for "!= 0" */
+		if ((error = (*md->pmd_get_msr)(ri, &gm.pm_msr)) != 0)
+			break;
+		if ((error = copyout(&gm, arg, sizeof(gm))) != 0)
+			break;
+ }
+ break;
+#endif
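+
+	/*
+	 * Userland sketch for the operation above: read a process-
+	 * virtual PMC directly with RDPMC.  Treating the value
+	 * returned in 'pm_msr' as the RDPMC index is a simplifying
+	 * assumption made for illustration:
+	 */
+#if 0
+	static __inline uint64_t
+	rdpmc_read(uint32_t index)
+	{
+		uint32_t lo, hi;
+
+		__asm __volatile("rdpmc" : "=a" (lo), "=d" (hi)
+		    : "c" (index));
+		return (((uint64_t) hi << 32) | lo);
+	}
+#endif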
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ if (is_sx_downgraded)
+ sx_sunlock(&pmc_sx);
+ else
+ sx_xunlock(&pmc_sx);
+
+ if (error)
+ atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
+
+ return error;
+}
+
+/*
+ * Helper functions
+ */
+
+/*
+ * Configure a log file.
+ */
+
+static int
+pmc_configure_log(struct pmc_owner *po, int logfd)
+{
+ struct proc *p;
+
+	return ENOSYS;	/* for now; the code below is not yet reachable */
+
+ p = po->po_owner;
+
+ if (po->po_logfd < 0 && logfd < 0) /* nothing to do */
+ return 0;
+
+ if (po->po_logfd >= 0 && logfd < 0) {
+ /* deconfigure log */
+ /* XXX */
+ po->po_flags &= ~PMC_FLAG_OWNS_LOGFILE;
+ pmc_maybe_remove_owner(po);
+
+ } else if (po->po_logfd < 0 && logfd >= 0) {
+ /* configure log file */
+ /* XXX */
+ po->po_flags |= PMC_FLAG_OWNS_LOGFILE;
+
+ /* mark process as using HWPMCs */
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+ } else
+ return EBUSY;
+
+ return 0;
+}
+
+/*
+ * Log an exit event to the PMC owner's log file.
+ */
+
+static void
+pmc_log_process_exit(struct pmc *pm, struct pmc_process *pp)
+{
+ KASSERT(pm->pm_flags & PMC_F_LOG_TC_PROCEXIT,
+ ("[pmc,%d] log-process-exit called gratuitously", __LINE__));
+
+ (void) pm;
+ (void) pp;
+
+ return;
+}
+
+/*
+ * Event handlers.
+ */
+
+/*
+ * Handle a process exit.
+ *
+ * XXX This eventhandler gets called early in the exit process.
+ * Consider using a 'hook' invocation from thread_exit() or equivalent
+ * spot. Another negative is that kse_exit doesn't seem to call
+ * exit1() [??].
+ */
+
+static void
+pmc_process_exit(void *arg __unused, struct proc *p)
+{
+ int is_using_hwpmcs;
+
+ PROC_LOCK(p);
+ is_using_hwpmcs = p->p_flag & P_HWPMC;
+ PROC_UNLOCK(p);
+
+ if (is_using_hwpmcs) {
+ PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
+ p->p_comm);
+
+ PMC_GET_SX_XLOCK();
+ (void) pmc_hook_handler(curthread, PMC_FN_PROCESS_EXIT,
+ (void *) p);
+ sx_xunlock(&pmc_sx);
+ }
+}
+
+/*
+ * Handle a process fork.
+ *
+ * If the parent process 'p1' is under HWPMC monitoring, then copy
+ * over any attached PMCs that have 'do_descendants' semantics.
+ */
+
+static void
+pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *p2,
+ int flags)
+{
+ int is_using_hwpmcs;
+
+ (void) flags; /* unused parameter */
+
+ PROC_LOCK(p1);
+ is_using_hwpmcs = p1->p_flag & P_HWPMC;
+ PROC_UNLOCK(p1);
+
+ if (is_using_hwpmcs) {
+ PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s)", p1,
+ p1->p_pid, p1->p_comm);
+ PMC_GET_SX_XLOCK();
+ (void) pmc_hook_handler(curthread, PMC_FN_PROCESS_FORK,
+ (void *) p2);
+ sx_xunlock(&pmc_sx);
+ }
+}
+
+
+/*
+ * initialization
+ */
+
+static const char *pmc_name_of_pmcclass[] = {
+#undef __PMC_CLASS
+#define __PMC_CLASS(N) #N ,
+ __PMC_CLASSES()
+};
+
+static int
+pmc_initialize(void)
+{
+ int error, cpu, n;
+ struct pmc_binding pb;
+
+ md = NULL;
+ error = 0;
+
+#if DEBUG
+ /* parse debug flags first */
+ if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
+ pmc_debugstr, sizeof(pmc_debugstr)))
+ pmc_debugflags_parse(pmc_debugstr,
+ pmc_debugstr+strlen(pmc_debugstr));
+#endif
+
+ PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
+
+ /*
+ * check sysctl parameters
+ */
+
+ if (pmc_hashsize <= 0) {
+ (void) printf("pmc: sysctl variable \""
+ PMC_SYSCTL_NAME_PREFIX "hashsize\" must be greater than "
+ "zero\n");
+ pmc_hashsize = PMC_HASH_SIZE;
+ }
+
+#if defined(__i386__)
+	/* determine the CPU kind; this is i386-specific */
+ if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+ md = pmc_amd_initialize();
+ else if (strcmp(cpu_vendor, "GenuineIntel") == 0)
+ md = pmc_intel_initialize();
+ /* XXX: what about the other i386 CPU manufacturers? */
+#elif defined(__amd64__)
+ if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+ md = pmc_amd_initialize();
+#else /* other architectures */
+ md = NULL;
+#endif
+
+ if (md == NULL || md->pmd_init == NULL)
+ return ENOSYS;
+
+ /* allocate space for the per-cpu array */
+ MALLOC(pmc_pcpu, struct pmc_cpu **, mp_ncpus * sizeof(struct pmc_cpu *),
+ M_PMC, M_WAITOK|M_ZERO);
+
+ /* per-cpu 'saved values' for managing process-mode PMCs */
+ MALLOC(pmc_pcpu_saved, pmc_value_t *,
+ sizeof(pmc_value_t) * mp_ncpus * md->pmd_npmc, M_PMC, M_WAITOK);
+
+ /* perform cpu dependent initialization */
+ pmc_save_cpu_binding(&pb);
+ for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ if (pmc_cpu_is_disabled(cpu))
+ continue;
+ pmc_select_cpu(cpu);
+ if ((error = md->pmd_init(cpu)) != 0)
+ break;
+ }
+ pmc_restore_cpu_binding(&pb);
+
+ if (error != 0)
+ return error;
+
+ /* allocate space for the row disposition array */
+ pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
+ M_PMC, M_WAITOK|M_ZERO);
+
+ KASSERT(pmc_pmcdisp != NULL,
+ ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
+
+ /* mark all PMCs as available */
+ for (n = 0; n < (int) md->pmd_npmc; n++)
+ PMC_MARK_ROW_FREE(n);
+
+ /* allocate thread hash tables */
+ pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
+ &pmc_ownerhashmask);
+
+ pmc_processhash = hashinit(pmc_hashsize, M_PMC,
+ &pmc_processhashmask);
+ mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc", MTX_SPIN);
+
+ /* allocate a pool of spin mutexes */
+ pmc_mtxpool = mtx_pool_create("pmc", pmc_mtxpool_size, MTX_SPIN);
+
+ PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
+ "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
+ pmc_processhash, pmc_processhashmask);
+
+ /* register process {exit,fork,exec} handlers */
+ pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
+ pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
+ pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
+ pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
+
+ /* set hook functions */
+ pmc_intr = md->pmd_intr;
+ pmc_hook = pmc_hook_handler;
+
+ if (error == 0) {
+ printf(PMC_MODULE_NAME ":");
+ for (n = 0; n < (int) md->pmd_nclass; n++)
+ printf(" %s(%d)",
+ pmc_name_of_pmcclass[md->pmd_classes[n]],
+ md->pmd_nclasspmcs[n]);
+ printf("\n");
+ }
+
+ return error;
+}
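+
+/*
+ * Example of seeding the tunables consulted above from loader.conf,
+ * assuming the conventional "kern.hwpmc." expansion of
+ * PMC_SYSCTL_NAME_PREFIX (illustrative):
+ *
+ *	# /boot/loader.conf
+ *	kern.hwpmc.hashsize="256"
+ */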
+
+/* prepare to be unloaded */
+static void
+pmc_cleanup(void)
+{
+ int cpu;
+ struct pmc_ownerhash *ph;
+ struct pmc_owner *po, *tmp;
+ struct pmc_binding pb;
+#if DEBUG
+ struct pmc_processhash *prh;
+#endif
+
+ PMCDBG(MOD,INI,0, "%s", "cleanup");
+
+ pmc_intr = NULL; /* no more interrupts please */
+
+ sx_xlock(&pmc_sx);
+ if (pmc_hook == NULL) { /* being unloaded already */
+ sx_xunlock(&pmc_sx);
+ return;
+ }
+
+ pmc_hook = NULL; /* prevent new threads from entering module */
+
+ /* deregister event handlers */
+ EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
+ EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
+
+ /* send SIGBUS to all owner threads, free up allocations */
+ if (pmc_ownerhash)
+ for (ph = pmc_ownerhash;
+ ph <= &pmc_ownerhash[pmc_ownerhashmask];
+ ph++) {
+ LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
+ pmc_remove_owner(po);
+
+ /* send SIGBUS to owner processes */
+ PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
+ "(%d, %s)", po->po_owner,
+ po->po_owner->p_pid,
+ po->po_owner->p_comm);
+
+ PROC_LOCK(po->po_owner);
+ psignal(po->po_owner, SIGBUS);
+ PROC_UNLOCK(po->po_owner);
+ FREE(po, M_PMC);
+ }
+ }
+
+ /* reclaim allocated data structures */
+ if (pmc_mtxpool)
+ mtx_pool_destroy(&pmc_mtxpool);
+
+ mtx_destroy(&pmc_processhash_mtx);
+ if (pmc_processhash) {
+#if DEBUG
+ struct pmc_process *pp;
+
+ PMCDBG(MOD,INI,3, "%s", "destroy process hash");
+ for (prh = pmc_processhash;
+ prh <= &pmc_processhash[pmc_processhashmask];
+ prh++)
+ LIST_FOREACH(pp, prh, pp_next)
+ PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
+#endif
+
+ hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
+ pmc_processhash = NULL;
+ }
+
+ if (pmc_ownerhash) {
+ PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
+ hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
+ pmc_ownerhash = NULL;
+ }
+
+ /* do processor dependent cleanup */
+ PMCDBG(MOD,INI,3, "%s", "md cleanup");
+ if (md) {
+ pmc_save_cpu_binding(&pb);
+ for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
+ cpu, pmc_pcpu[cpu]);
+ if (pmc_cpu_is_disabled(cpu))
+ continue;
+ pmc_select_cpu(cpu);
+ if (pmc_pcpu[cpu])
+ (void) md->pmd_cleanup(cpu);
+ }
+ FREE(md, M_PMC);
+ md = NULL;
+ pmc_restore_cpu_binding(&pb);
+ }
+
+ /* deallocate per-cpu structures */
+ FREE(pmc_pcpu, M_PMC);
+ pmc_pcpu = NULL;
+
+ FREE(pmc_pcpu_saved, M_PMC);
+ pmc_pcpu_saved = NULL;
+
+ if (pmc_pmcdisp) {
+ FREE(pmc_pmcdisp, M_PMC);
+ pmc_pmcdisp = NULL;
+ }
+
+ sx_xunlock(&pmc_sx); /* we are done */
+}
+
+/*
+ * The module event handler, called at module load and unload time.
+ */
+
+static int
+load(struct module *module __unused, int cmd, void *arg __unused)
+{
+ int error;
+
+ error = 0;
+
+ switch (cmd) {
+ case MOD_LOAD :
+ /* initialize the subsystem */
+ error = pmc_initialize();
+ if (error != 0)
+ break;
+ PMCDBG(MOD,INI,1, "syscall=%d ncpus=%d",
+ pmc_syscall_num, mp_ncpus);
+ break;
+
+
+ case MOD_UNLOAD :
+ case MOD_SHUTDOWN:
+ pmc_cleanup();
+ PMCDBG(MOD,INI,1, "%s", "unloaded");
+ break;
+
+ default :
+ error = EINVAL; /* XXX should panic(9) */
+ break;
+ }
+
+ return error;
+}
+
+/* memory pool */
+MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");
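+
+/*
+ * For reference, a minimal sketch of the conventional registration of
+ * the 'load' handler above with the module system; the driver's real
+ * declaration (it is a syscall module) lives outside the lines shown
+ * here, so this is an illustration only:
+ */
+#if 0
+static moduledata_t pmc_mod = {
+	PMC_MODULE_NAME,	/* module name */
+	load,			/* event handler */
+	NULL			/* extra data */
+};
+DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
+#endif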
diff --git a/sys/hwpmc/hwpmc_pentium.c b/sys/hwpmc/hwpmc_pentium.c
new file mode 100644
index 0000000..9a02f41
--- /dev/null
+++ b/sys/hwpmc/hwpmc_pentium.c
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * Intel Pentium PMCs
+ */
+
+int
+pmc_initialize_p5(struct pmc_mdep *pmc_mdep)
+{
+ (void) pmc_mdep;
+ return ENOSYS; /* nothing here yet */
+}
diff --git a/sys/hwpmc/hwpmc_piv.c b/sys/hwpmc/hwpmc_piv.c
new file mode 100644
index 0000000..292fbba
--- /dev/null
+++ b/sys/hwpmc/hwpmc_piv.c
@@ -0,0 +1,1484 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * PENTIUM 4 SUPPORT
+ *
+ * The P4 has 18 PMCs, divided into 4 groups with 4,4,4 and 6 PMCs
+ * respectively. Each PMC comprises two model-specific registers:
+ * a counter configuration control register (CCCR) and a counter
+ * register that holds the actual event counts.
+ *
+ * Configuring an event requires the use of one of 45 event selection
+ * control registers (ESCR). Events are associated with specific
+ * ESCRs. Each PMC group has a set of ESCRs it can use.
+ *
+ * - The BPU counter group (4 PMCs) can use the 16 ESCRs:
+ * BPU_ESCR{0,1}, IS_ESCR{0,1}, MOB_ESCR{0,1}, ITLB_ESCR{0,1},
+ *   PMH_ESCR{0,1}, IX_ESCR{0,1}, FSB_ESCR{0,1}, BSU_ESCR{0,1}.
+ *
+ * - The MS counter group (4 PMCs) can use the 6 ESCRs: MS_ESCR{0,1},
+ * TC_ESCR{0,1}, TBPU_ESCR{0,1}.
+ *
+ * - The FLAME counter group (4 PMCs) can use the 10 ESCRs:
+ * FLAME_ESCR{0,1}, FIRM_ESCR{0,1}, SAAT_ESCR{0,1}, U2L_ESCR{0,1},
+ * DAC_ESCR{0,1}.
+ *
+ * - The IQ counter group (6 PMCs) can use the 13 ESCRs: IQ_ESCR{0,1},
+ * ALF_ESCR{0,1}, RAT_ESCR{0,1}, SSU_ESCR0, CRU_ESCR{0,1,2,3,4,5}.
+ *
+ * Even-numbered ESCRs can be used with counters 0, 1 and 4 (if
+ * present) of a counter group. Odd-numbered ESCRs can be used with
+ * counters 2, 3 and 5 (if present) of a counter group. The
+ * 'p4_escrs[]' table describes these restrictions in a form that
+ * function 'p4_allocate()' uses for making allocation decisions.
+ *
+ * SYSTEM-MODE AND THREAD-MODE ALLOCATION
+ *
+ * In addition to remembering the state of PMC rows
+ * ('FREE', 'STANDALONE', or 'THREAD'), we similarly need to track the
+ * state of ESCR rows. If an ESCR is allocated to a system-mode PMC
+ * on a CPU we cannot allocate this to a thread-mode PMC. On a
+ * multi-cpu (multiple physical CPUs) system, ESCR allocation on each
+ * CPU is tracked by the pc_escrs[] array.
+ *
+ * Each system-mode PMC that is using an ESCR records its row-index in
+ * the appropriate entry and system-mode allocation attempts check
+ * that an ESCR is available using this array. Process-mode PMCs do
+ * not use the pc_escrs[] array, since the ESCR row itself would have
+ * been marked as being in 'THREAD' mode.
+ *
+ * HYPERTHREADING SUPPORT
+ *
+ * When HTT is enabled, the FreeBSD kernel treats the two 'logical'
+ * cpus as independent CPUs and can schedule kernel threads on them
+ * independently. However, the two logical CPUs share the same set of
+ * PMC resources. We need to ensure that:
+ * - PMCs that use the PMC_F_DESCENDANTS semantics are handled correctly,
+ * and,
+ * - Threads of multi-threaded processes that get scheduled on the same
+ * physical CPU are handled correctly.
+ *
+ * Not all HTT capable systems will have HTT enabled since users may
+ * have turned HTT support off using the appropriate sysctls
+ * (machdep.hlt_logical_cpus and machdep.logical_cpus_mask). We
+ * detect the presence of HTT by remembering if an initialization was
+ * done for a logical CPU.
+ *
+ */
+
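+/*
+ * The even/odd ESCR pairing rule above, restated as a standalone
+ * predicate (illustrative; the driver instead encodes the complete
+ * mapping in the p4_escrs[] table below):
+ */
+#if 0
+static __inline int
+p4_escr_counter_compatible(int escr_is_odd, int counter)
+{
+	/* 'counter' is the index (0..5) within its counter group */
+	return (escr_is_odd ?
+	    (counter == 2 || counter == 3 || counter == 5) :
+	    (counter == 0 || counter == 1 || counter == 4));
+}
+#endif
+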
+#define P4_PMCS() \
+ P4_PMC(BPU_COUNTER0) \
+ P4_PMC(BPU_COUNTER1) \
+ P4_PMC(BPU_COUNTER2) \
+ P4_PMC(BPU_COUNTER3) \
+ P4_PMC(MS_COUNTER0) \
+ P4_PMC(MS_COUNTER1) \
+ P4_PMC(MS_COUNTER2) \
+ P4_PMC(MS_COUNTER3) \
+ P4_PMC(FLAME_COUNTER0) \
+ P4_PMC(FLAME_COUNTER1) \
+ P4_PMC(FLAME_COUNTER2) \
+ P4_PMC(FLAME_COUNTER3) \
+ P4_PMC(IQ_COUNTER0) \
+ P4_PMC(IQ_COUNTER1) \
+ P4_PMC(IQ_COUNTER2) \
+ P4_PMC(IQ_COUNTER3) \
+ P4_PMC(IQ_COUNTER4) \
+ P4_PMC(IQ_COUNTER5) \
+ P4_PMC(NONE)
+
+enum pmc_p4pmc {
+#undef P4_PMC
+#define P4_PMC(N) P4_PMC_##N ,
+ P4_PMCS()
+};
+
+/*
+ * P4 ESCR descriptors
+ */
+
+#define P4_ESCRS() \
+ P4_ESCR(BSU_ESCR0, 0x3A0, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(BSU_ESCR1, 0x3A1, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(FSB_ESCR0, 0x3A2, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(FSB_ESCR1, 0x3A3, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(FIRM_ESCR0, 0x3A4, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(FIRM_ESCR1, 0x3A5, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(FLAME_ESCR0, 0x3A6, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(FLAME_ESCR1, 0x3A7, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(DAC_ESCR0, 0x3A8, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(DAC_ESCR1, 0x3A9, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(MOB_ESCR0, 0x3AA, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(MOB_ESCR1, 0x3AB, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(PMH_ESCR0, 0x3AC, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(PMH_ESCR1, 0x3AD, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(SAAT_ESCR0, 0x3AE, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(SAAT_ESCR1, 0x3AF, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(U2L_ESCR0, 0x3B0, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(U2L_ESCR1, 0x3B1, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(BPU_ESCR0, 0x3B2, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(BPU_ESCR1, 0x3B3, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(IS_ESCR0, 0x3B4, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(IS_ESCR1, 0x3B5, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(ITLB_ESCR0, 0x3B6, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(ITLB_ESCR1, 0x3B7, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(CRU_ESCR0, 0x3B8, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR1, 0x3B9, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(IQ_ESCR0, 0x3BA, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(IQ_ESCR1, 0x3BB, IQ_COUNTER1, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(RAT_ESCR0, 0x3BC, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(RAT_ESCR1, 0x3BD, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(SSU_ESCR0, 0x3BE, IQ_COUNTER0, IQ_COUNTER2, IQ_COUNTER4) \
+ P4_ESCR(MS_ESCR0, 0x3C0, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(MS_ESCR1, 0x3C1, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(TBPU_ESCR0, 0x3C2, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(TBPU_ESCR1, 0x3C3, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(TC_ESCR0, 0x3C4, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(TC_ESCR1, 0x3C5, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(IX_ESCR0, 0x3C8, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(IX_ESCR1, 0x3C9, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(ALF_ESCR0, 0x3CA, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(ALF_ESCR1, 0x3CB, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(CRU_ESCR2, 0x3CC, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR3, 0x3CD, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(CRU_ESCR4, 0x3E0, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR5, 0x3E1, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(NONE, ~0, NONE, NONE, NONE)
+
+enum pmc_p4escr {
+#define P4_ESCR(N, MSR, P1, P2, P3) P4_ESCR_##N ,
+ P4_ESCRS()
+#undef P4_ESCR
+};
+
+struct pmc_p4escr_descr {
+ const char pm_escrname[PMC_NAME_MAX];
+ u_short pm_escr_msr;
+ const enum pmc_p4pmc pm_pmcs[P4_MAX_PMC_PER_ESCR];
+};
+
+static struct pmc_p4escr_descr p4_escrs[] =
+{
+#define P4_ESCR(N, MSR, P1, P2, P3) \
+ { \
+ .pm_escrname = #N, \
+ .pm_escr_msr = (MSR), \
+ .pm_pmcs = \
+ { \
+ P4_PMC_##P1, \
+ P4_PMC_##P2, \
+ P4_PMC_##P3 \
+ } \
+ } ,
+
+ P4_ESCRS()
+
+#undef P4_ESCR
+};
+
+/*
+ * P4 Event descriptor
+ */
+
+struct p4_event_descr {
+ const enum pmc_event pm_event;
+ const uint32_t pm_escr_eventselect;
+ const uint32_t pm_cccr_select;
+ const char pm_is_ti_event;
+ enum pmc_p4escr pm_escrs[P4_MAX_ESCR_PER_EVENT];
+};
+
+static struct p4_event_descr p4_events[] = {
+
+#define P4_EVDESCR(NAME, ESCREVENTSEL, CCCRSEL, TI_EVENT, ESCR0, ESCR1) \
+ { \
+ .pm_event = PMC_EV_P4_##NAME, \
+ .pm_escr_eventselect = (ESCREVENTSEL), \
+ .pm_cccr_select = (CCCRSEL), \
+ .pm_is_ti_event = (TI_EVENT), \
+ .pm_escrs = \
+ { \
+ P4_ESCR_##ESCR0, \
+ P4_ESCR_##ESCR1 \
+ } \
+ }
+
+P4_EVDESCR(TC_DELIVER_MODE, 0x01, 0x01, TRUE, TC_ESCR0, TC_ESCR1),
+P4_EVDESCR(BPU_FETCH_REQUEST, 0x03, 0x00, FALSE, BPU_ESCR0, BPU_ESCR1),
+P4_EVDESCR(ITLB_REFERENCE, 0x18, 0x03, FALSE, ITLB_ESCR0, ITLB_ESCR1),
+P4_EVDESCR(MEMORY_CANCEL, 0x02, 0x05, FALSE, DAC_ESCR0, DAC_ESCR1),
+P4_EVDESCR(MEMORY_COMPLETE, 0x08, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(LOAD_PORT_REPLAY, 0x04, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(STORE_PORT_REPLAY, 0x05, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(MOB_LOAD_REPLAY, 0x03, 0x02, FALSE, MOB_ESCR0, MOB_ESCR1),
+P4_EVDESCR(PAGE_WALK_TYPE, 0x01, 0x04, TRUE, PMH_ESCR0, PMH_ESCR1),
+P4_EVDESCR(BSQ_CACHE_REFERENCE, 0x0C, 0x07, FALSE, BSU_ESCR0, BSU_ESCR1),
+P4_EVDESCR(IOQ_ALLOCATION, 0x03, 0x06, FALSE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(IOQ_ACTIVE_ENTRIES, 0x1A, 0x06, FALSE, FSB_ESCR1, NONE),
+P4_EVDESCR(FSB_DATA_ACTIVITY, 0x17, 0x06, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(BSQ_ALLOCATION, 0x05, 0x07, FALSE, BSU_ESCR0, NONE),
+P4_EVDESCR(BSQ_ACTIVE_ENTRIES, 0x06, 0x07, FALSE, BSU_ESCR1, NONE),
+ /* BSQ_ACTIVE_ENTRIES inherits CPU specificity from BSQ_ALLOCATION */
+P4_EVDESCR(SSE_INPUT_ASSIST, 0x34, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(PACKED_SP_UOP, 0x08, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(PACKED_DP_UOP, 0x0C, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(SCALAR_SP_UOP, 0x0A, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(SCALAR_DP_UOP, 0x0E, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(64BIT_MMX_UOP, 0x02, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(128BIT_MMX_UOP, 0x1A, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(X87_FP_UOP, 0x04, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(X87_SIMD_MOVES_UOP, 0x2E, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(GLOBAL_POWER_EVENTS, 0x13, 0x06, FALSE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(TC_MS_XFER, 0x05, 0x00, FALSE, MS_ESCR0, MS_ESCR1),
+P4_EVDESCR(UOP_QUEUE_WRITES, 0x09, 0x00, FALSE, MS_ESCR0, MS_ESCR1),
+P4_EVDESCR(RETIRED_MISPRED_BRANCH_TYPE,
+ 0x05, 0x02, FALSE, TBPU_ESCR0, TBPU_ESCR1),
+P4_EVDESCR(RETIRED_BRANCH_TYPE, 0x04, 0x02, FALSE, TBPU_ESCR0, TBPU_ESCR1),
+P4_EVDESCR(RESOURCE_STALL, 0x01, 0x01, FALSE, ALF_ESCR0, ALF_ESCR1),
+P4_EVDESCR(WC_BUFFER, 0x05, 0x05, TRUE, DAC_ESCR0, DAC_ESCR1),
+P4_EVDESCR(B2B_CYCLES, 0x16, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(BNR, 0x08, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(SNOOP, 0x06, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(RESPONSE, 0x04, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(FRONT_END_EVENT, 0x08, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(EXECUTION_EVENT, 0x0C, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(REPLAY_EVENT, 0x09, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(INSTR_RETIRED, 0x02, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(UOPS_RETIRED, 0x01, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(UOP_TYPE, 0x02, 0x02, FALSE, RAT_ESCR0, RAT_ESCR1),
+P4_EVDESCR(BRANCH_RETIRED, 0x06, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(MISPRED_BRANCH_RETIRED, 0x03, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(X87_ASSIST, 0x03, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(MACHINE_CLEAR, 0x02, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3)
+
+#undef P4_EVDESCR
+};
+
+#define P4_EVENT_IS_TI(E) ((E)->pm_is_ti_event == TRUE)
+
+#define P4_NEVENTS (PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1)
+
+/*
+ * P4 PMC descriptors
+ */
+
+struct p4pmc_descr {
+ struct pmc_descr pm_descr; /* common information */
+ enum pmc_p4pmc pm_pmcnum; /* PMC number */
+ uint32_t pm_pmc_msr; /* PERFCTR MSR address */
+ uint32_t pm_cccr_msr; /* CCCR MSR address */
+};
+
+static struct p4pmc_descr p4_pmcdesc[P4_NPMCS] = {
+
+ /*
+ * TSC descriptor
+ */
+
+ {
+ .pm_descr =
+ {
+ .pd_name = "TSC",
+ .pd_class = PMC_CLASS_TSC,
+ .pd_caps = PMC_CAP_READ | PMC_CAP_WRITE,
+ .pd_width = 64
+ },
+ .pm_pmcnum = ~0,
+ .pm_cccr_msr = ~0,
+ .pm_pmc_msr = 0x10,
+ },
+
+ /*
+ * P4 PMCS
+ */
+
+#define P4_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
+ PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
+ PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE | \
+ PMC_CAP_TAGGING | PMC_CAP_CASCADE)
+
+#define P4_PMCDESCR(N, PMC, CCCR) \
+ { \
+ .pm_descr = \
+ { \
+ .pd_name = #N, \
+ .pd_class = PMC_CLASS_P4, \
+ .pd_caps = P4_PMC_CAPS, \
+ .pd_width = 40 \
+ }, \
+ .pm_pmcnum = P4_PMC_##N, \
+ .pm_cccr_msr = (CCCR), \
+ .pm_pmc_msr = (PMC) \
+ }
+
+ P4_PMCDESCR(BPU_COUNTER0, 0x300, 0x360),
+ P4_PMCDESCR(BPU_COUNTER1, 0x301, 0x361),
+ P4_PMCDESCR(BPU_COUNTER2, 0x302, 0x362),
+ P4_PMCDESCR(BPU_COUNTER3, 0x303, 0x363),
+ P4_PMCDESCR(MS_COUNTER0, 0x304, 0x364),
+ P4_PMCDESCR(MS_COUNTER1, 0x305, 0x365),
+ P4_PMCDESCR(MS_COUNTER2, 0x306, 0x366),
+ P4_PMCDESCR(MS_COUNTER3, 0x307, 0x367),
+ P4_PMCDESCR(FLAME_COUNTER0, 0x308, 0x368),
+ P4_PMCDESCR(FLAME_COUNTER1, 0x309, 0x369),
+ P4_PMCDESCR(FLAME_COUNTER2, 0x30A, 0x36A),
+ P4_PMCDESCR(FLAME_COUNTER3, 0x30B, 0x36B),
+ P4_PMCDESCR(IQ_COUNTER0, 0x30C, 0x36C),
+ P4_PMCDESCR(IQ_COUNTER1, 0x30D, 0x36D),
+ P4_PMCDESCR(IQ_COUNTER2, 0x30E, 0x36E),
+ P4_PMCDESCR(IQ_COUNTER3, 0x30F, 0x36F),
+ P4_PMCDESCR(IQ_COUNTER4, 0x310, 0x370),
+ P4_PMCDESCR(IQ_COUNTER5, 0x311, 0x371),
+
+#undef P4_PMCDESCR
+};
+
+/* HTT support */
+#define P4_NHTT 2 /* logical processors/chip */
+#define P4_HTT_CPU_INDEX_0 0
+#define P4_HTT_CPU_INDEX_1 1
+
+static int p4_system_has_htt;
+
+/*
+ * Per-CPU data structure for P4 class CPUs
+ *
+ * [common stuff]
+ * [19 struct pmc_hw pointers]
+ * [19 struct pmc_hw structures]
+ * [45 ESCRs status bytes]
+ * [per-cpu spin mutex]
+ * [19 flags for holding the config count and runcount]
+ * [19*2 saved value fields] (Thread mode PMC support)
+ * [19*2 pmc value fields] (ditto)
+ */
+
+struct p4_cpu {
+ struct pmc_cpu pc_common;
+ struct pmc_hw *pc_hwpmcs[P4_NPMCS];
+ struct pmc_hw pc_p4pmcs[P4_NPMCS];
+ char pc_escrs[P4_NESCR];
+ struct mtx pc_mtx; /* spin lock */
+ unsigned char pc_flags[P4_NPMCS]; /* 4 bits each: {cfg,run}count */
+ pmc_value_t pc_saved[P4_NPMCS * P4_NHTT];
+ pmc_value_t pc_pmc_values[P4_NPMCS * P4_NHTT];
+};
+
+#define P4_PCPU_SAVED_VALUE(PC,RI,CPU) (PC)->pc_saved[(RI)*((CPU) & 1)]
+#define P4_PCPU_PMC_VALUE(P,R,C) (P)->pc_pmc_values[(R)*((C) & 1)]
+
+#define P4_PCPU_GET_FLAGS(PC,RI,MASK) ((PC)->pc_flags[(RI)] & (MASK))
+#define P4_PCPU_SET_FLAGS(PC,RI,MASK,VAL) do { \
+ char _tmp; \
+ _tmp = (PC)->pc_flags[(RI)]; \
+ _tmp &= ~(MASK); \
+ _tmp |= (VAL) & (MASK); \
+ (PC)->pc_flags[(RI)] = _tmp; \
+} while (0)
+
+#define P4_PCPU_GET_RUNCOUNT(PC,RI) P4_PCPU_GET_FLAGS(PC,RI,0x0F)
+#define P4_PCPU_SET_RUNCOUNT(PC,RI,V) P4_PCPU_SET_FLAGS(PC,RI,0x0F,V)
+
+#define P4_PCPU_GET_CFGCOUNT(PC,RI) (P4_PCPU_GET_FLAGS(PC,RI,0xF0) >> 4)
+#define P4_PCPU_SET_CFGCOUNT(PC,RI,C) P4_PCPU_SET_FLAGS(PC,RI,0xF0,((C) <<4))
+
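+/*
+ * Usage sketch for the packed per-PMC flag byte (illustrative): the
+ * high nibble carries the config count and the low nibble the run
+ * count, so after the two stores below the byte reads 0x21.
+ */
+#if 0
+	/* 'pc' and 'ri' as in p4_config_pmc() below */
+	P4_PCPU_SET_CFGCOUNT(pc, ri, 2);	/* high nibble */
+	P4_PCPU_SET_RUNCOUNT(pc, ri, 1);	/* low nibble */
+	KASSERT(P4_PCPU_GET_CFGCOUNT(pc, ri) == 2 &&
+	    P4_PCPU_GET_RUNCOUNT(pc, ri) == 1,
+	    ("[p4] flag byte packing"));
+#endif
+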
+/* ESCR row disposition */
+static int p4_escrdisp[P4_NESCR];
+
+#define P4_ESCR_ROW_DISP_IS_THREAD(E) (p4_escrdisp[(E)] > 0)
+#define P4_ESCR_ROW_DISP_IS_STANDALONE(E) (p4_escrdisp[(E)] < 0)
+#define P4_ESCR_ROW_DISP_IS_FREE(E) (p4_escrdisp[(E)] == 0)
+
+#define P4_ESCR_MARK_ROW_STANDALONE(E) do { \
+ KASSERT(p4_escrdisp[(E)] <= 0, ("[p4,%d] row disposition error",\
+ __LINE__)); \
+ atomic_add_int(&p4_escrdisp[(E)], -1); \
+ KASSERT(p4_escrdisp[(E)] >= (-mp_ncpus), ("[p4,%d] row " \
+ "disposition error", __LINE__)); \
+} while (0)
+
+#define P4_ESCR_UNMARK_ROW_STANDALONE(E) do { \
+ atomic_add_int(&p4_escrdisp[(E)], 1); \
+ KASSERT(p4_escrdisp[(E)] <= 0, ("[p4,%d] row disposition error",\
+ __LINE__)); \
+} while (0)
+
+#define P4_ESCR_MARK_ROW_THREAD(E) do { \
+ KASSERT(p4_escrdisp[(E)] >= 0, ("[p4,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&p4_escrdisp[(E)], 1); \
+} while (0)
+
+#define P4_ESCR_UNMARK_ROW_THREAD(E) do { \
+ atomic_add_int(&p4_escrdisp[(E)], -1); \
+ KASSERT(p4_escrdisp[(E)] >= 0, ("[p4,%d] row disposition error",\
+ __LINE__)); \
+} while (0)
+
+#define P4_PMC_IS_STOPPED(cccr) ((rdmsr(cccr) & P4_CCCR_ENABLE) == 0)
+
+#define P4_TO_PHYSICAL_CPU(cpu) (pmc_cpu_is_logical(cpu) ? \
+ ((cpu) & ~1) : (cpu))
+
+#define P4_CCCR_Tx_MASK (~(P4_CCCR_OVF_PMI_T0|P4_CCCR_OVF_PMI_T1| \
+ P4_CCCR_ENABLE|P4_CCCR_OVF))
+#define P4_ESCR_Tx_MASK (~(P4_ESCR_T0_OS|P4_ESCR_T0_USR|P4_ESCR_T1_OS| \
+ P4_ESCR_T1_USR))
+
+/*
+ * support routines
+ */
+
+static struct p4_event_descr *
+p4_find_event(enum pmc_event ev)
+{
+ int n;
+
+ for (n = 0; n < P4_NEVENTS; n++)
+ if (p4_events[n].pm_event == ev)
+ break;
+ if (n == P4_NEVENTS)
+ return NULL;
+ return &p4_events[n];
+}
+
+/*
+ * Initialize per-cpu state
+ */
+
+static int
+p4_init(int cpu)
+{
+ int n, phycpu;
+ char *pescr;
+ struct p4_cpu *pcs;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0, "p4-init cpu=%d logical=%d", cpu,
+ pmc_cpu_is_logical(cpu) != 0);
+
+ /*
+ * A 'logical' CPU shares its per-cpu state with its physical
+ * CPU. The physical CPU would have been initialized prior to
+ * the initialization for this cpu.
+ */
+
+ if (pmc_cpu_is_logical(cpu)) {
+ phycpu = P4_TO_PHYSICAL_CPU(cpu);
+ pcs = (struct p4_cpu *) pmc_pcpu[phycpu];
+ PMCDBG(MDP,INI,1, "p4-init cpu=%d phycpu=%d pcs=%p",
+ cpu, phycpu, pcs);
+ KASSERT(pcs,
+ ("[p4,%d] Null Per-Cpu state cpu=%d phycpu=%d", __LINE__,
+ cpu, phycpu));
+ if (pcs == NULL) /* decline to init */
+ return ENXIO;
+ p4_system_has_htt = 1;
+ pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
+ return 0;
+ }
+
+ MALLOC(pcs, struct p4_cpu *, sizeof(struct p4_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ if (pcs == NULL)
+ return ENOMEM;
+ phw = pcs->pc_p4pmcs;
+
+ for (n = 0; n < P4_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pcs->pc_hwpmcs[n] = phw;
+ }
+
+ /* Mark the TSC as shareable */
+ pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
+
+ pescr = pcs->pc_escrs;
+ for (n = 0; n < P4_NESCR; n++)
+ *pescr++ = P4_INVALID_PMC_INDEX;
+ pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
+
+ mtx_init(&pcs->pc_mtx, "p4-pcpu", "pmc", MTX_SPIN);
+
+ return 0;
+}
+
+/*
+ * Destroy per-cpu state.
+ */
+
+static int
+p4_cleanup(int cpu)
+{
+ struct p4_cpu *pcs;
+
+ PMCDBG(MDP,INI,0, "p4-cleanup cpu=%d", cpu);
+
+ /*
+ * Free up the per-cpu structure for the given cpu if
+ * allocated, and if this is a physical CPU.
+ */
+
+ if ((pcs = (struct p4_cpu *) pmc_pcpu[cpu]) != NULL &&
+ !pmc_cpu_is_logical(cpu)) {
+ mtx_destroy(&pcs->pc_mtx);
+ FREE(pcs, M_PMC);
+ }
+
+ pmc_pcpu[cpu] = NULL;
+
+ return 0;
+}
+
+/*
+ * Context switch in.
+ */
+
+static int
+p4_switch_in(struct pmc_cpu *pc)
+{
+ (void) pc;
+ /* enable the RDPMC instruction */
+ load_cr4(rcr4() | CR4_PCE);
+ return 0;
+}
+
+/*
+ * Context switch out.
+ */
+
+static int
+p4_switch_out(struct pmc_cpu *pc)
+{
+ (void) pc;
+ /* disallow RDPMC instruction */
+ load_cr4(rcr4() & ~CR4_PCE);
+ return 0;
+}
+
+/*
+ * Read a PMC
+ */
+
+static int
+p4_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ enum pmc_mode mode;
+ struct p4pmc_descr *pd;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ struct pmc_hw *phw;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+ pd = &p4_pmcdesc[ri];
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ mode = pm->pm_mode;
+
+ PMCDBG(MDP,REA,1, "p4-read cpu=%d ri=%d mode=%d", cpu, ri, mode);
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
+ KASSERT(PMC_IS_COUNTING_MODE(mode),
+ ("[p4,%d] TSC counter in non-counting mode", __LINE__));
+ *v = rdtsc();
+ PMCDBG(MDP,REA,2, "p4-read -> %jx", *v);
+ return 0;
+ }
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] unknown PMC class %d", __LINE__, pd->pm_descr.pd_class));
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode))
+ tmp = rdmsr(p4_pmcdesc[ri].pm_pmc_msr);
+ else
+ tmp = P4_PCPU_PMC_VALUE(pc,ri,cpu);
+
+ if (PMC_IS_SAMPLING_MODE(mode))
+ *v = -(tmp + 1); /* undo transformation */
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,2, "p4-read -> %jx", *v);
+ return 0;
+}
+
+/*
+ * Write a PMC
+ */
+
+static int
+p4_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ const struct pmc_hw *phw;
+ const struct p4pmc_descr *pd;
+
+	KASSERT(cpu >= 0 && cpu < mp_ncpus,
+	    ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < P4_NPMCS,
+	    ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p4_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,WRI,1, "p4-write cpu=%d ri=%d mode=%d v=%jx", cpu, ri,
+ pm->pm_mode, v);
+
+ /*
+ * The P4's TSC register is writeable, but we don't allow a
+ * write as changing the TSC's value could interfere with
+ * other parts of the system.
+ */
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ /*
+ * write the PMC value to the register/saved value: for
+ * sampling mode PMCs, the value to be programmed into the PMC
+ * counter is -(C+1) where 'C' is the requested sample rate.
+ */
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ v = -(v + 1);
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode))
+ wrmsr(pd->pm_pmc_msr, v);
+ else
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) = v;
+
+ return 0;
+}
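+
+/*
+ * The sampling-mode transform used by p4_read_pmc() and p4_write_pmc()
+ * above is its own inverse in 64-bit unsigned arithmetic; a small
+ * illustrative check:
+ */
+#if 0
+	pmc_value_t count, stored, recovered;
+
+	count = 10000;			/* requested sample count */
+	stored = -(count + 1);		/* value programmed by a write */
+	recovered = -(stored + 1);	/* value reported by a read */
+	KASSERT(recovered == count, ("[p4] transform round-trip"));
+#endif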
+
+/*
+ * Configure a PMC 'pm' on the given CPU and row-index.
+ *
+ * 'pm' may be NULL to indicate de-configuration.
+ *
+ * On HTT systems, a PMC may get configured twice, once for each
+ * "logical" CPU.
+ */
+
+static int
+p4_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+ struct p4_cpu *pc;
+ int cfgcount;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL ||
+ (p4_system_has_htt && phw->phw_pmc == pm),
+ ("[p4,%d] hwpmc not unconfigured before re-config", __LINE__));
+
+ mtx_lock_spin(&pc->pc_mtx);
+ cfgcount = P4_PCPU_GET_CFGCOUNT(pc,ri);
+
+	KASSERT(cfgcount >= 0 && cfgcount <= 2,
+	    ("[p4,%d] illegal cfgcount cfg=%d on cpu=%d ri=%d", __LINE__,
+	    cfgcount, cpu, ri));
+
+ KASSERT(cfgcount == 0 || phw->phw_pmc,
+ ("[p4,%d] cpu=%d ri=%d pmc configured with zero cfg count",
+ __LINE__, cpu, ri));
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d cfg=%d pm=%p", cpu, ri, cfgcount,
+ pm);
+
+ if (pm) { /* config */
+ if (cfgcount == 0)
+ phw->phw_pmc = pm;
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[p4,%d] cpu=%d ri=%d config %p != hw %p",
+ __LINE__, cpu, ri, pm, phw->phw_pmc));
+
+ cfgcount++;
+ } else { /* unconfig */
+ --cfgcount;
+ if (cfgcount == 0)
+ phw->phw_pmc = NULL;
+ }
+
+	KASSERT(cfgcount >= 0 && cfgcount <= 2,
+	    ("[p4,%d] illegal cfgcount cfg=%d on cpu=%d ri=%d", __LINE__,
+	    cfgcount, cpu, ri));
+
+ P4_PCPU_SET_CFGCOUNT(pc,ri,cfgcount);
+
+ mtx_unlock_spin(&pc->pc_mtx);
+
+ return 0;
+}
+
+/*
+ * Allocate a PMC.
+ *
+ * The allocation strategy differs between HTT and non-HTT systems.
+ *
+ * The non-HTT case:
+ * - Given the desired event and the PMC row-index, look up the
+ *   list of valid ESCRs for the event.
+ * - For each valid ESCR:
+ *   - Check if the ESCR is free and the ESCR row is in a compatible
+ *     mode (i.e., system or process).
+ * - Check if the ESCR is usable with a P4 PMC at the desired row-index.
+ * If everything matches, we determine the appropriate bit values for the
+ * ESCR and CCCR registers.
+ *
+ * The HTT case:
+ *
+ * - Process mode PMCs require special care. The FreeBSD scheduler could
+ * schedule any two processes on the same physical CPU. We need to ensure
+ * that a given PMC row-index is never allocated to two different
+ * PMCs owned by different user-processes.
+ * This is ensured by always allocating a PMC from a 'FREE' PMC row
+ * if the system has HTT active.
+ * - A similar check needs to be done for ESCRs; we do not want two PMCs
+ * using the same ESCR to be scheduled at the same time. Thus ESCR
+ * allocation is also restricted to FREE rows if the system has HTT
+ * enabled.
+ * - Thirdly, some events are 'thread-independent', i.e.,
+ * the PMC hardware cannot distinguish between events caused by
+ * different logical CPUs. This makes it impossible to assign events
+ * to a given thread of execution. If the system has HTT enabled,
+ * these events are not allowed for process-mode PMCs.
+ */
+
+static int
+p4_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ int found, n, m;
+ uint32_t caps, cccrvalue, escrvalue, tflags;
+ enum pmc_p4escr escr;
+ struct p4_cpu *pc;
+ struct p4_event_descr *pevent;
+ const struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index value %d", __LINE__, ri));
+
+ pd = &p4_pmcdesc[ri];
+
+ PMCDBG(MDP,ALL,1, "p4-allocate ri=%d class=%d pmccaps=0x%x "
+ "reqcaps=0x%x\n", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
+ pm->pm_caps);
+
+ /* check class */
+ if (pd->pm_descr.pd_class != pm->pm_class)
+ return EINVAL;
+
+ /* check requested capabilities */
+ caps = a->pm_caps;
+ if ((pd->pm_descr.pd_caps & caps) != caps)
+ return EPERM;
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
+ /* TSC's are always allocated in system-wide counting mode */
+ if (a->pm_ev != PMC_EV_TSC_TSC ||
+ a->pm_mode != PMC_MODE_SC)
+ return EINVAL;
+ return 0;
+ }
+
+ /*
+ * If the system has HTT enabled, and the desired allocation
+ * mode is process-private, and the PMC row disposition is not
+ * FREE (0), decline the allocation.
+ */
+
+ if (p4_system_has_htt &&
+ PMC_IS_VIRTUAL_MODE(pm->pm_mode) &&
+ pmc_getrowdisp(ri) != 0)
+ return EBUSY;
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] unknown PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ if (pm->pm_event < PMC_EV_P4_FIRST ||
+ pm->pm_event > PMC_EV_P4_LAST)
+ return EINVAL;
+
+ if ((pevent = p4_find_event(pm->pm_event)) == NULL)
+ return ESRCH;
+
+ PMCDBG(MDP,ALL,2, "pevent={ev=%d,escrsel=0x%x,cccrsel=0x%x,isti=%d}",
+ pevent->pm_event, pevent->pm_escr_eventselect,
+ pevent->pm_cccr_select, pevent->pm_is_ti_event);
+
+ /*
+	 * Some PMC events are 'thread-independent' and therefore
+ * cannot be used for process-private modes if HTT is being
+ * used.
+ */
+
+ if (P4_EVENT_IS_TI(pevent) &&
+ PMC_IS_VIRTUAL_MODE(pm->pm_mode) && p4_system_has_htt)
+ return EINVAL;
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+
+ found = 0;
+
+ /* look for a suitable ESCR for this event */
+ for (n = 0; n < P4_MAX_ESCR_PER_EVENT && !found; n++) {
+ if ((escr = pevent->pm_escrs[n]) == P4_ESCR_NONE)
+ break; /* out of ESCRs */
+ /*
+ * Check ESCR row disposition.
+ *
+ * If the request is for a system-mode PMC, then the
+ * ESCR row should not be in process-virtual mode, and
+ * should also be free on the current CPU.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ if (P4_ESCR_ROW_DISP_IS_THREAD(escr) ||
+ pc->pc_escrs[escr] != P4_INVALID_PMC_INDEX)
+ continue;
+ }
+
+ /*
+ * If the request is for a process-virtual PMC, and if
+ * HTT is not enabled, we can use an ESCR row that is
+ * either FREE or already in process mode.
+ *
+ * If HTT is enabled, then we need to ensure that a
+		 * given ESCR is never allocated to two PMCs that
+		 * could run simultaneously on the two logical CPUs of
+		 * a CPU package. We ensure this by only allocating
+ * ESCRs from rows marked as 'FREE'.
+ */
+
+ if (PMC_IS_VIRTUAL_MODE(pm->pm_mode)) {
+ if (p4_system_has_htt) {
+ if (!P4_ESCR_ROW_DISP_IS_FREE(escr))
+ continue;
+ } else
+ if (P4_ESCR_ROW_DISP_IS_STANDALONE(escr))
+ continue;
+ }
+
+ /*
+ * We found a suitable ESCR for this event. Now check if
+ * this escr can work with the PMC at row-index 'ri'.
+ */
+
+ for (m = 0; m < P4_MAX_PMC_PER_ESCR; m++)
+ if (p4_escrs[escr].pm_pmcs[m] == pd->pm_pmcnum) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found == 0)
+ return ESRCH;
+
+ KASSERT((int) escr >= 0 && escr < P4_NESCR,
+ ("[p4,%d] illegal ESCR value %d", __LINE__, escr));
+
+ /* mark ESCR row mode */
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ pc->pc_escrs[escr] = ri; /* mark ESCR as in use on this cpu */
+ P4_ESCR_MARK_ROW_STANDALONE(escr);
+ } else {
+ KASSERT(pc->pc_escrs[escr] == P4_INVALID_PMC_INDEX,
+ ("[p4,%d] escr[%d] already in use", __LINE__, escr));
+ P4_ESCR_MARK_ROW_THREAD(escr);
+ }
+
+ pm->pm_md.pm_p4.pm_p4_escrmsr = p4_escrs[escr].pm_escr_msr;
+ pm->pm_md.pm_p4.pm_p4_escr = escr;
+
+ cccrvalue = P4_CCCR_TO_ESCR_SELECT(pevent->pm_cccr_select);
+ escrvalue = P4_ESCR_TO_EVENT_SELECT(pevent->pm_escr_eventselect);
+
+ /* CCCR fields */
+ if (caps & PMC_CAP_THRESHOLD)
+ cccrvalue |= (a->pm_p4_cccrconfig & P4_CCCR_THRESHOLD_MASK) |
+ P4_CCCR_COMPARE;
+
+ if (caps & PMC_CAP_EDGE)
+ cccrvalue |= P4_CCCR_EDGE;
+
+ if (caps & PMC_CAP_INVERT)
+ cccrvalue |= P4_CCCR_COMPLEMENT;
+
+ if (p4_system_has_htt)
+ cccrvalue |= a->pm_p4_cccrconfig & P4_CCCR_ACTIVE_THREAD_MASK;
+ else /* no HTT; thread field should be '11b' */
+ cccrvalue |= P4_CCCR_TO_ACTIVE_THREAD(0x3);
+
+ if (caps & PMC_CAP_CASCADE)
+ cccrvalue |= P4_CCCR_CASCADE;
+
+ /* On HTT systems the PMI T0 field may get moved to T1 at pmc start */
+ if (caps & PMC_CAP_INTERRUPT)
+ cccrvalue |= P4_CCCR_OVF_PMI_T0;
+
+ /* ESCR fields */
+ if (caps & PMC_CAP_QUALIFIER)
+ escrvalue |= a->pm_p4_escrconfig & P4_ESCR_EVENT_MASK_MASK;
+ if (caps & PMC_CAP_TAGGING)
+ escrvalue |= (a->pm_p4_escrconfig & P4_ESCR_TAG_VALUE_MASK) |
+ P4_ESCR_TAG_ENABLE;
+
+ /* HTT: T0_{OS,USR} bits may get moved to T1 at pmc start */
+ tflags = 0;
+ if (caps & PMC_CAP_SYSTEM)
+ tflags |= P4_ESCR_T0_OS;
+ if (caps & PMC_CAP_USER)
+ tflags |= P4_ESCR_T0_USR;
+ if (tflags == 0)
+ tflags = (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+ escrvalue |= tflags;
+
+ pm->pm_md.pm_p4.pm_p4_cccrvalue = cccrvalue;
+ pm->pm_md.pm_p4.pm_p4_escrvalue = escrvalue;
+
+ PMCDBG(MDP,ALL,2, "p4-allocate cccrsel=0x%x cccrval=0x%x "
+ "escr=%d escrmsr=0x%x escrval=0x%x\n", pevent->pm_cccr_select,
+ cccrvalue, escr, pm->pm_md.pm_p4.pm_p4_escrmsr, escrvalue);
+
+ return 0;
+}
+
+/*
+ * release a PMC.
+ */
+
+static int
+p4_release_pmc(int cpu, int ri, struct pmc *pm)
+{
+ enum pmc_p4escr escr;
+ struct pmc_hw *phw;
+ struct p4_cpu *pc;
+
+ if (p4_pmcdesc[ri].pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ escr = pm->pm_md.pm_p4.pm_p4_escr;
+
+ PMCDBG(MDP,REL,1, "p4-release cpu=%d ri=%d escr=%d", cpu, ri, escr);
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[p4,%d] releasing configured PMC ri=%d", __LINE__, ri));
+
+ P4_ESCR_UNMARK_ROW_STANDALONE(escr);
+ KASSERT(pc->pc_escrs[escr] == ri,
+ ("[p4,%d] escr[%d] not allocated to ri %d", __LINE__,
+ escr, ri));
+ pc->pc_escrs[escr] = P4_INVALID_PMC_INDEX; /* mark as free */
+ } else
+ P4_ESCR_UNMARK_ROW_THREAD(escr);
+
+ return 0;
+}
+
+/*
+ * Start a PMC
+ */
+
+static int
+p4_start_pmc(int cpu, int ri)
+{
+ int rc;
+ uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ struct pmc_hw *phw;
+ struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p4_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] starting cpu%d,pmc%d with null pmc", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,STA,1, "p4-start cpu=%d ri=%d", cpu, ri);
+
+	if (pd->pm_descr.pd_class == PMC_CLASS_TSC) /* TSCs are always on */
+ return 0;
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] wrong PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ /* retrieve the desired CCCR/ESCR values from the PMC */
+ cccrvalue = pm->pm_md.pm_p4.pm_p4_cccrvalue;
+ escrvalue = pm->pm_md.pm_p4.pm_p4_escrvalue;
+ escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
+
+ /* extract and zero the logical processor selection bits */
+ cccrtbits = cccrvalue & P4_CCCR_OVF_PMI_T0;
+ escrtbits = escrvalue & (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+ cccrvalue &= ~P4_CCCR_OVF_PMI_T0;
+ escrvalue &= ~(P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+
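+	/*
+	 * For example, P4_ESCR_T0_OS (0x8) and P4_ESCR_T0_USR (0x4)
+	 * shift right by 2 to give P4_ESCR_T1_OS (0x2) and
+	 * P4_ESCR_T1_USR (0x1), while P4_CCCR_OVF_PMI_T0 (bit 26)
+	 * shifts left by 1 to give P4_CCCR_OVF_PMI_T1 (bit 27).
+	 */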
+ if (pmc_cpu_is_logical(cpu)) { /* shift T0 bits to T1 position */
+ cccrtbits <<= 1;
+ escrtbits >>= 2;
+ }
+
+ /* start system mode PMCs directly */
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ wrmsr(escrmsr, escrvalue | escrtbits);
+ wrmsr(pd->pm_cccr_msr, cccrvalue | cccrtbits | P4_CCCR_ENABLE);
+ return 0;
+ }
+
+	/*
+	 * Thread mode PMCs
+	 *
+	 * On HTT machines, the same PMC could be scheduled on the
+	 * same physical CPU twice (once for each logical CPU), for
+	 * example, if two threads of a multi-threaded process get
+	 * scheduled on the same CPU.
+	 */
+
+ mtx_lock_spin(&pc->pc_mtx);
+
+ rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
+ KASSERT(rc == 0 || rc == 1,
+ ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
+ rc));
+
+ if (rc == 0) { /* 1st CPU and the non-HTT case */
+ /*
+ * Enable the correct bits for this CPU.
+ */
+ escrvalue |= escrtbits;
+ cccrvalue |= cccrtbits | P4_CCCR_ENABLE;
+
+ KASSERT(P4_PMC_IS_STOPPED(pd->pm_cccr_msr),
+ ("[p4,%d] cpu=%d ri=%d cccr=0x%x not stopped", __LINE__,
+ cpu, ri, pd->pm_cccr_msr));
+
+ /* write out the low 40 bits of the saved value to hardware */
+ wrmsr(pd->pm_pmc_msr,
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) & P4_PERFCTR_MASK);
+ P4_PCPU_SAVED_VALUE(pc,ri,cpu) = P4_PCPU_PMC_VALUE(pc,ri,cpu) &
+ P4_PERFCTR_MASK;
+
+ /* Program the ESCR and CCCR and start the PMC */
+ wrmsr(escrmsr, escrvalue);
+ wrmsr(pd->pm_cccr_msr, cccrvalue);
+
+ PMCDBG(MDP,STA,2,"p4-start cpu=%d rc=%d ri=%d escr=%d "
+ "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x\n", cpu, rc,
+ ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr, escrvalue,
+ cccrvalue);
+
+ } else if (rc == 1) { /* 2nd CPU */
+
+ /*
+ * Retrieve the CCCR and ESCR values from their MSRs,
+		 * and turn on the additional T[0/1] bits for the 2nd
+ * CPU. Remember the difference between the saved
+ * value from the previous 'write()' operation to this
+ * (PMC,CPU) pair and the current PMC reading; this is
+ * used at PMCSTOP time to derive the correct
+ * increment.
+ */
+
+ cccrvalue = rdmsr(pd->pm_cccr_msr);
+
+ KASSERT((cccrvalue & P4_CCCR_Tx_MASK) ==
+ (pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK),
+ ("[p4,%d] cpu=%d rc=%d ri=%d CCCR bits 0x%x PMC 0x%x",
+ __LINE__, cpu, rc, ri, cccrvalue & P4_CCCR_Tx_MASK,
+ pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK));
+ KASSERT(cccrvalue & P4_CCCR_ENABLE,
+ ("[p4,%d] 2nd cpu rc=%d cpu=%d ri=%d not running",
+ __LINE__, rc, cpu, ri));
+ KASSERT((cccrvalue & cccrtbits) == 0,
+ ("[p4,%d] CCCR T0/T1 mismatch rc=%d cpu=%d ri=%d"
+ "cccrvalue=0x%x tbits=0x%x", __LINE__, rc, cpu, ri,
+ cccrvalue, cccrtbits));
+
+ /* stop PMC */
+ wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);
+
+ escrvalue = rdmsr(escrmsr);
+
+ KASSERT((escrvalue & P4_ESCR_Tx_MASK) ==
+ (pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK),
+ ("[p4,%d] Extra ESCR bits cpu=%d rc=%d ri=%d "
+ "escr=0x%x pm=0x%x", __LINE__, cpu, rc, ri,
+ escrvalue & P4_ESCR_Tx_MASK,
+ pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK));
+
+ KASSERT((escrvalue & escrtbits) == 0,
+ ("[p4,%d] ESCR T0/T1 mismatch rc=%d cpu=%d ri=%d "
+ "escrmsr=0x%x escrvalue=0x%x tbits=0x%x", __LINE__,
+ rc, cpu, ri, escrmsr, escrvalue, escrtbits));
+
+ /* read current value and save it */
+ P4_PCPU_SAVED_VALUE(pc,ri,cpu) =
+ rdmsr(pd->pm_pmc_msr) & P4_PERFCTR_MASK;
+
+ /*
+ * program the new bits into the ESCR and CCCR,
+ * starting the PMC in the process.
+ */
+
+ escrvalue |= escrtbits;
+		cccrvalue |= cccrtbits;
+
+ wrmsr(escrmsr, escrvalue);
+ wrmsr(pd->pm_cccr_msr, cccrvalue);
+
+		PMCDBG(MDP,STA,2,"p4-start/2 cpu=%d rc=%d ri=%d escr=%d "
+		    "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x pmc=0x%jx",
+		    cpu, rc, ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr,
+		    escrvalue, cccrvalue, P4_PCPU_SAVED_VALUE(pc,ri,cpu));
+
+ } else
+ panic("invalid runcount %d\n", rc);
+
+ ++rc;
+ P4_PCPU_SET_RUNCOUNT(pc,ri,rc);
+
+ mtx_unlock_spin(&pc->pc_mtx);
+
+ return 0;
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+p4_stop_pmc(int cpu, int ri)
+{
+ int rc;
+ uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ struct pmc_hw *phw;
+ struct p4pmc_descr *pd;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row index %d", __LINE__, ri));
+
+ pd = &p4_pmcdesc[ri];
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ pc = (struct p4_cpu *) pmc_pcpu[P4_TO_PHYSICAL_CPU(cpu)];
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(phw != NULL,
+ ("[p4,%d] null phw for cpu%d, ri%d", __LINE__, cpu, ri));
+
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] null pmc for cpu%d, ri%d", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STO,1, "p4-stop cpu=%d ri=%d", cpu, ri);
+
+ if (PMC_IS_SYSTEM_MODE(pm->pm_mode)) {
+ wrmsr(pd->pm_cccr_msr,
+ pm->pm_md.pm_p4.pm_p4_cccrvalue & ~P4_CCCR_ENABLE);
+ return 0;
+ }
+
+ /*
+ * Thread mode PMCs.
+ *
+	 * On HTT machines, this PMC may be in use by two threads
+	 * running on two logical CPUs.  Thus we look at the
+	 * 'pm_runcount' field and only turn off the appropriate T0/T1
+	 * bits (and keep the PMC running).
+ *
+ * The 'pc_saved' field has the 'diff' between the value in
+ * the hardware register at PMCSTART time and the nominal
+ * start value for the PMC. This diff is added to the current
+	 * PMC reading to derive the correct (absolute) return value.
+ */
+
+ /* bits to mask */
+ cccrtbits = P4_CCCR_OVF_PMI_T0;
+ escrtbits = P4_ESCR_T0_OS | P4_ESCR_T0_USR;
+ if (pmc_cpu_is_logical(cpu)) {
+ cccrtbits <<= 1;
+ escrtbits >>= 2;
+ }
+
+ mtx_lock_spin(&pc->pc_mtx);
+
+ rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
+
+ KASSERT(rc == 2 || rc == 1,
+ ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
+ rc));
+
+ --rc;
+
+ P4_PCPU_SET_RUNCOUNT(pc,ri,rc);
+
+ /* Stop this PMC */
+ cccrvalue = rdmsr(pd->pm_cccr_msr);
+ wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);
+
+ escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
+ escrvalue = rdmsr(escrmsr);
+
+ /* get the current PMC reading */
+ tmp = rdmsr(pd->pm_pmc_msr) & P4_PERFCTR_MASK;
+
+ if (rc == 1) { /* need to keep the PMC running */
+
+ KASSERT(escrvalue & escrtbits,
+ ("[p4,%d] ESCR T0/T1 mismatch cpu=%d ri=%d escrmsr=0x%x "
+ "escrvalue=0x%x tbits=0x%x", __LINE__, cpu, ri, escrmsr,
+ escrvalue, escrtbits));
+
+ KASSERT(PMC_IS_COUNTING_MODE(pm->pm_mode) ||
+ (cccrvalue & cccrtbits),
+ ("[p4,%d] CCCR T0/T1 mismatch cpu=%d ri=%d cccrvalue=0x%x "
+ "tbits=0x%x", __LINE__, cpu, ri, cccrvalue, cccrtbits));
+
+ escrvalue &= ~escrtbits;
+ cccrvalue &= ~cccrtbits;
+
+ wrmsr(escrmsr, escrvalue);
+ wrmsr(pd->pm_cccr_msr, cccrvalue);
+
+ }
+
+ PMCDBG(MDP,STO,2, "p4-stop/2 cpu=%d rc=%d ri=%d escrmsr=0x%x escrval=0x%x "
+ "cccrval=0x%x", cpu, rc, ri, escrmsr, escrvalue, cccrvalue);
+
+ /* get the incremental count from this context switch */
+ tmp -= P4_PCPU_SAVED_VALUE(pc,ri,cpu);
+	if ((int64_t) tmp < 0) /* counter wrap-around; add back 2^40 */
+		tmp += (P4_PERFCTR_MASK + 1);
+
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) += tmp;
+
+ mtx_unlock_spin(&pc->pc_mtx);
+ return 0;
+}
+
+/*
+ * Handle an interrupt.
+ */
+
+static int
+p4_intr(int cpu, uintptr_t eip)
+{
+ (void) cpu;
+ (void) eip;
+
+ return 0;
+}
+
+/*
+ * Describe a CPU's PMC state.
+ */
+
+static int
+p4_describe(int cpu, int ri, struct pmc_info *pi,
+ struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ struct pmc_hw *phw;
+ const struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] row-index %d out of range", __LINE__, ri));
+
+ PMCDBG(MDP,OPS,1,"p4-describe cpu=%d ri=%d", cpu, ri);
+
+ if (pmc_cpu_is_logical(cpu))
+ return EINVAL;
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &p4_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return error;
+
+ pi->pm_class = pd->pm_descr.pd_class;
+ pi->pm_caps = pd->pm_descr.pd_caps;
+ pi->pm_width = pd->pm_descr.pd_width;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Get MSR# for use with RDPMC.
+ */
+
+static int
+p4_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = p4_pmcdesc[ri].pm_pmc_msr;
+ return 0;
+}
+
+
+int
+pmc_initialize_p4(struct pmc_mdep *pmc_mdep)
+{
+ struct p4_event_descr *pe;
+
+ KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
+ ("[p4,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "p4-initialize");
+
+ switch (pmc_mdep->pmd_cputype) {
+ case PMC_CPU_INTEL_PIV:
+
+ pmc_mdep->pmd_npmc = P4_NPMCS;
+ pmc_mdep->pmd_classes[1] = PMC_CLASS_P4;
+ pmc_mdep->pmd_nclasspmcs[1] = 18;
+
+ pmc_mdep->pmd_init = p4_init;
+ pmc_mdep->pmd_cleanup = p4_cleanup;
+ pmc_mdep->pmd_switch_in = p4_switch_in;
+ pmc_mdep->pmd_switch_out = p4_switch_out;
+ pmc_mdep->pmd_read_pmc = p4_read_pmc;
+ pmc_mdep->pmd_write_pmc = p4_write_pmc;
+ pmc_mdep->pmd_config_pmc = p4_config_pmc;
+ pmc_mdep->pmd_allocate_pmc = p4_allocate_pmc;
+ pmc_mdep->pmd_release_pmc = p4_release_pmc;
+ pmc_mdep->pmd_start_pmc = p4_start_pmc;
+ pmc_mdep->pmd_stop_pmc = p4_stop_pmc;
+ pmc_mdep->pmd_intr = p4_intr;
+ pmc_mdep->pmd_describe = p4_describe;
+ pmc_mdep->pmd_get_msr = p4_get_msr; /* i386 */
+
+ /* model specific munging */
+ if ((cpu_id & 0xFFF) < 0xF27) {
+
+ /*
+ * On P4 and Xeon with CPUID < (Family 15,
+ * Model 2, Stepping 7), only one ESCR is
+ * available for the IOQ_ALLOCATION event.
+ */
+
+ pe = p4_find_event(PMC_EV_P4_IOQ_ALLOCATION);
+ pe->pm_escrs[1] = P4_ESCR_NONE;
+ }
+
+ break;
+
+ default:
+ KASSERT(0,("[p4,%d] Unknown CPU type", __LINE__));
+ return ENOSYS;
+ }
+
+ return 0;
+}
diff --git a/sys/hwpmc/hwpmc_ppro.c b/sys/hwpmc/hwpmc_ppro.c
new file mode 100644
index 0000000..3a289a5
--- /dev/null
+++ b/sys/hwpmc/hwpmc_ppro.c
@@ -0,0 +1,742 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * PENTIUM PRO SUPPORT
+ */
+
+struct p6pmc_descr {
+ struct pmc_descr pm_descr; /* common information */
+ uint32_t pm_pmc_msr;
+ uint32_t pm_evsel_msr;
+};
+
+static struct p6pmc_descr p6_pmcdesc[P6_NPMCS] = {
+
+ /* TSC */
+ {
+ .pm_descr =
+ {
+ .pd_name = "TSC",
+ .pd_class = PMC_CLASS_TSC,
+ .pd_caps = PMC_CAP_READ,
+ .pd_width = 64
+ },
+ .pm_pmc_msr = 0x10,
+ .pm_evsel_msr = ~0
+ },
+
+#define P6_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
+ PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
+ PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
+
+ /* PMC 0 */
+ {
+ .pm_descr =
+ {
+ .pd_name ="P6-0",
+ .pd_class = PMC_CLASS_P6,
+ .pd_caps = P6_PMC_CAPS,
+ .pd_width = 40
+ },
+ .pm_pmc_msr = P6_MSR_PERFCTR0,
+ .pm_evsel_msr = P6_MSR_EVSEL0
+ },
+
+ /* PMC 1 */
+ {
+ .pm_descr =
+ {
+ .pd_name ="P6-1",
+ .pd_class = PMC_CLASS_P6,
+ .pd_caps = P6_PMC_CAPS,
+ .pd_width = 40
+ },
+ .pm_pmc_msr = P6_MSR_PERFCTR1,
+ .pm_evsel_msr = P6_MSR_EVSEL1
+ }
+};
+
+static enum pmc_cputype p6_cputype;
+
+/*
+ * P6 Event descriptor
+ */
+
+struct p6_event_descr {
+ const enum pmc_event pm_event;
+ uint32_t pm_evsel;
+ uint32_t pm_flags;
+ uint32_t pm_unitmask;
+};
+
+static const struct p6_event_descr p6_events[] = {
+
+#define P6_EVDESCR(NAME, EVSEL, FLAGS, UMASK) \
+ { \
+ .pm_event = PMC_EV_P6_##NAME, \
+ .pm_evsel = (EVSEL), \
+ .pm_flags = (FLAGS), \
+ .pm_unitmask = (UMASK) \
+ }
+
+#define P6F_P6 (1 << PMC_CPU_INTEL_P6)
+#define P6F_CL (1 << PMC_CPU_INTEL_CL)
+#define P6F_PII (1 << PMC_CPU_INTEL_PII)
+#define P6F_PIII (1 << PMC_CPU_INTEL_PIII)
+#define P6F_PM (1 << PMC_CPU_INTEL_PM)
+#define P6F_CTR0 0x0001
+#define P6F_CTR1 0x0002
+#define P6F_ALL_CPUS (P6F_P6 | P6F_PII | P6F_CL | P6F_PIII | P6F_PM)
+#define P6F_ALL_CTRS (P6F_CTR0 | P6F_CTR1)
+#define P6F_ALL (P6F_ALL_CPUS | P6F_ALL_CTRS)
+
+#define P6_EVENT_VALID_FOR_CPU(P,CPU) ((P)->pm_flags & (1 << (CPU)))
+#define P6_EVENT_VALID_FOR_CTR(P,CTR) ((P)->pm_flags & (1 << (CTR)))
+
+P6_EVDESCR(DATA_MEM_REFS, 0x43, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_LINES_IN, 0x45, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_M_LINES_IN, 0x46, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_M_LINES_OUT, 0x47, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_MISS_OUTSTANDING, 0x48, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_FETCH, 0x80, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_FETCH_MISS, 0x81, P6F_ALL, 0x00),
+P6_EVDESCR(ITLB_MISS, 0x85, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_MEM_STALL, 0x86, P6F_ALL, 0x00),
+P6_EVDESCR(ILD_STALL, 0x87, P6F_ALL, 0x00),
+P6_EVDESCR(L2_IFETCH, 0x28, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LD, 0x29, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_ST, 0x2A, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LINES_IN, 0x24, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LINES_OUT, 0x26, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_M_LINES_INM, 0x25, P6F_ALL, 0x00),
+P6_EVDESCR(L2_M_LINES_OUTM, 0x27, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_RQSTS, 0x2E, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_ADS, 0x21, P6F_ALL, 0x00),
+P6_EVDESCR(L2_DBUS_BUSY, 0x22, P6F_ALL, 0x00),
+P6_EVDESCR(L2_DBUS_BUSY_RD, 0x23, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_DRDY_CLOCKS, 0x62, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_LOCK_CLOCKS, 0x63, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_REQ_OUTSTANDING, 0x60, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_TRAN_BRD, 0x65, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_RFO, 0x66, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_WB, 0x67, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_IFETCH, 0x68, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_INVAL, 0x69, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_PWR, 0x6A, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_P, 0x6B, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_IO, 0x6C, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_DEF, 0x6D, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_BURST, 0x6E, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_ANY, 0x70, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_MEM, 0x6F, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_DATA_RCV, 0x64, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_BNR_DRV, 0x61, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_HIT_DRV, 0x7A, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_HITM_DRV, 0x7B, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_SNOOP_STALL, 0x7E, P6F_ALL, 0x00),
+P6_EVDESCR(FLOPS, 0xC1, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(FP_COMPS_OPS_EXE, 0x10, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(FP_ASSIST, 0x11, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(MUL, 0x12, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(DIV, 0x13, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(CYCLES_DIV_BUSY, 0x14, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(LD_BLOCKS, 0x03, P6F_ALL, 0x00),
+P6_EVDESCR(SB_DRAINS, 0x04, P6F_ALL, 0x00),
+P6_EVDESCR(MISALIGN_MEM_REF, 0x05, P6F_ALL, 0x00),
+P6_EVDESCR(EMON_KNI_PREF_DISPATCHED, 0x07, P6F_PIII | P6F_ALL_CTRS, 0x03),
+P6_EVDESCR(EMON_KNI_PREF_MISS, 0x4B, P6F_PIII | P6F_ALL_CTRS, 0x03),
+P6_EVDESCR(INST_RETIRED, 0xC0, P6F_ALL, 0x00),
+P6_EVDESCR(UOPS_RETIRED, 0xC2, P6F_ALL, 0x00),
+P6_EVDESCR(INST_DECODED, 0xD0, P6F_ALL, 0x00),
+P6_EVDESCR(EMON_KNI_INST_RETIRED, 0xD8, P6F_PIII | P6F_ALL_CTRS, 0x01),
+P6_EVDESCR(EMON_KNI_COMP_INST_RET, 0xD9, P6F_PIII | P6F_ALL_CTRS, 0x01),
+P6_EVDESCR(HW_INT_RX, 0xC8, P6F_ALL, 0x00),
+P6_EVDESCR(CYCLES_INT_MASKED, 0xC6, P6F_ALL, 0x00),
+P6_EVDESCR(CYCLES_INT_PENDING_AND_MASKED, 0xC7, P6F_ALL, 0x00),
+P6_EVDESCR(BR_INST_RETIRED, 0xC4, P6F_ALL, 0x00),
+P6_EVDESCR(BR_MISS_PRED_RETIRED, 0xC5, P6F_ALL, 0x00),
+P6_EVDESCR(BR_TAKEN_RETIRED, 0xC9, P6F_ALL, 0x00),
+P6_EVDESCR(BR_MISS_PRED_TAKEN_RET, 0xCA, P6F_ALL, 0x00),
+P6_EVDESCR(BR_INST_DECODED, 0xE0, P6F_ALL, 0x00),
+P6_EVDESCR(BTB_MISSES, 0xE2, P6F_ALL, 0x00),
+P6_EVDESCR(BR_BOGUS, 0xE4, P6F_ALL, 0x00),
+P6_EVDESCR(BACLEARS, 0xE6, P6F_ALL, 0x00),
+P6_EVDESCR(RESOURCE_STALLS, 0xA2, P6F_ALL, 0x00),
+P6_EVDESCR(PARTIAL_RAT_STALLS, 0xD2, P6F_ALL, 0x00),
+P6_EVDESCR(SEGMENT_REG_LOADS, 0x06, P6F_ALL, 0x00),
+P6_EVDESCR(CPU_CLK_UNHALTED, 0x79, P6F_ALL, 0x00),
+P6_EVDESCR(MMX_INSTR_EXEC, 0xB0,
+ P6F_ALL_CTRS | P6F_CL | P6F_PII, 0x00),
+P6_EVDESCR(MMX_SAT_INSTR_EXEC, 0xB1,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(MMX_UOPS_EXEC, 0xB2,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(MMX_INSTR_TYPE_EXEC, 0xB3,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x3F),
+P6_EVDESCR(FP_MMX_TRANS, 0xCC,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x01),
+P6_EVDESCR(MMX_ASSIST, 0xCD,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(MMX_INSTR_RET, 0xCE, P6F_ALL_CTRS | P6F_PII, 0x00),
+P6_EVDESCR(SEG_RENAME_STALLS, 0xD4,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(SEG_REG_RENAMES, 0xD5,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(RET_SEG_RENAMES, 0xD6,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(EMON_EST_TRANS, 0x58, P6F_ALL_CTRS | P6F_PM, 0x02),
+P6_EVDESCR(EMON_THERMAL_TRIP, 0x59, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_INST_EXEC, 0x88, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_MISSP_EXEC, 0x89, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_BAC_MISSP_EXEC, 0x8A, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CND_EXEC, 0x8B, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CND_MISSP_EXEC, 0x8C, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_EXEC, 0x8D, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_MISSP_EXEC, 0x8E, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_EXEC, 0x8F, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_MISSP_EXEC, 0x90, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_BAC_MISSP_EXEC, 0x91, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CALL_EXEC, 0x92, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CALL_MISSP_EXEC, 0x93, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_CALL_EXEC, 0x94, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SIMD_INSTR_RETIRED, 0xCE, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SYNCH_UOPS, 0xD3, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_ESP_UOPS, 0xD7, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_FUSED_UOPS_RET, 0xDA, P6F_ALL_CTRS | P6F_PM, 0x03),
+P6_EVDESCR(EMON_UNFUSION, 0xDB, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_PREF_RQSTS_UP, 0xF0, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_PREF_RQSTS_DN, 0xF8, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SSE_SSE2_INST_RETIRED, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x03),
+P6_EVDESCR(EMON_SSE_SSE2_COMP_INST_RETIRED, 0xD9, P6F_ALL_CTRS | P6F_PM, 0x03)
+
+#undef P6_EVDESCR
+};
+
+#define P6_NEVENTS (PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1)
+
+static const struct p6_event_descr *
+p6_find_event(enum pmc_event ev)
+{
+ int n;
+
+ for (n = 0; n < P6_NEVENTS; n++)
+ if (p6_events[n].pm_event == ev)
+ break;
+ if (n == P6_NEVENTS)
+ return NULL;
+ return &p6_events[n];
+}
+
+/*
+ * Per-CPU data structure for P6 class CPUs
+ *
+ * [common stuff]
+ * [3 struct pmc_hw pointers]
+ * [3 struct pmc_hw structures]
+ */
+
+struct p6_cpu {
+ struct pmc_cpu pc_common;
+ struct pmc_hw *pc_hwpmcs[P6_NPMCS];
+ struct pmc_hw pc_p6pmcs[P6_NPMCS];
+};
+
+static int
+p6_init(int cpu)
+{
+ int n;
+ struct p6_cpu *pcs;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] bad cpu %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0,"p6-init cpu=%d", cpu);
+
+ MALLOC(pcs, struct p6_cpu *, sizeof(struct p6_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ if (pcs == NULL)
+ return ENOMEM;
+
+ phw = pcs->pc_p6pmcs;
+
+ for (n = 0; n < P6_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pcs->pc_hwpmcs[n] = phw;
+ }
+
+ /* Mark the TSC as shareable */
+ pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
+
+ pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
+
+ return 0;
+}
+
+static int
+p6_cleanup(int cpu)
+{
+ struct pmc_cpu *pcs;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] bad cpu %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0,"p6-cleanup cpu=%d", cpu);
+
+ if ((pcs = pmc_pcpu[cpu]) != NULL)
+ FREE(pcs, M_PMC);
+ pmc_pcpu[cpu] = NULL;
+
+ return 0;
+}
+
+static int
+p6_switch_in(struct pmc_cpu *pc)
+{
+ (void) pc;
+ return 0;
+}
+
+static int
+p6_switch_out(struct pmc_cpu *pc)
+{
+ (void) pc;
+ return 0;
+}
+
+static int
+p6_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc_hw *phw;
+ struct pmc *pm;
+ struct p6pmc_descr *pd;
+ pmc_value_t tmp;
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
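+	/*
+	 * In sampling mode the hardware counts upwards from a
+	 * negative value towards zero, so the raw reading is negated
+	 * to recover the number of events seen so far; counting-mode
+	 * values are returned unchanged.
+	 */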
+ tmp = rdmsr(pd->pm_pmc_msr) & P6_PERFCTR_MASK;
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ *v = -tmp;
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,1, "p6-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
+ pd->pm_pmc_msr, *v);
+
+ return 0;
+}
+
+static int
+p6_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc_hw *phw;
+ struct pmc *pm;
+ struct p6pmc_descr *pd;
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ PMCDBG(MDP,WRI,1, "p6-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
+ pd->pm_pmc_msr, v);
+
+ if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
+ v = -v;
+
+ wrmsr(pd->pm_pmc_msr, v & P6_PERFCTR_MASK);
+
+ return 0;
+}
+
+static int
+p6_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "p6-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ phw->phw_pmc = pm;
+
+ return 0;
+}
+
+/*
+ * A pmc may be allocated to a given row index if:
+ * - the event is valid for this CPU
+ * - the event is valid for this counter index
+ */
+
+static int
+p6_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ uint32_t allowed_unitmask, caps, config, unitmask;
+ const struct p6pmc_descr *pd;
+ const struct p6_event_descr *pevent;
+ enum pmc_event ev;
+
+ (void) cpu;
+
+	KASSERT(cpu >= 0 && cpu < mp_ncpus,
+	    ("[p6,%d] illegal CPU %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < P6_NPMCS,
+	    ("[p6,%d] illegal row-index value %d", __LINE__, ri));
+
+ pd = &p6_pmcdesc[ri];
+
+ PMCDBG(MDP,ALL,1, "p6-allocate ri=%d class=%d pmccaps=0x%x "
+ "reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
+ pm->pm_caps);
+
+ /* check class */
+ if (pd->pm_descr.pd_class != pm->pm_class)
+ return EINVAL;
+
+ /* check requested capabilities */
+ caps = a->pm_caps;
+ if ((pd->pm_descr.pd_caps & caps) != caps)
+ return EPERM;
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
+ /* TSC's are always allocated in system-wide counting mode */
+ if (a->pm_ev != PMC_EV_TSC_TSC ||
+ a->pm_mode != PMC_MODE_SC)
+ return EINVAL;
+ return 0;
+ }
+
+ /*
+ * P6 class events
+ */
+
+ ev = pm->pm_event;
+
+ if (ev < PMC_EV_P6_FIRST || ev > PMC_EV_P6_LAST)
+ return EINVAL;
+
+ if ((pevent = p6_find_event(ev)) == NULL)
+ return ESRCH;
+
+ if (!P6_EVENT_VALID_FOR_CPU(pevent, p6_cputype) ||
+ !P6_EVENT_VALID_FOR_CTR(pevent, (ri-1)))
+ return EINVAL;
+
+ /* For certain events, Pentium M differs from the stock P6 */
+ allowed_unitmask = 0;
+ if (p6_cputype == PMC_CPU_INTEL_PM) {
+ if (ev == PMC_EV_P6_L2_LD || ev == PMC_EV_P6_L2_LINES_IN ||
+ ev == PMC_EV_P6_L2_LINES_OUT)
+ allowed_unitmask = P6_EVSEL_TO_UMASK(0x3F);
+ else if (ev == PMC_EV_P6_L2_M_LINES_OUTM)
+ allowed_unitmask = P6_EVSEL_TO_UMASK(0x30);
+ } else
+ allowed_unitmask = P6_EVSEL_TO_UMASK(pevent->pm_unitmask);
+
+ unitmask = a->pm_p6_config & P6_EVSEL_UMASK_MASK;
+ if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
+ return EINVAL;
+
+ if (ev == PMC_EV_P6_MMX_UOPS_EXEC) /* hardcoded mask */
+ unitmask = P6_EVSEL_TO_UMASK(0x0F);
+
+ config = 0;
+
+ config |= P6_EVSEL_EVENT_SELECT(pevent->pm_evsel);
+
+	if (unitmask && (caps & PMC_CAP_QUALIFIER))
+ config |= unitmask;
+
+ if (caps & PMC_CAP_THRESHOLD)
+ config |= a->pm_p6_config & P6_EVSEL_CMASK_MASK;
+
+ /* set at least one of the 'usr' or 'os' caps */
+ if (caps & PMC_CAP_USER)
+ config |= P6_EVSEL_USR;
+ if (caps & PMC_CAP_SYSTEM)
+ config |= P6_EVSEL_OS;
+ if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
+ config |= (P6_EVSEL_USR|P6_EVSEL_OS);
+
+ if (caps & PMC_CAP_EDGE)
+ config |= P6_EVSEL_E;
+ if (caps & PMC_CAP_INVERT)
+ config |= P6_EVSEL_INV;
+ if (caps & PMC_CAP_INTERRUPT)
+ config |= P6_EVSEL_INT;
+
+ pm->pm_md.pm_p6.pm_p6_evsel = config;
+
+ PMCDBG(MDP,ALL,2, "p6-allocate config=0x%x", config);
+
+ return 0;
+}
+
+static int
+p6_release_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ (void) pm;
+
+ PMCDBG(MDP,REL,1, "p6-release cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[p6,%d] PHW pmc %p != pmc %p", __LINE__, phw->phw_pmc, pm));
+
+ return 0;
+}
+
+static int
+p6_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ const struct p6pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] starting cpu%d,ri%d with no pmc configured",
+ __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STA,1, "p6-start cpu=%d ri=%d", cpu, ri);
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0; /* TSC are always running */
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P6,
+ ("[p6,%d] unknown PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ config = pm->pm_md.pm_p6.pm_p6_evsel;
+
+ PMCDBG(MDP,STA,2, "p6-start/2 cpu=%d ri=%d evselmsr=0x%x config=0x%x",
+ cpu, ri, pd->pm_evsel_msr, config);
+
+ if (pd->pm_evsel_msr == P6_MSR_EVSEL0) /* CTR 0 */
+ wrmsr(pd->pm_evsel_msr, config | P6_EVSEL_EN);
+	else { /* CTR1 shares the enable bit with CTR 0 */
+ wrmsr(pd->pm_evsel_msr, config);
+ wrmsr(P6_MSR_EVSEL0, rdmsr(P6_MSR_EVSEL0) | P6_EVSEL_EN);
+ }
+ return 0;
+}
+
+static int
+p6_stop_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ struct p6pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ ("[p6,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row index %d", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
+ cpu, ri));
+
+ if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
+ return 0;
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P6,
+ ("[p6,%d] unknown PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ PMCDBG(MDP,STO,1, "p6-stop cpu=%d ri=%d", cpu, ri);
+
+ /*
+	 * If CTR0 is being turned off but CTR1 is active, we need to
+ * leave CTR0's EN field set. If CTR1 is being stopped, it
+ * suffices to zero its EVSEL register.
+ */
+
+ if (ri == 1 &&
+ pmc_pcpu[cpu]->pc_hwpmcs[2]->phw_pmc != NULL)
+ config = P6_EVSEL_EN;
+ else
+ config = 0;
+ wrmsr(pd->pm_evsel_msr, config);
+
+ PMCDBG(MDP,STO,2, "p6-stop/2 cpu=%d ri=%d config=0x%x", cpu, ri,
+ config);
+ return 0;
+}
+
+static int
+p6_intr(int cpu, uintptr_t eip)
+{
+ (void) cpu;
+ (void) eip;
+ return 0;
+}
+
+static int
+p6_describe(int cpu, int ri, struct pmc_info *pi,
+ struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ struct pmc_hw *phw;
+ struct p6pmc_descr *pd;
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &p6_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return error;
+
+ pi->pm_class = pd->pm_descr.pd_class;
+ pi->pm_caps = pd->pm_descr.pd_caps;
+ pi->pm_width = pd->pm_descr.pd_width;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return 0;
+}
+
+static int
+p6_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+	    ("[p6,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = p6_pmcdesc[ri].pm_pmc_msr;
+ return 0;
+}
+
+int
+pmc_initialize_p6(struct pmc_mdep *pmc_mdep)
+{
+ KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
+ ("[p6,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "p6-initialize");
+
+ switch (pmc_mdep->pmd_cputype) {
+
+ /*
+ * P6 Family Processors
+ */
+
+ case PMC_CPU_INTEL_P6:
+ case PMC_CPU_INTEL_CL:
+ case PMC_CPU_INTEL_PII:
+ case PMC_CPU_INTEL_PIII:
+ case PMC_CPU_INTEL_PM:
+
+ p6_cputype = pmc_mdep->pmd_cputype;
+
+ pmc_mdep->pmd_npmc = P6_NPMCS;
+ pmc_mdep->pmd_classes[1] = PMC_CLASS_P6;
+ pmc_mdep->pmd_nclasspmcs[1] = 2;
+
+ pmc_mdep->pmd_init = p6_init;
+ pmc_mdep->pmd_cleanup = p6_cleanup;
+ pmc_mdep->pmd_switch_in = p6_switch_in;
+ pmc_mdep->pmd_switch_out = p6_switch_out;
+ pmc_mdep->pmd_read_pmc = p6_read_pmc;
+ pmc_mdep->pmd_write_pmc = p6_write_pmc;
+ pmc_mdep->pmd_config_pmc = p6_config_pmc;
+ pmc_mdep->pmd_allocate_pmc = p6_allocate_pmc;
+ pmc_mdep->pmd_release_pmc = p6_release_pmc;
+ pmc_mdep->pmd_start_pmc = p6_start_pmc;
+ pmc_mdep->pmd_stop_pmc = p6_stop_pmc;
+ pmc_mdep->pmd_intr = p6_intr;
+ pmc_mdep->pmd_describe = p6_describe;
+ pmc_mdep->pmd_get_msr = p6_get_msr; /* i386 */
+
+ break;
+ default:
+ KASSERT(0,("[p6,%d] Unknown CPU type", __LINE__));
+ return ENOSYS;
+ }
+
+ return 0;
+}
diff --git a/sys/i386/i386/local_apic.c b/sys/i386/i386/local_apic.c
index 07c6ded..3e96461 100644
--- a/sys/i386/i386/local_apic.c
+++ b/sys/i386/i386/local_apic.c
@@ -108,7 +108,7 @@ static struct lvt lvts[LVT_MAX + 1] = {
{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 }, /* LINT1: NMI */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT }, /* Timer */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT }, /* Error */
- { 1, 1, 1, 1, APIC_LVT_DM_FIXED, 0 }, /* PMC */
+ { 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 }, /* PMC */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT }, /* Thermal */
};
@@ -304,6 +304,11 @@ lapic_setup(void)
/* Program LINT[01] LVT entries. */
lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);
+#ifdef HWPMC_HOOKS
+ /* Program the PMC LVT entry if present. */
+ if (maxlvt >= LVT_PMC)
+ lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);
+#endif
/* Program timer LVT and setup handler. */
lapic->lvt_timer = lvt_mode(la, LVT_TIMER, lapic->lvt_timer);
diff --git a/sys/i386/include/pmc_mdep.h b/sys/i386/include/pmc_mdep.h
new file mode 100644
index 0000000..06adf4c
--- /dev/null
+++ b/sys/i386/include/pmc_mdep.h
@@ -0,0 +1,184 @@
+/*-
+ * Copyright (c) 2003, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Machine dependent interfaces */
+
+#ifndef _MACHINE_PMC_MDEP_H
+#define _MACHINE_PMC_MDEP_H 1
+
+#include <sys/pmc.h>
+
+/* AMD K7 PMCs */
+
+#define K7_NPMCS 5 /* 1 TSC + 4 PMCs */
+
+#define K7_PMC_COUNTERMASK 0xFF000000
+#define K7_PMC_TO_COUNTER(x) (((x) << 24) & K7_PMC_COUNTERMASK)
+#define K7_PMC_INVERT (1 << 23)
+#define K7_PMC_ENABLE (1 << 22)
+#define K7_PMC_INT (1 << 20)
+#define K7_PMC_PC (1 << 19)
+#define K7_PMC_EDGE (1 << 18)
+#define K7_PMC_OS (1 << 17)
+#define K7_PMC_USR (1 << 16)
+
+#define K7_PMC_UNITMASK_M 0x10
+#define K7_PMC_UNITMASK_O 0x08
+#define K7_PMC_UNITMASK_E 0x04
+#define K7_PMC_UNITMASK_S 0x02
+#define K7_PMC_UNITMASK_I 0x01
+#define K7_PMC_UNITMASK_MOESI 0x1F
+
+#define K7_PMC_UNITMASK 0xFF00
+#define K7_PMC_EVENTMASK 0x00FF
+#define K7_PMC_TO_UNITMASK(x) (((x) << 8) & K7_PMC_UNITMASK)
+#define K7_PMC_TO_EVENTMASK(x) ((x) & 0xFF)
+#define K7_VALID_BITS (K7_PMC_COUNTERMASK | K7_PMC_INVERT | \
+ K7_PMC_ENABLE | K7_PMC_INT | K7_PMC_PC | K7_PMC_EDGE | K7_PMC_OS | \
+ K7_PMC_USR | K7_PMC_UNITMASK | K7_PMC_EVENTMASK)
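+
+/*
+ * For example, K7_PMC_TO_UNITMASK(K7_PMC_UNITMASK_MOESI) == 0x1F00
+ * and K7_PMC_TO_EVENTMASK(0x43) == 0x43, so the unit mask and the
+ * event code occupy bits 8-15 and 0-7 respectively.
+ */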
+
+/* Intel P4 PMCs */
+
+#define P4_NPMCS 19 /* 1 TSC + 18 PMCs */
+#define P4_NESCR 45
+#define P4_INVALID_PMC_INDEX -1
+#define P4_MAX_ESCR_PER_EVENT 2
+#define P4_MAX_PMC_PER_ESCR 3
+
+#define P4_CCCR_OVF (1 << 31)
+#define P4_CCCR_CASCADE (1 << 30)
+#define P4_CCCR_OVF_PMI_T1 (1 << 27)
+#define P4_CCCR_OVF_PMI_T0 (1 << 26)
+#define P4_CCCR_FORCE_OVF (1 << 25)
+#define P4_CCCR_EDGE (1 << 24)
+#define P4_CCCR_THRESHOLD_SHIFT 20
+#define P4_CCCR_THRESHOLD_MASK 0x00F00000
+#define P4_CCCR_TO_THRESHOLD(C) (((C) << P4_CCCR_THRESHOLD_SHIFT) & \
+ P4_CCCR_THRESHOLD_MASK)
+#define P4_CCCR_COMPLEMENT (1 << 19)
+#define P4_CCCR_COMPARE (1 << 18)
+#define P4_CCCR_ACTIVE_THREAD_SHIFT 16
+#define P4_CCCR_ACTIVE_THREAD_MASK 0x00030000
+#define P4_CCCR_TO_ACTIVE_THREAD(T) (((T) << P4_CCCR_ACTIVE_THREAD_SHIFT) & \
+ P4_CCCR_ACTIVE_THREAD_MASK)
+#define P4_CCCR_ESCR_SELECT_SHIFT 13
+#define P4_CCCR_ESCR_SELECT_MASK 0x0000E000
+#define P4_CCCR_TO_ESCR_SELECT(E) (((E) << P4_CCCR_ESCR_SELECT_SHIFT) & \
+ P4_CCCR_ESCR_SELECT_MASK)
+#define P4_CCCR_ENABLE (1 << 12)
+#define P4_CCCR_VALID_BITS (P4_CCCR_OVF | P4_CCCR_CASCADE | \
+ P4_CCCR_OVF_PMI_T1 | P4_CCCR_OVF_PMI_T0 | P4_CCCR_FORCE_OVF | \
+ P4_CCCR_EDGE | P4_CCCR_THRESHOLD_MASK | P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE | P4_CCCR_ESCR_SELECT_MASK | P4_CCCR_ENABLE)
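+
+/*
+ * For example, an ESCR select value of 5 is placed into CCCR bits
+ * 13-15: P4_CCCR_TO_ESCR_SELECT(5) == (5 << 13) == 0xA000.
+ */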
+
+#define P4_ESCR_EVENT_SELECT_SHIFT 25
+#define P4_ESCR_EVENT_SELECT_MASK 0x7E000000
+#define P4_ESCR_TO_EVENT_SELECT(E) (((E) << P4_ESCR_EVENT_SELECT_SHIFT) & \
+ P4_ESCR_EVENT_SELECT_MASK)
+#define P4_ESCR_EVENT_MASK_SHIFT 9
+#define P4_ESCR_EVENT_MASK_MASK 0x01FFFE00
+#define P4_ESCR_TO_EVENT_MASK(M) (((M) << P4_ESCR_EVENT_MASK_SHIFT) & \
+ P4_ESCR_EVENT_MASK_MASK)
+#define P4_ESCR_TAG_VALUE_SHIFT 5
+#define P4_ESCR_TAG_VALUE_MASK 0x000001E0
+#define P4_ESCR_TO_TAG_VALUE(T) (((T) << P4_ESCR_TAG_VALUE_SHIFT) & \
+ P4_ESCR_TAG_VALUE_MASK)
+#define P4_ESCR_TAG_ENABLE 0x00000010
+#define P4_ESCR_T0_OS 0x00000008
+#define P4_ESCR_T0_USR 0x00000004
+#define P4_ESCR_T1_OS 0x00000002
+#define P4_ESCR_T1_USR 0x00000001
+#define P4_ESCR_OS P4_ESCR_T0_OS
+#define P4_ESCR_USR P4_ESCR_T0_USR
+#define P4_ESCR_VALID_BITS (P4_ESCR_EVENT_SELECT_MASK | \
+ P4_ESCR_EVENT_MASK_MASK | P4_ESCR_TAG_VALUE_MASK | \
+	P4_ESCR_TAG_ENABLE | P4_ESCR_T0_OS | P4_ESCR_T0_USR | P4_ESCR_T1_OS | \
+	P4_ESCR_T1_USR)
+
+#define P4_PERFCTR_MASK 0xFFFFFFFFFFLL /* 40 bits */
+
+/* Intel PPro, Celeron, P-II, P-III, Pentium-M PMCS */
+
+#define P6_NPMCS 3 /* 1 TSC + 2 PMCs */
+
+#define P6_EVSEL_CMASK_MASK 0xFF000000
+#define P6_EVSEL_TO_CMASK(C) (((C) & 0xFF) << 24)
+#define P6_EVSEL_INV (1 << 23)
+#define P6_EVSEL_EN (1 << 22)
+#define P6_EVSEL_INT (1 << 20)
+#define P6_EVSEL_PC (1 << 19)
+#define P6_EVSEL_E (1 << 18)
+#define P6_EVSEL_OS (1 << 17)
+#define P6_EVSEL_USR (1 << 16)
+#define P6_EVSEL_UMASK_MASK 0x0000FF00
+#define P6_EVSEL_TO_UMASK(U) (((U) & 0xFF) << 8)
+#define P6_EVSEL_EVENT_SELECT(ES) ((ES) & 0xFF)
+#define P6_EVSEL_RESERVED (1 << 21)
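+
+/*
+ * For example, counting retired instructions (P6 event 0xC0) in
+ * both user and kernel mode uses the event select value
+ * P6_EVSEL_EVENT_SELECT(0xC0) | P6_EVSEL_USR | P6_EVSEL_OS |
+ * P6_EVSEL_EN == 0x004300C0.
+ */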
+
+#define P6_MSR_EVSEL0 0x0186
+#define P6_MSR_EVSEL1 0x0187
+#define P6_MSR_PERFCTR0 0x00C1
+#define P6_MSR_PERFCTR1 0x00C2
+
+#define P6_PERFCTR_MASK 0xFFFFFFFFFFLL /* 40 bits */
+
+/* Intel Pentium PMCs */
+
+#define PENTIUM_NPMCS 3 /* 1 TSC + 2 PMCs */
+#define PENTIUM_CESR_PC1 (1 << 25)
+#define PENTIUM_CESR_CC1_MASK 0x01C00000
+#define PENTIUM_CESR_TO_CC1(C) (((C) & 0x07) << 22)
+#define PENTIUM_CESR_ES1_MASK 0x003F0000
+#define PENTIUM_CESR_TO_ES1(E) (((E) & 0x3F) << 16)
+#define PENTIUM_CESR_PC0 (1 << 9)
+#define PENTIUM_CESR_CC0_MASK 0x000001C0
+#define PENTIUM_CESR_TO_CC0(C) (((C) & 0x07) << 6)
+#define PENTIUM_CESR_ES0_MASK 0x0000003F
+#define PENTIUM_CESR_TO_ES0(E) ((E) & 0x3F)
+#define PENTIUM_CESR_RESERVED 0xFC00FC00
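+
+/*
+ * For example, PENTIUM_CESR_TO_ES0(0x12) | PENTIUM_CESR_TO_CC0(2)
+ * == 0x12 | 0x80 == 0x92, programming event code 0x12 with
+ * counter-control code 2 on counter 0.
+ */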
+
+#define PENTIUM_MSR_CESR 0x11
+#define PENTIUM_MSR_CTR0 0x12
+#define PENTIUM_MSR_CTR1 0x13
+
+#ifdef _KERNEL
+
+/*
+ * Prototypes
+ */
+
+#if defined(__i386__)
+struct pmc_mdep *pmc_amd_initialize(void); /* AMD K7/K8 PMCs */
+struct pmc_mdep *pmc_intel_initialize(void); /* Intel PMCs */
+int pmc_initialize_p4(struct pmc_mdep *); /* Pentium IV PMCs */
+int pmc_initialize_p5(struct pmc_mdep *); /* Pentium PMCs */
+int pmc_initialize_p6(struct pmc_mdep *); /* Pentium Pro PMCs */
+#endif /* defined(__i386__) */
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_PMC_MDEP_H */
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 7a15f75..9783615 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -72,6 +72,10 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
+#ifdef HWPMC_HOOKS
+#include <sys/pmckern.h>
+#endif
+
#include <machine/reg.h>
MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
@@ -662,7 +666,25 @@ interpret:
p->p_args = newargs;
newargs = NULL;
}
+
+#ifdef HWPMC_HOOKS
+ /*
+ * Check if the process is using PMCs and if so do exec() time
+ * processing. This processing needs to happen AFTER the
+ * P_INEXEC flag is cleared.
+ *
+ * The proc lock needs to be released before taking the PMC
+ * SX.
+ */
+ if (PMC_PROC_IS_USING_PMCS(p)) {
+ PROC_UNLOCK(p);
+ PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC,
+ (void *) &credential_changing);
+ } else
+ PROC_UNLOCK(p);
+#else /* !HWPMC_HOOKS */
PROC_UNLOCK(p);
+#endif
/* Set values passed into the program in registers. */
if (p->p_sysent->sv_setregs)
diff --git a/sys/kern/kern_pmc.c b/sys/kern/kern_pmc.c
new file mode 100644
index 0000000..29be9c1
--- /dev/null
+++ b/sys/kern/kern_pmc.c
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 2003 Joseph Koshy
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef HWPMC_HOOKS
+
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+
+struct sx pmc_sx;
+
+/* Hook variable. */
+int (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
+
+/* Interrupt handler */
+int (*pmc_intr)(int cpu, uintptr_t pc) = NULL;
+
+/*
+ * Since PMC(4) may not be loaded in the current kernel, the
+ * convention followed is that a non-NULL value of 'pmc_hook' implies
+ * the presence of this kernel module.
+ *
+ * This requires us to protect 'pmc_hook' with a
+ * shared (sx) lock -- thus making the process of calling into PMC(4)
+ * somewhat more expensive than a simple 'if' check and indirect call.
+ */
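+
+/*
+ * In effect, callers of the hook do the following (a sketch; the
+ * exact macro forms live in <sys/pmckern.h>):
+ *
+ *	sx_slock(&pmc_sx);
+ *	if (pmc_hook != NULL)
+ *		(void) (*pmc_hook)(curthread, function, arg);
+ *	sx_sunlock(&pmc_sx);
+ */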
+
+
+SX_SYSINIT(pmc, &pmc_sx, "pmc shared lock");
+
+/*
+ * pmc_cpu_is_disabled
+ *
+ * return TRUE if the cpu specified has been disabled.
+ */
+
+int
+pmc_cpu_is_disabled(int cpu)
+{
+#ifdef SMP
+ return ((hlt_cpus_mask & (1 << cpu)) != 0);
+#else
+ return 0;
+#endif
+}
+
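+/*
+ * pmc_cpu_is_logical
+ *
+ * return TRUE if the cpu specified is a hyperthreading logical
+ * processor, as recorded in 'logical_cpus_mask'.
+ */
+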
+int
+pmc_cpu_is_logical(int cpu)
+{
+#ifdef SMP
+ return ((logical_cpus_mask & (1 << cpu)) != 0);
+#else
+ return 0;
+#endif
+}
+
+#endif /* HWPMC_HOOKS */
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 552ab56..e56d156 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -53,6 +53,10 @@ __FBSDID("$FreeBSD$");
#include <sys/turnstile.h>
#include <machine/smp.h>
+#ifdef HWPMC_HOOKS
+#include <sys/pmckern.h>
+#endif
+
/*
* INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
* the range 100-256 Hz (approximately).
@@ -959,8 +963,18 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
newtd = choosethread();
}
- if (td != newtd)
+ if (td != newtd) {
+#ifdef HWPMC_HOOKS
+ if (PMC_PROC_IS_USING_PMCS(td->td_proc))
+ PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
+#endif
cpu_switch(td, newtd);
+#ifdef HWPMC_HOOKS
+ if (PMC_PROC_IS_USING_PMCS(td->td_proc))
+ PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
+#endif
+ }
+
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
}
@@ -1284,6 +1298,13 @@ sched_unbind(struct thread* td)
}
int
+sched_is_bound(struct thread *td)
+{
+ mtx_assert(&sched_lock, MA_OWNED);
+ return (td->td_kse->ke_flags & KEF_BOUND);
+}
+
+int
sched_load(void)
{
return (sched_tdcnt);
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 850f07e..19a40ae 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -53,6 +53,10 @@ __FBSDID("$FreeBSD$");
#include <sys/ktrace.h>
#endif
+#ifdef HWPMC_HOOKS
+#include <sys/pmckern.h>
+#endif
+
#include <machine/cpu.h>
#include <machine/smp.h>
@@ -1391,8 +1395,18 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
kseq_load_add(KSEQ_SELF(), newtd->td_kse);
} else
newtd = choosethread();
- if (td != newtd)
+ if (td != newtd) {
+#ifdef HWPMC_HOOKS
+ if (PMC_PROC_IS_USING_PMCS(td->td_proc))
+ PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
+#endif
cpu_switch(td, newtd);
+#ifdef HWPMC_HOOKS
+ if (PMC_PROC_IS_USING_PMCS(td->td_proc))
+ PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
+#endif
+ }
+
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
@@ -1952,6 +1966,13 @@ sched_unbind(struct thread *td)
}
int
+sched_is_bound(struct thread *td)
+{
+ mtx_assert(&sched_lock, MA_OWNED);
+ return (td->td_kse->ke_flags & KEF_BOUND);
+}
+
+int
sched_load(void)
{
#ifdef SMP
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index b99c6aa..7bbc1a4 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -91,6 +91,7 @@ SUBDIR= ${_3dfx} \
hifn \
hme \
${_hptmv} \
+ hwpmc \
${_i2c} \
${_ibcs2} \
${_ichwd} \
diff --git a/sys/modules/hwpmc/Makefile b/sys/modules/hwpmc/Makefile
new file mode 100644
index 0000000..b345dee
--- /dev/null
+++ b/sys/modules/hwpmc/Makefile
@@ -0,0 +1,21 @@
+#
+# $FreeBSD$
+#
+
+.PATH: ${.CURDIR}/../../hwpmc
+
+KMOD= hwpmc
+
+SRCS= hwpmc_mod.c
+
+WARNS?= 2
+
+.if ${MACHINE_ARCH} == "i386"
+SRCS+= hwpmc_amd.c hwpmc_intel.c hwpmc_piv.c hwpmc_ppro.c hwpmc_pentium.c
+.endif
+
+.if ${MACHINE_ARCH} == "amd64"
+SRCS+= hwpmc_amd.c
+.endif
+
+.include <bsd.kmod.mk>
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 0193499..d36a20b 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -57,7 +57,7 @@
* is created, otherwise 1.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 600023 /* Master, propagated to newvers */
+#define __FreeBSD_version 600024 /* Master, propagated to newvers */
#ifndef LOCORE
#include <sys/types.h>
diff --git a/sys/sys/pmc.h b/sys/sys/pmc.h
new file mode 100644
index 0000000..ceabca4
--- /dev/null
+++ b/sys/sys/pmc.h
@@ -0,0 +1,1418 @@
+/*-
+ * Copyright (c) 2003, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_PMC_H_
+#define _SYS_PMC_H_
+
+#define PMC_MODULE_NAME "hwpmc"
+#define PMC_NAME_MAX 16 /* HW counter name size */
+#define PMC_CLASS_MAX 4 /* #classes of PMCs in a CPU */
+
+/* Kernel<->userland API version number [MMmmpppp] */
+
+#define PMC_VERSION_MAJOR 0x01
+#define PMC_VERSION_MINOR 0x01
+#define PMC_VERSION_PATCH 0x0001
+
+#define PMC_VERSION (PMC_VERSION_MAJOR << 24 | \
+ PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH)
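+
+/*
+ * For example, the 0x01/0x01/0x0001 numbers above yield
+ * PMC_VERSION == 0x01010001.
+ */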
+
+/*
+ * Kinds of CPUs known
+ */
+
+#define __PMC_CPUS() \
+ __PMC_CPU(AMD_K7, "AMD K7") \
+ __PMC_CPU(AMD_K8, "AMD K8") \
+ __PMC_CPU(INTEL_P5, "Intel Pentium") \
+ __PMC_CPU(INTEL_P6, "Intel Pentium Pro") \
+ __PMC_CPU(INTEL_CL, "Intel Celeron") \
+ __PMC_CPU(INTEL_PII, "Intel Pentium II") \
+ __PMC_CPU(INTEL_PIII, "Intel Pentium III") \
+ __PMC_CPU(INTEL_PM, "Intel Pentium M") \
+ __PMC_CPU(INTEL_PIV, "Intel Pentium IV")
+
+enum pmc_cputype {
+#undef __PMC_CPU
+#define __PMC_CPU(S,D) PMC_CPU_##S ,
+ __PMC_CPUS()
+};
+
+#define PMC_CPU_FIRST PMC_CPU_AMD_K7
+#define PMC_CPU_LAST PMC_CPU_INTEL_PIV
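+
+/*
+ * Each __PMC_CPU(S,D) entry in the table above expands to an
+ * enumerator PMC_CPU_##S; for example __PMC_CPU(AMD_K7, "AMD K7")
+ * becomes PMC_CPU_AMD_K7.
+ */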
+
+/*
+ * Classes of PMCs
+ */
+
+#define __PMC_CLASSES() \
+ __PMC_CLASS(TSC) /* CPU Timestamp counter */ \
+ __PMC_CLASS(K7) /* AMD K7 performance counters */ \
+ __PMC_CLASS(K8) /* AMD K8 performance counters */ \
+ __PMC_CLASS(P5) /* Intel Pentium counters */ \
+ __PMC_CLASS(P6) /* Intel Pentium Pro counters */ \
+ __PMC_CLASS(P4) /* Intel Pentium-IV counters */
+
+enum pmc_class {
+#undef __PMC_CLASS
+#define __PMC_CLASS(N) PMC_CLASS_##N ,
+ __PMC_CLASSES()
+};
+
+#define PMC_CLASS_FIRST PMC_CLASS_TSC
+#define PMC_CLASS_LAST PMC_CLASS_P4
+
+/*
+ * A PMC can be in the following states:
+ *
+ * Hardware states:
+ * DISABLED -- administratively prohibited from being used.
+ * FREE -- HW available for use
+ * Software states:
+ * ALLOCATED -- allocated
+ * STOPPED -- allocated, but not counting events
+ * RUNNING -- allocated, and in operation; 'pm_runcount'
+ * holds the number of CPUs using this PMC at
+ * a given instant
+ * DELETED -- being destroyed
+ */
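+
+/*
+ * A PMC thus typically moves FREE -> ALLOCATED -> RUNNING, with
+ * RUNNING and STOPPED alternating while it is in use, and becomes
+ * DELETED when it is being destroyed.
+ */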
+
+#define __PMC_HWSTATES() \
+ __PMC_STATE(DISABLED) \
+ __PMC_STATE(FREE)
+
+#define __PMC_SWSTATES() \
+ __PMC_STATE(ALLOCATED) \
+ __PMC_STATE(STOPPED) \
+ __PMC_STATE(RUNNING) \
+ __PMC_STATE(DELETED)
+
+#define __PMC_STATES() \
+ __PMC_HWSTATES() \
+ __PMC_SWSTATES()
+
+enum pmc_state {
+#undef __PMC_STATE
+#define __PMC_STATE(S) PMC_STATE_##S,
+ __PMC_STATES()
+ __PMC_STATE(MAX)
+};
+
+#define PMC_STATE_FIRST PMC_STATE_DISABLED
+#define PMC_STATE_LAST PMC_STATE_DELETED
+
+/*
+ * An allocated PMC may be used as a 'global' counter or as a
+ * 'thread-private' one.  Each such mode of use can be in either
+ * statistical sampling mode or in counting mode.  Thus a PMC in use
+ * is in one of the following four modes:
+ *
+ * SS i.e., SYSTEM STATISTICAL -- system-wide statistical profiling
+ * SC i.e., SYSTEM COUNTER -- system-wide counting mode
+ * TS i.e., THREAD STATISTICAL -- thread virtual, statistical profiling
+ * TC i.e., THREAD COUNTER -- thread virtual, counting mode
+ *
+ * Statistical profiling modes rely on the PMC periodically delivering
+ * an interrupt to the CPU (when the configured number of events have
+ * been measured), so the PMC must have the ability to generate
+ * interrupts.
+ *
+ * In counting modes, the PMC counts its configured events, with the
+ * value of the PMC being read whenever needed by its owner process.
+ *
+ * The thread specific modes "virtualize" the PMCs -- the PMCs appear
+ * to be thread private and count events only when the profiled thread
+ * actually executes on the CPU.
+ *
+ * The system-wide "global" modes keep the PMCs running all the time
+ * and are used to measure the behaviour of the whole system.
+ */
+
+#define __PMC_MODES() \
+ __PMC_MODE(SS, 0) \
+ __PMC_MODE(SC, 1) \
+ __PMC_MODE(TS, 2) \
+ __PMC_MODE(TC, 3)
+
+enum pmc_mode {
+#undef __PMC_MODE
+#define __PMC_MODE(M,N) PMC_MODE_##M = N,
+ __PMC_MODES()
+};
+
+#define PMC_MODE_FIRST PMC_MODE_SS
+#define PMC_MODE_LAST PMC_MODE_TC
+
+#define PMC_IS_COUNTING_MODE(mode) \
+ ((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC)
+#define PMC_IS_SYSTEM_MODE(mode) \
+ ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC)
+#define PMC_IS_SAMPLING_MODE(mode) \
+ ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS)
+#define PMC_IS_VIRTUAL_MODE(mode) \
+ ((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC)
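+
+/*
+ * For example, a PMC in PMC_MODE_TS is both a sampling-mode PMC
+ * (PMC_IS_SAMPLING_MODE) and a thread-virtual one
+ * (PMC_IS_VIRTUAL_MODE), but not a system-scope one.
+ */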
+
+/*
+ * PMC row disposition
+ */
+
+#define __PMC_DISPOSITIONS() \
+ __PMC_DISP(STANDALONE) /* global/disabled counters */ \
+ __PMC_DISP(FREE) /* free/available */ \
+ __PMC_DISP(THREAD) /* thread-virtual PMCs */ \
+ __PMC_DISP(UNKNOWN) /* sentinel */
+
+enum pmc_disp {
+#undef __PMC_DISP
+#define __PMC_DISP(D) PMC_DISP_##D ,
+ __PMC_DISPOSITIONS()
+};
+
+#define PMC_DISP_FIRST PMC_DISP_STANDALONE
+#define PMC_DISP_LAST PMC_DISP_THREAD
+
+/*
+ * PMC event codes
+ *
+ * __PMC_EV(CLASS, SYMBOLIC-NAME, VALUE, READABLE-NAME)
+ */
+
+/*
+ * AMD K7 Events, from "The AMD Athlon(tm) Processor x86 Code
+ * Optimization Guide" [Doc#22007K, Feb 2002]
+ */
+
+#define __PMC_EV_K7() \
+__PMC_EV(K7, DC_ACCESSES, k7-dc-accesses) \
+__PMC_EV(K7, DC_MISSES, k7-dc-misses) \
+__PMC_EV(K7, DC_REFILLS_FROM_L2, k7-dc-refills-from-l2) \
+__PMC_EV(K7, DC_REFILLS_FROM_SYSTEM, k7-dc-refills-from-system) \
+__PMC_EV(K7, DC_WRITEBACKS, k7-dc-writebacks) \
+__PMC_EV(K7, L1_DTLB_MISS_AND_L2_DTLB_HITS, \
+ k7-l1-dtlb-miss-and-l2-dtlb-hits) \
+__PMC_EV(K7, L1_AND_L2_DTLB_MISSES, k7-l1-and-l2-dtlb-misses) \
+__PMC_EV(K7, MISALIGNED_REFERENCES, k7-misaligned-references) \
+__PMC_EV(K7, IC_FETCHES, k7-ic-fetches) \
+__PMC_EV(K7, IC_MISSES, k7-ic-misses) \
+__PMC_EV(K7, L1_ITLB_MISSES, k7-l1-itlb-misses) \
+__PMC_EV(K7, L1_L2_ITLB_MISSES, k7-l1-l2-itlb-misses) \
+__PMC_EV(K7, RETIRED_INSTRUCTIONS, k7-retired-instructions) \
+__PMC_EV(K7, RETIRED_OPS, k7-retired-ops) \
+__PMC_EV(K7, RETIRED_BRANCHES, k7-retired-branches) \
+__PMC_EV(K7, RETIRED_BRANCHES_MISPREDICTED, \
+ k7-retired-branches-mispredicted) \
+__PMC_EV(K7, RETIRED_TAKEN_BRANCHES, k7-retired-taken-branches) \
+__PMC_EV(K7, RETIRED_TAKEN_BRANCHES_MISPREDICTED, \
+ k7-retired-taken-branches-mispredicted) \
+__PMC_EV(K7, RETIRED_FAR_CONTROL_TRANSFERS, \
+ k7-retired-far-control-transfers) \
+__PMC_EV(K7, RETIRED_RESYNC_BRANCHES, k7-retired-resync-branches) \
+__PMC_EV(K7, INTERRUPTS_MASKED_CYCLES, k7-interrupts-masked-cycles) \
+__PMC_EV(K7, INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, \
+ k7-interrupts-masked-while-pending-cycles) \
+__PMC_EV(K7, HARDWARE_INTERRUPTS, k7-hardware-interrupts)
+
+#define PMC_EV_K7_FIRST PMC_EV_K7_DC_ACCESSES
+#define PMC_EV_K7_LAST PMC_EV_K7_HARDWARE_INTERRUPTS
+
+/*
+ * Intel P4 Events, from "IA-32 Intel(r) Architecture Software
+ * Developer's Manual, Volume 3: System Programming Guide" [245472-012]
+ */
+
+#define __PMC_EV_P4() \
+__PMC_EV(P4, TC_DELIVER_MODE, p4-tc-deliver-mode) \
+__PMC_EV(P4, BPU_FETCH_REQUEST, p4-bpu-fetch-request) \
+__PMC_EV(P4, ITLB_REFERENCE, p4-itlb-reference) \
+__PMC_EV(P4, MEMORY_CANCEL, p4-memory-cancel) \
+__PMC_EV(P4, MEMORY_COMPLETE, p4-memory-complete) \
+__PMC_EV(P4, LOAD_PORT_REPLAY, p4-load-port-replay) \
+__PMC_EV(P4, STORE_PORT_REPLAY, p4-store-port-replay) \
+__PMC_EV(P4, MOB_LOAD_REPLAY, p4-mob-load-replay) \
+__PMC_EV(P4, PAGE_WALK_TYPE, p4-page-walk-type) \
+__PMC_EV(P4, BSQ_CACHE_REFERENCE, p4-bsq-cache-reference) \
+__PMC_EV(P4, IOQ_ALLOCATION, p4-ioq-allocation) \
+__PMC_EV(P4, IOQ_ACTIVE_ENTRIES, p4-ioq-active-entries) \
+__PMC_EV(P4, FSB_DATA_ACTIVITY, p4-fsb-data-activity) \
+__PMC_EV(P4, BSQ_ALLOCATION, p4-bsq-allocation) \
+__PMC_EV(P4, BSQ_ACTIVE_ENTRIES, p4-bsq-active-entries) \
+__PMC_EV(P4, SSE_INPUT_ASSIST, p4-sse-input-assist) \
+__PMC_EV(P4, PACKED_SP_UOP, p4-packed-sp-uop) \
+__PMC_EV(P4, PACKED_DP_UOP, p4-packed-dp-uop) \
+__PMC_EV(P4, SCALAR_SP_UOP, p4-scalar-sp-uop) \
+__PMC_EV(P4, SCALAR_DP_UOP, p4-scalar-dp-uop) \
+__PMC_EV(P4, 64BIT_MMX_UOP, p4-64bit-mmx-uop) \
+__PMC_EV(P4, 128BIT_MMX_UOP, p4-128bit-mmx-uop) \
+__PMC_EV(P4, X87_FP_UOP, p4-x87-fp-uop) \
+__PMC_EV(P4, X87_SIMD_MOVES_UOP, p4-x87-simd-moves-uop) \
+__PMC_EV(P4, GLOBAL_POWER_EVENTS, p4-global-power-events) \
+__PMC_EV(P4, TC_MS_XFER, p4-tc-ms-xfer) \
+__PMC_EV(P4, UOP_QUEUE_WRITES, p4-uop-queue-writes) \
+__PMC_EV(P4, RETIRED_MISPRED_BRANCH_TYPE, \
+ p4-retired-mispred-branch-type) \
+__PMC_EV(P4, RETIRED_BRANCH_TYPE, p4-retired-branch-type) \
+__PMC_EV(P4, RESOURCE_STALL, p4-resource-stall) \
+__PMC_EV(P4, WC_BUFFER, p4-wc-buffer) \
+__PMC_EV(P4, B2B_CYCLES, p4-b2b-cycles) \
+__PMC_EV(P4, BNR, p4-bnr) \
+__PMC_EV(P4, SNOOP, p4-snoop) \
+__PMC_EV(P4, RESPONSE, p4-response) \
+__PMC_EV(P4, FRONT_END_EVENT, p4-front-end-event) \
+__PMC_EV(P4, EXECUTION_EVENT, p4-execution-event) \
+__PMC_EV(P4, REPLAY_EVENT, p4-replay-event) \
+__PMC_EV(P4, INSTR_RETIRED, p4-instr-retired) \
+__PMC_EV(P4, UOPS_RETIRED, p4-uops-retired) \
+__PMC_EV(P4, UOP_TYPE, p4-uop-type) \
+__PMC_EV(P4, BRANCH_RETIRED, p4-branch-retired) \
+__PMC_EV(P4, MISPRED_BRANCH_RETIRED, p4-mispred-branch-retired) \
+__PMC_EV(P4, X87_ASSIST, p4-x87-assist) \
+__PMC_EV(P4, MACHINE_CLEAR, p4-machine-clear)
+
+#define PMC_EV_P4_FIRST PMC_EV_P4_TC_DELIVER_MODE
+#define PMC_EV_P4_LAST PMC_EV_P4_MACHINE_CLEAR
+
+/* Intel Pentium Pro, P-II, P-III and Pentium-M style events */
+
+#define __PMC_EV_P6() \
+__PMC_EV(P6, DATA_MEM_REFS, p6-data-mem-refs) \
+__PMC_EV(P6, DCU_LINES_IN, p6-dcu-lines-in) \
+__PMC_EV(P6, DCU_M_LINES_IN, p6-dcu-m-lines-in) \
+__PMC_EV(P6, DCU_M_LINES_OUT, p6-dcu-m-lines-out) \
+__PMC_EV(P6, DCU_MISS_OUTSTANDING, p6-dcu-miss-outstanding) \
+__PMC_EV(P6, IFU_FETCH, p6-ifu-fetch) \
+__PMC_EV(P6, IFU_FETCH_MISS, p6-ifu-fetch-miss) \
+__PMC_EV(P6, ITLB_MISS, p6-itlb-miss) \
+__PMC_EV(P6, IFU_MEM_STALL, p6-ifu-mem-stall) \
+__PMC_EV(P6, ILD_STALL, p6-ild-stall) \
+__PMC_EV(P6, L2_IFETCH, p6-l2-ifetch) \
+__PMC_EV(P6, L2_LD, p6-l2-ld) \
+__PMC_EV(P6, L2_ST, p6-l2-st) \
+__PMC_EV(P6, L2_LINES_IN, p6-l2-lines-in) \
+__PMC_EV(P6, L2_LINES_OUT, p6-l2-lines-out) \
+__PMC_EV(P6, L2_M_LINES_INM, p6-l2-m-lines-inm) \
+__PMC_EV(P6, L2_M_LINES_OUTM, p6-l2-m-lines-outm) \
+__PMC_EV(P6, L2_RQSTS, p6-l2-rqsts) \
+__PMC_EV(P6, L2_ADS, p6-l2-ads) \
+__PMC_EV(P6, L2_DBUS_BUSY, p6-l2-dbus-busy) \
+__PMC_EV(P6, L2_DBUS_BUSY_RD, p6-l2-dbus-busy-rd) \
+__PMC_EV(P6, BUS_DRDY_CLOCKS, p6-bus-drdy-clocks) \
+__PMC_EV(P6, BUS_LOCK_CLOCKS, p6-bus-lock-clocks) \
+__PMC_EV(P6, BUS_REQ_OUTSTANDING, p6-bus-req-outstanding) \
+__PMC_EV(P6, BUS_TRAN_BRD, p6-bus-tran-brd) \
+__PMC_EV(P6, BUS_TRAN_RFO, p6-bus-tran-rfo) \
+__PMC_EV(P6, BUS_TRANS_WB, p6-bus-trans-wb) \
+__PMC_EV(P6, BUS_TRAN_IFETCH, p6-bus-tran-ifetch) \
+__PMC_EV(P6, BUS_TRAN_INVAL, p6-bus-tran-inval) \
+__PMC_EV(P6, BUS_TRAN_PWR, p6-bus-tran-pwr) \
+__PMC_EV(P6, BUS_TRANS_P, p6-bus-trans-p) \
+__PMC_EV(P6, BUS_TRANS_IO, p6-bus-trans-io) \
+__PMC_EV(P6, BUS_TRAN_DEF, p6-bus-tran-def) \
+__PMC_EV(P6, BUS_TRAN_BURST, p6-bus-tran-burst) \
+__PMC_EV(P6, BUS_TRAN_ANY, p6-bus-tran-any) \
+__PMC_EV(P6, BUS_TRAN_MEM, p6-bus-tran-mem) \
+__PMC_EV(P6, BUS_DATA_RCV, p6-bus-data-rcv) \
+__PMC_EV(P6, BUS_BNR_DRV, p6-bus-bnr-drv) \
+__PMC_EV(P6, BUS_HIT_DRV, p6-bus-hit-drv) \
+__PMC_EV(P6, BUS_HITM_DRV, p6-bus-hitm-drv) \
+__PMC_EV(P6, BUS_SNOOP_STALL, p6-bus-snoop-stall) \
+__PMC_EV(P6, FLOPS, p6-flops) \
+__PMC_EV(P6, FP_COMPS_OPS_EXE, p6-fp-comps-ops-exe) \
+__PMC_EV(P6, FP_ASSIST, p6-fp-assist) \
+__PMC_EV(P6, MUL, p6-mul) \
+__PMC_EV(P6, DIV, p6-div) \
+__PMC_EV(P6, CYCLES_DIV_BUSY, p6-cycles-div-busy) \
+__PMC_EV(P6, LD_BLOCKS, p6-ld-blocks) \
+__PMC_EV(P6, SB_DRAINS, p6-sb-drains) \
+__PMC_EV(P6, MISALIGN_MEM_REF, p6-misalign-mem-ref) \
+__PMC_EV(P6, EMON_KNI_PREF_DISPATCHED, p6-emon-kni-pref-dispatched) \
+__PMC_EV(P6, EMON_KNI_PREF_MISS, p6-emon-kni-pref-miss) \
+__PMC_EV(P6, INST_RETIRED, p6-inst-retired) \
+__PMC_EV(P6, UOPS_RETIRED, p6-uops-retired) \
+__PMC_EV(P6, INST_DECODED, p6-inst-decoded) \
+__PMC_EV(P6, EMON_KNI_INST_RETIRED, p6-emon-kni-inst-retired) \
+__PMC_EV(P6, EMON_KNI_COMP_INST_RET, p6-emon-kni-comp-inst-ret) \
+__PMC_EV(P6, HW_INT_RX, p6-hw-int-rx) \
+__PMC_EV(P6, CYCLES_INT_MASKED, p6-cycles-int-masked) \
+__PMC_EV(P6, CYCLES_INT_PENDING_AND_MASKED,			\
+	p6-cycles-int-pending-and-masked)			\
+__PMC_EV(P6, BR_INST_RETIRED, p6-br-inst-retired) \
+__PMC_EV(P6, BR_MISS_PRED_RETIRED, p6-br-miss-pred-retired) \
+__PMC_EV(P6, BR_TAKEN_RETIRED, p6-br-taken-retired) \
+__PMC_EV(P6, BR_MISS_PRED_TAKEN_RET, p6-br-miss-pred-taken-ret) \
+__PMC_EV(P6, BR_INST_DECODED, p6-br-inst-decoded) \
+__PMC_EV(P6, BTB_MISSES, p6-btb-misses) \
+__PMC_EV(P6, BR_BOGUS, p6-br-bogus) \
+__PMC_EV(P6, BACLEARS, p6-baclears) \
+__PMC_EV(P6, RESOURCE_STALLS, p6-resource-stalls) \
+__PMC_EV(P6, PARTIAL_RAT_STALLS, p6-partial-rat-stalls) \
+__PMC_EV(P6, SEGMENT_REG_LOADS, p6-segment-reg-loads) \
+__PMC_EV(P6, CPU_CLK_UNHALTED, p6-cpu-clk-unhalted) \
+__PMC_EV(P6, MMX_INSTR_EXEC, p6-mmx-instr-exec) \
+__PMC_EV(P6, MMX_SAT_INSTR_EXEC, p6-mmx-sat-instr-exec) \
+__PMC_EV(P6, MMX_UOPS_EXEC, p6-mmx-uops-exec) \
+__PMC_EV(P6, MMX_INSTR_TYPE_EXEC, p6-mmx-instr-type-exec) \
+__PMC_EV(P6, FP_MMX_TRANS, p6-fp-mmx-trans) \
+__PMC_EV(P6, MMX_ASSIST, p6-mmx-assist) \
+__PMC_EV(P6, MMX_INSTR_RET, p6-mmx-instr-ret) \
+__PMC_EV(P6, SEG_RENAME_STALLS, p6-seg-rename-stalls) \
+__PMC_EV(P6, SEG_REG_RENAMES, p6-seg-reg-renames) \
+__PMC_EV(P6, RET_SEG_RENAMES, p6-ret-seg-renames) \
+__PMC_EV(P6, EMON_EST_TRANS, p6-emon-est-trans) \
+__PMC_EV(P6, EMON_THERMAL_TRIP, p6-emon-thermal-trip) \
+__PMC_EV(P6, BR_INST_EXEC, p6-br-inst-exec) \
+__PMC_EV(P6, BR_MISSP_EXEC, p6-br-missp-exec) \
+__PMC_EV(P6, BR_BAC_MISSP_EXEC, p6-br-bac-missp-exec) \
+__PMC_EV(P6, BR_CND_EXEC, p6-br-cnd-exec) \
+__PMC_EV(P6, BR_CND_MISSP_EXEC, p6-br-cnd-missp-exec) \
+__PMC_EV(P6, BR_IND_EXEC, p6-br-ind-exec) \
+__PMC_EV(P6, BR_IND_MISSP_EXEC, p6-br-ind-missp-exec) \
+__PMC_EV(P6, BR_RET_EXEC, p6-br-ret-exec) \
+__PMC_EV(P6, BR_RET_MISSP_EXEC, p6-br-ret-missp-exec) \
+__PMC_EV(P6, BR_RET_BAC_MISSP_EXEC, p6-br-ret-bac-missp-exec) \
+__PMC_EV(P6, BR_CALL_EXEC, p6-br-call-exec) \
+__PMC_EV(P6, BR_CALL_MISSP_EXEC, p6-br-call-missp-exec) \
+__PMC_EV(P6, BR_IND_CALL_EXEC, p6-br-ind-call-exec) \
+__PMC_EV(P6, EMON_SIMD_INSTR_RETIRED, p6-emon-simd-instr-retired) \
+__PMC_EV(P6, EMON_SYNCH_UOPS, p6-emon-synch-uops) \
+__PMC_EV(P6, EMON_ESP_UOPS, p6-emon-esp-uops) \
+__PMC_EV(P6, EMON_FUSED_UOPS_RET, p6-emon-fused-uops-ret) \
+__PMC_EV(P6, EMON_UNFUSION, p6-emon-unfusion) \
+__PMC_EV(P6, EMON_PREF_RQSTS_UP, p6-emon-pref-rqsts-up) \
+__PMC_EV(P6, EMON_PREF_RQSTS_DN, p6-emon-pref-rqsts-dn) \
+__PMC_EV(P6, EMON_SSE_SSE2_INST_RETIRED, \
+ p6-emon-sse-sse2-inst-retired) \
+__PMC_EV(P6, EMON_SSE_SSE2_COMP_INST_RETIRED, \
+ p6-emon-sse-sse2-comp-inst-retired)
+
+
+#define PMC_EV_P6_FIRST PMC_EV_P6_DATA_MEM_REFS
+#define PMC_EV_P6_LAST PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED
+
+/* AMD K8 PMCs */
+
+#define __PMC_EV_K8() \
+__PMC_EV(K8, FP_DISPATCHED_FPU_OPS, k8-fp-dispatched-fpu-ops) \
+__PMC_EV(K8, FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, \
+ k8-fp-cycles-with-no-fpu-ops-retired) \
+__PMC_EV(K8, FP_DISPATCHED_FPU_FAST_FLAG_OPS, \
+ k8-fp-dispatched-fpu-fast-flag-ops) \
+__PMC_EV(K8, LS_SEGMENT_REGISTER_LOAD, k8-ls-segment-register-load) \
+__PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE, \
+ k8-ls-microarchitectural-resync-by-self-modifying-code) \
+__PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, \
+ k8-ls-microarchitectural-resync-by-snoop) \
+__PMC_EV(K8, LS_BUFFER2_FULL, k8-ls-buffer2-full) \
+__PMC_EV(K8, LS_LOCKED_OPERATION, k8-ls-locked-operation) \
+__PMC_EV(K8, LS_MICROARCHITECTURAL_LATE_CANCEL, \
+ k8-ls-microarchitectural-late-cancel) \
+__PMC_EV(K8, LS_RETIRED_CFLUSH_INSTRUCTIONS, \
+ k8-ls-retired-cflush-instructions) \
+__PMC_EV(K8, LS_RETIRED_CPUID_INSTRUCTIONS, \
+ k8-ls-retired-cpuid-instructions) \
+__PMC_EV(K8, DC_ACCESS, k8-dc-access) \
+__PMC_EV(K8, DC_MISS, k8-dc-miss) \
+__PMC_EV(K8, DC_REFILL_FROM_L2, k8-dc-refill-from-l2) \
+__PMC_EV(K8, DC_REFILL_FROM_SYSTEM, k8-dc-refill-from-system) \
+__PMC_EV(K8, DC_COPYBACK, k8-dc-copyback) \
+__PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, \
+ k8-dc-l1-dtlb-miss-and-l2-dtlb-hit) \
+__PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, \
+ k8-dc-l1-dtlb-miss-and-l2-dtlb-miss) \
+__PMC_EV(K8, DC_MISALIGNED_DATA_REFERENCE, \
+ k8-dc-misaligned-data-reference) \
+__PMC_EV(K8, DC_MICROARCHITECTURAL_LATE_CANCEL, \
+ k8-dc-microarchitectural-late-cancel-of-an-access) \
+__PMC_EV(K8, DC_MICROARCHITECTURAL_EARLY_CANCEL, \
+ k8-dc-microarchitectural-early-cancel-of-an-access) \
+__PMC_EV(K8, DC_ONE_BIT_ECC_ERROR, k8-dc-one-bit-ecc-error) \
+__PMC_EV(K8, DC_DISPATCHED_PREFETCH_INSTRUCTIONS, \
+ k8-dc-dispatched-prefetch-instructions) \
+__PMC_EV(K8, DC_DCACHE_ACCESSES_BY_LOCKS, \
+ k8-dc-dcache-accesses-by-locks) \
+__PMC_EV(K8, BU_CPU_CLK_UNHALTED, k8-bu-cpu-clk-unhalted) \
+__PMC_EV(K8, BU_INTERNAL_L2_REQUEST, k8-bu-internal-l2-request) \
+__PMC_EV(K8, BU_FILL_REQUEST_L2_MISS, k8-bu-fill-request-l2-miss) \
+__PMC_EV(K8, BU_FILL_INTO_L2, k8-bu-fill-into-l2) \
+__PMC_EV(K8, IC_FETCH, k8-ic-fetch) \
+__PMC_EV(K8, IC_MISS, k8-ic-miss) \
+__PMC_EV(K8, IC_REFILL_FROM_L2, k8-ic-refill-from-l2) \
+__PMC_EV(K8, IC_REFILL_FROM_SYSTEM, k8-ic-refill-from-system) \
+__PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, \
+ k8-ic-l1-itlb-miss-and-l2-itlb-hit) \
+__PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, \
+ k8-ic-l1-itlb-miss-and-l2-itlb-miss) \
+__PMC_EV(K8, IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, \
+ k8-ic-microarchitectural-resync-by-snoop) \
+__PMC_EV(K8, IC_INSTRUCTION_FETCH_STALL, \
+ k8-ic-instruction-fetch-stall) \
+__PMC_EV(K8, IC_RETURN_STACK_HIT, k8-ic-return-stack-hit) \
+__PMC_EV(K8, IC_RETURN_STACK_OVERFLOW, k8-ic-return-stack-overflow) \
+__PMC_EV(K8, FR_RETIRED_X86_INSTRUCTIONS, \
+ k8-fr-retired-x86-instructions) \
+__PMC_EV(K8, FR_RETIRED_UOPS, k8-fr-retired-uops) \
+__PMC_EV(K8, FR_RETIRED_BRANCHES, k8-fr-retired-branches) \
+__PMC_EV(K8, FR_RETIRED_BRANCHES_MISPREDICTED, \
+ k8-fr-retired-branches-mispredicted) \
+__PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES, \
+ k8-fr-retired-taken-branches) \
+__PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, \
+ k8-fr-retired-taken-branches-mispredicted) \
+__PMC_EV(K8, FR_RETIRED_FAR_CONTROL_TRANSFERS, \
+ k8-fr-retired-far-control-transfers) \
+__PMC_EV(K8, FR_RETIRED_RESYNCS, k8-fr-retired-resyncs) \
+__PMC_EV(K8, FR_RETIRED_NEAR_RETURNS, k8-fr-retired-near-returns) \
+__PMC_EV(K8, FR_RETIRED_NEAR_RETURNS_MISPREDICTED, \
+ k8-fr-retired-near-returns-mispredicted) \
+__PMC_EV(K8, \
+ FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE, \
+ k8-fr-retired-taken-branches-mispredicted-by-addr-miscompare) \
+__PMC_EV(K8, FR_RETIRED_FPU_INSTRUCTIONS, \
+ k8-fr-retired-fpu-instructions) \
+__PMC_EV(K8, FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS, \
+ k8-fr-retired-fastpath-double-op-instructions) \
+__PMC_EV(K8, FR_INTERRUPTS_MASKED_CYCLES, \
+ k8-fr-interrupts-masked-cycles) \
+__PMC_EV(K8, FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, \
+ k8-fr-interrupts-masked-while-pending-cycles) \
+__PMC_EV(K8, FR_TAKEN_HARDWARE_INTERRUPTS, \
+ k8-fr-taken-hardware-interrupts) \
+__PMC_EV(K8, FR_DECODER_EMPTY, k8-fr-decoder-empty) \
+__PMC_EV(K8, FR_DISPATCH_STALLS, k8-fr-dispatch-stalls) \
+__PMC_EV(K8, FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE, \
+ k8-fr-dispatch-stall-from-branch-abort-to-retire) \
+__PMC_EV(K8, FR_DISPATCH_STALL_FOR_SERIALIZATION, \
+ k8-fr-dispatch-stall-for-serialization) \
+__PMC_EV(K8, FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, \
+ k8-fr-dispatch-stall-for-segment-load) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL, \
+ k8-fr-dispatch-stall-when-reorder-buffer-is-full) \
+__PMC_EV(K8, \
+ FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL, \
+ k8-fr-dispatch-stall-when-reservation-stations-are-full) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, \
+ k8-fr-dispatch-stall-when-fpu-is-full) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_LS_IS_FULL, \
+ k8-fr-dispatch-stall-when-ls-is-full) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET, \
+ k8-fr-dispatch-stall-when-waiting-for-all-to-be-quiet) \
+__PMC_EV(K8, \
+ FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING, \
+ k8-fr-dispatch-stall-when-far-xfer-or-resync-branch-pending) \
+__PMC_EV(K8, FR_FPU_EXCEPTIONS, k8-fr-fpu-exceptions) \
+__PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, \
+ k8-fr-number-of-breakpoints-for-dr0) \
+__PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, \
+ k8-fr-number-of-breakpoints-for-dr1) \
+__PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, \
+ k8-fr-number-of-breakpoints-for-dr2) \
+__PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, \
+ k8-fr-number-of-breakpoints-for-dr3) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, \
+ k8-nb-memory-controller-page-access-event) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, \
+ k8-nb-memory-controller-page-table-overflow) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED, \
+ k8-nb-memory-controller-dram-slots-missed) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_TURNAROUND, \
+ k8-nb-memory-controller-turnaround) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_BYPASS_SATURATION, \
+ k8-nb-memory-controller-bypass-saturation) \
+__PMC_EV(K8, NB_SIZED_COMMANDS, k8-nb-sized-commands) \
+__PMC_EV(K8, NB_PROBE_RESULT, k8-nb-probe-result) \
+__PMC_EV(K8, NB_HT_BUS0_BANDWIDTH, k8-nb-ht-bus0-bandwidth) \
+__PMC_EV(K8, NB_HT_BUS1_BANDWIDTH, k8-nb-ht-bus1-bandwidth) \
+__PMC_EV(K8, NB_HT_BUS2_BANDWIDTH, k8-nb-ht-bus2-bandwidth)
+
+#define PMC_EV_K8_FIRST PMC_EV_K8_FP_DISPATCHED_FPU_OPS
+#define PMC_EV_K8_LAST PMC_EV_K8_NB_HT_BUS2_BANDWIDTH
+
+
+/* Intel Pentium Events */
+#define __PMC_EV_P5() \
+__PMC_EV(P5, DATA_READ, p5-data-read) \
+__PMC_EV(P5, DATA_WRITE, p5-data-write) \
+__PMC_EV(P5, DATA_TLB_MISS, p5-data-tlb-miss) \
+__PMC_EV(P5, DATA_READ_MISS, p5-data-read-miss) \
+__PMC_EV(P5, DATA_WRITE_MISS, p5-data-write-miss) \
+__PMC_EV(P5, WRITE_HIT_TO_M_OR_E_STATE_LINES, \
+ p5-write-hit-to-m-or-e-state-lines) \
+__PMC_EV(P5, DATA_CACHE_LINES_WRITTEN_BACK,			\
+	p5-data-cache-lines-written-back)			\
+__PMC_EV(P5, EXTERNAL_SNOOPS, p5-external-snoops) \
+__PMC_EV(P5, EXTERNAL_DATA_CACHE_SNOOP_HITS, \
+ p5-external-data-cache-snoop-hits) \
+__PMC_EV(P5, MEMORY_ACCESSES_IN_BOTH_PIPES, \
+ p5-memory-accesses-in-both-pipes) \
+__PMC_EV(P5, BANK_CONFLICTS, p5-bank-conflicts) \
+__PMC_EV(P5, MISALIGNED_DATA_OR_IO_REFERENCES, \
+ p5-misaligned-data-or-io-references) \
+__PMC_EV(P5, CODE_READ, p5-code-read) \
+__PMC_EV(P5, CODE_TLB_MISS, p5-code-tlb-miss) \
+__PMC_EV(P5, CODE_CACHE_MISS, p5-code-cache-miss) \
+__PMC_EV(P5, ANY_SEGMENT_REGISTER_LOADED, \
+ p5-any-segment-register-loaded) \
+__PMC_EV(P5, BRANCHES, p5-branches) \
+__PMC_EV(P5, BTB_HITS, p5-btb-hits) \
+__PMC_EV(P5, TAKEN_BRANCH_OR_BTB_HIT, \
+ p5-taken-branch-or-btb-hit) \
+__PMC_EV(P5, PIPELINE_FLUSHES, p5-pipeline-flushes) \
+__PMC_EV(P5, INSTRUCTIONS_EXECUTED, p5-instructions-executed) \
+__PMC_EV(P5, INSTRUCTIONS_EXECUTED_V_PIPE, \
+ p5-instructions-executed-v-pipe) \
+__PMC_EV(P5, BUS_CYCLE_DURATION, p5-bus-cycle-duration) \
+__PMC_EV(P5, WRITE_BUFFER_FULL_STALL_DURATION, \
+ p5-write-buffer-full-stall-duration) \
+__PMC_EV(P5, WAITING_FOR_DATA_MEMORY_READ_STALL_DURATION, \
+ p5-waiting-for-data-memory-read-stall-duration) \
+__PMC_EV(P5, STALL_ON_WRITE_TO_AN_E_OR_M_STATE_LINE, \
+ p5-stall-on-write-to-an-e-or-m-state-line) \
+__PMC_EV(P5, LOCKED_BUS_CYCLE, p5-locked-bus-cycle) \
+__PMC_EV(P5, IO_READ_OR_WRITE_CYCLE, p5-io-read-or-write-cycle) \
+__PMC_EV(P5, NONCACHEABLE_MEMORY_READS, \
+ p5-noncacheable-memory-reads) \
+__PMC_EV(P5, PIPELINE_AGI_STALLS, p5-pipeline-agi-stalls) \
+__PMC_EV(P5, FLOPS, p5-flops) \
+__PMC_EV(P5, BREAKPOINT_MATCH_ON_DR0_REGISTER, \
+ p5-breakpoint-match-on-dr0-register) \
+__PMC_EV(P5, BREAKPOINT_MATCH_ON_DR1_REGISTER, \
+ p5-breakpoint-match-on-dr1-register) \
+__PMC_EV(P5, BREAKPOINT_MATCH_ON_DR2_REGISTER, \
+ p5-breakpoint-match-on-dr2-register) \
+__PMC_EV(P5, BREAKPOINT_MATCH_ON_DR3_REGISTER, \
+ p5-breakpoint-match-on-dr3-register) \
+__PMC_EV(P5, HARDWARE_INTERRUPTS, p5-hardware-interrupts) \
+__PMC_EV(P5, DATA_READ_OR_WRITE, p5-data-read-or-write) \
+__PMC_EV(P5, DATA_READ_MISS_OR_WRITE_MISS, \
+ p5-data-read-miss-or-write-miss) \
+__PMC_EV(P5, BUS_OWNERSHIP_LATENCY, p5-bus-ownership-latency) \
+__PMC_EV(P5, BUS_OWNERSHIP_TRANSFERS, p5-bus-ownership-transfers) \
+__PMC_EV(P5, MMX_INSTRUCTIONS_EXECUTED_U_PIPE, \
+ p5-mmx-instructions-executed-u-pipe) \
+__PMC_EV(P5, MMX_INSTRUCTIONS_EXECUTED_V_PIPE, \
+ p5-mmx-instructions-executed-v-pipe) \
+__PMC_EV(P5, CACHE_M_LINE_SHARING, p5-cache-m-line-sharing) \
+__PMC_EV(P5, CACHE_LINE_SHARING, p5-cache-line-sharing) \
+__PMC_EV(P5, EMMS_INSTRUCTIONS_EXECUTED, \
+ p5-emms-instructions-executed) \
+__PMC_EV(P5, TRANSITIONS_BETWEEN_MMX_AND_FP_INSTRUCTIONS, \
+ p5-transitions-between-mmx-and-fp-instructions) \
+__PMC_EV(P5, BUS_UTILIZATION_DUE_TO_PROCESSOR_ACTIVITY, \
+ p5-bus-utilization-due-to-processor-activity) \
+__PMC_EV(P5, WRITES_TO_NONCACHEABLE_MEMORY, \
+ p5-writes-to-noncacheable-memory) \
+__PMC_EV(P5, SATURATING_MMX_INSTRUCTIONS_EXECUTED, \
+ p5-saturating-mmx-instructions-executed) \
+__PMC_EV(P5, SATURATIONS_PERFORMED, p5-saturations-performed) \
+__PMC_EV(P5, NUMBER_OF_CYCLES_NOT_IN_HALT_STATE, \
+ p5-number-of-cycles-not-in-halt-state) \
+__PMC_EV(P5, DATA_CACHE_TLB_MISS_STALL_DURATION, \
+ p5-data-cache-tlb-miss-stall-duration) \
+__PMC_EV(P5, MMX_INSTRUCTION_DATA_READS, \
+ p5-mmx-instruction-data-reads) \
+__PMC_EV(P5, MMX_INSTRUCTION_DATA_READ_MISSES, \
+ p5-mmx-instruction-data-read-misses) \
+__PMC_EV(P5, FLOATING_POINT_STALLS_DURATION, \
+ p5-floating-point-stalls-duration) \
+__PMC_EV(P5, TAKEN_BRANCHES, p5-taken-branches) \
+__PMC_EV(P5, D1_STARVATION_AND_FIFO_IS_EMPTY, \
+ p5-d1-starvation-and-fifo-is-empty) \
+__PMC_EV(P5, D1_STARVATION_AND_ONLY_ONE_INSTRUCTION_IN_FIFO,	\
+	p5-d1-starvation-and-only-one-instruction-in-fifo)	\
+__PMC_EV(P5, MMX_INSTRUCTION_DATA_WRITES, \
+ p5-mmx-instruction-data-writes) \
+__PMC_EV(P5, MMX_INSTRUCTION_DATA_WRITE_MISSES, \
+ p5-mmx-instruction-data-write-misses) \
+__PMC_EV(P5, PIPELINE_FLUSHES_DUE_TO_WRONG_BRANCH_PREDICTIONS, \
+ p5-pipeline-flushes-due-to-wrong-branch-predictions) \
+__PMC_EV(P5, \
+ PIPELINE_FLUSHES_DUE_TO_WRONG_BRANCH_PREDICTIONS_RESOLVED_IN_WB_STAGE, \
+ p5-pipeline-flushes-due-to-wrong-branch-predictions-resolved-in-wb-stage) \
+__PMC_EV(P5, MISALIGNED_DATA_MEMORY_REFERENCE_ON_MMX_INSTRUCTIONS, \
+ p5-misaligned-data-memory-reference-on-mmx-instructions) \
+__PMC_EV(P5, PIPELINE_STALL_FOR_MMX_INSTRUCTION_DATA_MEMORY_READS, \
+ p5-pipeline-stall-for-mmx-instruction-data-memory-reads) \
+__PMC_EV(P5, MISPREDICTED_OR_UNPREDICTED_RETURNS, \
+ p5-mispredicted-or-unpredicted-returns) \
+__PMC_EV(P5, PREDICTED_RETURNS, p5-predicted-returns) \
+__PMC_EV(P5, MMX_MULTIPLY_UNIT_INTERLOCK, \
+ p5-mmx-multiply-unit-interlock) \
+__PMC_EV(P5, MOVD_MOVQ_STORE_STALL_DUE_TO_PREVIOUS_MMX_OPERATION, \
+ p5-movd-movq-store-stall-due-to-previous-mmx-operation) \
+__PMC_EV(P5, RETURNS, p5-returns) \
+__PMC_EV(P5, BTB_FALSE_ENTRIES, p5-btb-false-entries) \
+__PMC_EV(P5, BTB_MISS_PREDICTION_ON_NOT_TAKEN_BRANCH, \
+ p5-btb-miss-prediction-on-not-taken-branch) \
+__PMC_EV(P5, \
+ FULL_WRITE_BUFFER_STALL_DURATION_WHILE_EXECUTING_MMX_INSTRUCTIONS, \
+ p5-full-write-buffer-stall-duration-while-executing-mmx-instructions) \
+__PMC_EV(P5, STALL_ON_MMX_INSTRUCTION_WRITE_TO_E_OR_M_STATE_LINE, \
+	p5-stall-on-mmx-instruction-write-to-e-or-m-state-line)
+
+#define PMC_EV_P5_FIRST PMC_EV_P5_DATA_READ
+#define PMC_EV_P5_LAST \
+ PMC_EV_P5_STALL_ON_MMX_INSTRUCTION_WRITE_TO_E_OR_M_STATE_LINE
+
+/* timestamp counters. */
+#define __PMC_EV_TSC() \
+ __PMC_EV(TSC, TSC, tsc)
+
+/* All known PMC events */
+#define __PMC_EVENTS() \
+ __PMC_EV_TSC() \
+ __PMC_EV_K7() \
+ __PMC_EV_P6() \
+ __PMC_EV_P4() \
+ __PMC_EV_K8() \
+	__PMC_EV_P5()
+
+enum pmc_event {
+#undef __PMC_EV
+#define __PMC_EV(C,N,D) PMC_EV_ ## C ## _ ## N ,
+ __PMC_EVENTS()
+};
+
+#define PMC_EVENT_FIRST PMC_EV_TSC_TSC
+#define PMC_EVENT_LAST PMC_EV_P5_LAST
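The event lists above are X-macros: each consumer redefines __PMC_EV and then expands __PMC_EVENTS(), as the enum just above does. As a minimal sketch of the same technique, the identical list can generate a code-to-name table; the struct and table names below are hypothetical, not part of this patch:

    /* Sketch: derive a code-to-name table from the same event list. */
    struct pmc_event_descr {
            enum pmc_event  ev_code;
            const char      *ev_name;
    };

    static const struct pmc_event_descr event_table[] = {
    #undef  __PMC_EV
    #define __PMC_EV(C,N,D) { PMC_EV_ ## C ## _ ## N, #D },
            __PMC_EVENTS()
    };

A table of this kind is what lets libpmc translate between event codes and the lowercase spellings such as "k7-dc-accesses" without duplicating the list.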
+
+/*
+ * Counter capabilities
+ *
+ * __PMC_CAPS(NAME, VALUE, DESCRIPTION)
+ */
+
+#define __PMC_CAPS() \
+ __PMC_CAP(INTERRUPT, 0, "generate interrupts") \
+ __PMC_CAP(USER, 1, "count user-mode events") \
+ __PMC_CAP(SYSTEM, 2, "count system-mode events") \
+ __PMC_CAP(EDGE, 3, "do edge detection of events") \
+ __PMC_CAP(THRESHOLD, 4, "ignore events below a threshold") \
+ __PMC_CAP(READ, 5, "read PMC counter") \
+ __PMC_CAP(WRITE, 6, "reprogram PMC counter") \
+	__PMC_CAP(INVERT,	7, "invert comparison sense")	\
+ __PMC_CAP(QUALIFIER, 8, "further qualify monitored events") \
+ __PMC_CAP(PRECISE, 9, "perform precise sampling") \
+ __PMC_CAP(TAGGING, 10, "tag upstream events") \
+ __PMC_CAP(CASCADE, 11, "cascade counters")
+
+enum pmc_caps
+{
+#undef __PMC_CAP
+#define __PMC_CAP(NAME, VALUE, DESCR) PMC_CAP_##NAME = (1 << VALUE) ,
+ __PMC_CAPS()
+};
+
+#define PMC_CAP_FIRST PMC_CAP_INTERRUPT
+#define PMC_CAP_LAST PMC_CAP_CASCADE
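Each __PMC_CAP entry becomes a distinct bit, so a counter's capability set is an ordinary bitmask. A minimal sketch, assuming a filled-in 'struct pmc_info' named 'pi' (defined later in this header):

    /* Does this counter support interrupt-driven sampling? */
    if (pi.pm_caps & PMC_CAP_INTERRUPT)
            printf("%s: can interrupt\n", pi.pm_name);

    /* Require counting in both user and system mode. */
    uint32_t need = PMC_CAP_USER | PMC_CAP_SYSTEM;
    if ((pi.pm_caps & need) == need)
            printf("%s: counts in both modes\n", pi.pm_name);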
+
+/*
+ * PMC SYSCALL INTERFACE
+ */
+
+/*
+ * "PMC_OPS" -- these are the commands recognized by the kernel
+ * module, and are used when performing a system call from userland.
+ */
+
+#define __PMC_OPS() \
+ __PMC_OP(CONFIGURELOG, "Set log file") \
+ __PMC_OP(GETCPUINFO, "Get system CPU information") \
+ __PMC_OP(GETDRIVERSTATS, "Get driver statistics") \
+ __PMC_OP(GETMODULEVERSION, "Get module version") \
+ __PMC_OP(GETPMCINFO, "Get per-cpu PMC information") \
+ __PMC_OP(PMCADMIN, "Set PMC state") \
+ __PMC_OP(PMCALLOCATE, "Allocate and configure a PMC") \
+ __PMC_OP(PMCATTACH, "Attach a PMC to a process") \
+ __PMC_OP(PMCDETACH, "Detach a PMC from a process") \
+ __PMC_OP(PMCRELEASE, "Release a PMC") \
+ __PMC_OP(PMCRW, "Read/Set a PMC") \
+ __PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate") \
+ __PMC_OP(PMCSTART, "Start a PMC") \
+	__PMC_OP(PMCSTOP, "Stop a PMC")				\
+ __PMC_OP(WRITELOG, "Write a log file entry") \
+ __PMC_OP(PMCX86GETMSR, "(x86 architectures) retrieve MSR")
+
+enum pmc_ops {
+#undef __PMC_OP
+#define __PMC_OP(N, D) PMC_OP_##N,
+ __PMC_OPS()
+};
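All of these ops travel through a single module-provided system call that takes an op code plus a pointer to an op-specific argument structure (see 'struct pmc_syscall_args' below, and the PMC_CALL macro in pmccontrol.c). A hedged sketch from userland, assuming the dynamically assigned syscall number has already been looked up into 'pmc_syscall' and 'pmcid' holds an allocated PMC:

    /* Sketch: start a previously allocated PMC. */
    struct pmc_op_simple args;

    args.pm_pmcid = pmcid;          /* id returned by OP PMCALLOCATE */
    if (syscall(pmc_syscall, PMC_OP_PMCSTART, &args) < 0)
            err(EX_OSERR, "PMC_OP_PMCSTART failed");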
+
+
+/*
+ * Flags used in operations.
+ */
+
+#define	PMC_F_FORCE		0x00000001 /* OP ADMIN: force operation */
+#define	PMC_F_DESCENDANTS	0x00000002 /* OP ALLOCATE: track descendants */
+#define	PMC_F_LOG_TC_CSW	0x00000004 /* OP CONFIGURELOG: ctx switches */
+#define	PMC_F_LOG_TC_PROCEXIT	0x00000008 /* OP CONFIGURELOG: log proc exits */
+#define	PMC_F_NEWVALUE		0x00000010 /* OP RW: write new value */
+#define	PMC_F_OLDVALUE		0x00000020 /* OP RW: get old value */
+
+/*
+ * Cookies used to denote allocated PMCs, and the values of PMCs.
+ */
+
+typedef uint32_t pmc_id_t;
+typedef uint64_t pmc_value_t;
+
+#define PMC_ID_INVALID (~ (pmc_id_t) 0)
+
+/*
+ * Data structures for system calls supported by the pmc driver.
+ */
+
+/*
+ * OP PMCALLOCATE
+ *
+ * Allocate a PMC on the named CPU.
+ */
+
+#define	PMC_CPU_ANY	(~0)
+
+struct pmc_op_pmcallocate {
+ uint32_t pm_caps; /* PMC_CAP_* */
+ uint32_t pm_cpu; /* CPU number or PMC_CPU_ANY */
+ enum pmc_class pm_class; /* class of PMC desired */
+ enum pmc_event pm_ev; /* [enum pmc_event] desired */
+ uint32_t pm_flags; /* additional modifiers PMC_F_* */
+ enum pmc_mode pm_mode; /* desired mode */
+ pmc_id_t pm_pmcid; /* [return] process pmc id */
+
+ /*
+ * Machine dependent extensions
+ */
+
+#if __i386__
+ uint32_t pm_config1;
+ uint32_t pm_config2;
+#define pm_amd_config pm_config1
+#define pm_p4_cccrconfig pm_config1
+#define pm_p4_escrconfig pm_config2
+#define pm_p6_config pm_config1
+
+#elif __amd64__
+ uint32_t pm_k8_config;
+#define pm_amd_config pm_k8_config
+#endif
+};
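A sketch of filling in an allocation request for a process-virtual counting PMC; the class and mode constants come from the enums earlier in this header, and the particular choices here are illustrative only:

    /* Sketch: request a K7 data-cache access counter. */
    struct pmc_op_pmcallocate pa;

    memset(&pa, 0, sizeof(pa));
    pa.pm_class = PMC_CLASS_K7;
    pa.pm_ev    = PMC_EV_K7_DC_ACCESSES;
    pa.pm_mode  = PMC_MODE_TC;          /* process-virtual counting */
    pa.pm_cpu   = PMC_CPU_ANY;
    pa.pm_caps  = PMC_CAP_READ | PMC_CAP_WRITE;

    if (syscall(pmc_syscall, PMC_OP_PMCALLOCATE, &pa) == 0)
            pmcid = pa.pm_pmcid;        /* cookie for all later operations */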
+
+/*
+ * OP PMCADMIN
+ *
+ * Set the administrative state (i.e., whether enabled or disabled) of
+ * a PMC 'pm_pmc' on CPU 'pm_cpu'. Note that 'pm_pmc' specifies an
+ * absolute PMC number and need not have been first allocated by the
+ * calling process.
+ */
+
+struct pmc_op_pmcadmin {
+ int pm_cpu; /* CPU# */
+ uint32_t pm_flags; /* flags */
+ int pm_pmc; /* PMC# */
+ enum pmc_state pm_state; /* desired state */
+};
+
+/*
+ * OP PMCATTACH / OP PMCDETACH
+ *
+ * Attach/detach a PMC and a process.
+ */
+
+struct pmc_op_pmcattach {
+ pmc_id_t pm_pmc; /* PMC to attach to */
+ pid_t pm_pid; /* target process */
+};
+
+/*
+ * OP PMCSETCOUNT
+ *
+ * Set the sampling rate (i.e., the reload count) for statistical counters.
+ * The PMC named by 'pm_pmcid' needs to have been previously allocated
+ * using PMCALLOCATE.
+ */
+
+struct pmc_op_pmcsetcount {
+ pmc_value_t pm_count; /* initial/sample count */
+ pmc_id_t pm_pmcid; /* PMC id to set */
+};
+
+
+/*
+ * OP PMCRW
+ *
+ * Read or write the value of a PMC named by 'pm_pmcid'.  The PMC needs
+ * to have been previously allocated using PMCALLOCATE.
+ */
+
+struct pmc_op_pmcrw {
+	uint32_t	pm_flags;	/* PMC_F_{OLD,NEW}VALUE */
+	pmc_id_t	pm_pmcid;	/* pmc id */
+	pmc_value_t	pm_value;	/* new & returned value */
+};
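Setting both flag bits requests a combined operation: the new value is loaded and the previous value is returned through the same 'pm_value' field. A minimal sketch:

    /* Sketch: zero the counter and read back what it held. */
    struct pmc_op_pmcrw rw;

    rw.pm_pmcid = pmcid;
    rw.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
    rw.pm_value = 0;                    /* value to load */

    if (syscall(pmc_syscall, PMC_OP_PMCRW, &rw) == 0)
            printf("old count %ju\n", (uintmax_t) rw.pm_value);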
+
+
+/*
+ * OP GETPMCINFO
+ *
+ * Retrieve PMC state for a named CPU.  The caller is expected to
+ * allocate space for 'npmc' 'struct pmc_info' structures for the
+ * return values.
+ */
+
+struct pmc_info {
+ uint32_t pm_caps; /* counter capabilities */
+ enum pmc_class pm_class; /* enum pmc_class */
+ int pm_enabled; /* whether enabled */
+ enum pmc_event pm_event; /* current event */
+ uint32_t pm_flags; /* counter flags */
+ enum pmc_mode pm_mode; /* current mode [enum pmc_mode] */
+ pid_t pm_ownerpid; /* owner, or -1 */
+ pmc_value_t pm_reloadcount; /* sampling counters only */
+	enum pmc_disp	pm_rowdisp;	/* FREE, THREAD or STANDALONE */
+ uint32_t pm_width; /* width of the PMC */
+ char pm_name[PMC_NAME_MAX]; /* pmc name */
+};
+
+struct pmc_op_getpmcinfo {
+ int32_t pm_cpu; /* 0 <= cpu < mp_maxid */
+ struct pmc_info pm_pmcs[]; /* space for 'npmc' structures */
+};
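Because of the trailing flexible array member, the caller sizes the buffer from the per-CPU PMC count. A sketch, assuming 'npmc' and 'cpu' were obtained beforehand (e.g. via OP GETCPUINFO):

    /* Sketch: fetch the state of every PMC on CPU 'cpu'. */
    struct pmc_op_getpmcinfo *pi;

    pi = malloc(sizeof(*pi) + npmc * sizeof(struct pmc_info));
    if (pi == NULL)
            err(EX_OSERR, "malloc");
    pi->pm_cpu = cpu;
    if (syscall(pmc_syscall, PMC_OP_GETPMCINFO, pi) == 0)
            printf("pmc 0 is %s\n", pi->pm_pmcs[0].pm_name);
    free(pi);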
+
+
+/*
+ * OP GETCPUINFO
+ *
+ * Retrieve system CPU information.
+ */
+
+struct pmc_op_getcpuinfo {
+ enum pmc_cputype pm_cputype; /* what kind of CPU */
+ uint32_t pm_nclass; /* #classes of PMCs */
+ uint32_t pm_ncpu; /* number of CPUs */
+ uint32_t pm_npmc; /* #PMCs per CPU */
+ enum pmc_class pm_classes[PMC_CLASS_MAX];
+};
+
+/*
+ * OP CONFIGURELOG
+ *
+ * Configure a log file to which system-wide statistics are written.
+ */
+
+struct pmc_op_configurelog {
+ int pm_flags;
+ int pm_logfd; /* logfile fd (or -1) */
+};
+
+/*
+ * OP GETDRIVERSTATS
+ *
+ * Retrieve pmc(4) driver-wide statistics.
+ */
+
+struct pmc_op_getdriverstats {
+ int pm_intr_ignored; /* #interrupts ignored */
+ int pm_intr_processed; /* #interrupts processed */
+ int pm_syscalls; /* #syscalls */
+ int pm_syscall_errors; /* #syscalls with errors */
+};
+
+/*
+ * OP RELEASE / OP START / OP STOP
+ *
+ * Simple operations on a PMC id.
+ */
+
+struct pmc_op_simple {
+ pmc_id_t pm_pmcid;
+};
+
+#if __i386__ || __amd64__
+
+/*
+ * OP X86_GETMSR
+ *
+ * Retrieve the model-specific register associated with the
+ * allocated PMC.  This number can be used subsequently with
+ * RDPMC instructions.
+ */
+
+struct pmc_op_x86_getmsr {
+ uint32_t pm_msr; /* MSR for the PMC */
+ pmc_id_t pm_pmcid; /* allocated pmc id */
+};
+#endif
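The MSR number returned here lets a process sample its own counter without a system call. A hedged sketch using GCC-style inline assembly on x86; reassembling the 64-bit value is left to the caller:

    /* Sketch: read a counter directly with RDPMC. */
    struct pmc_op_x86_getmsr gm;
    uint32_t lo, hi;

    gm.pm_pmcid = pmcid;
    if (syscall(pmc_syscall, PMC_OP_PMCX86GETMSR, &gm) == 0) {
            __asm __volatile("rdpmc"
                : "=a" (lo), "=d" (hi) : "c" (gm.pm_msr));
            /* full 64-bit value: ((uint64_t) hi << 32) | lo */
    }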
+
+
+#ifdef _KERNEL
+
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+
+#define PMC_REQUEST_POOL_SIZE 128
+#define PMC_HASH_SIZE 16
+#define PMC_PCPU_BUFFER_SIZE 4096
+#define PMC_MTXPOOL_SIZE 32
+
+/*
+ * PMC commands
+ */
+
+struct pmc_syscall_args {
+ uint32_t pmop_code; /* one of PMC_OP_* */
+ void *pmop_data; /* syscall parameter */
+};
+
+/*
+ * Interface to processor-specific stuff
+ */
+
+/*
+ * struct pmc_descr
+ *
+ * The machine-independent (i.e., common) part of a human-readable
+ * PMC description.
+ */
+
+struct pmc_descr {
+ const char pd_name[PMC_NAME_MAX]; /* name */
+ uint32_t pd_caps; /* capabilities */
+ enum pmc_class pd_class; /* class of the PMC */
+ uint32_t pd_width; /* width in bits */
+};
+
+/*
+ * struct pmc_target
+ *
+ * This structure records all the target processes associated with a
+ * PMC.
+ */
+
+struct pmc_target {
+ LIST_ENTRY(pmc_target) pt_next;
+ struct pmc_process *pt_process; /* target descriptor */
+};
+
+/*
+ * struct pmc
+ *
+ * Describes each allocated PMC.
+ *
+ * Each PMC has precisely one owner, namely the process that allocated
+ * the PMC.
+ *
+ * Multiple target processes may be monitored by a PMC.  The
+ * 'pm_targets' field links all the target processes being monitored
+ * by this PMC.
+ *
+ * The 'pm_savedvalue' field is protected by a mutex.
+ *
+ * On a multi-cpu machine, multiple target threads associated with a
+ * process-virtual PMC could be concurrently executing on different
+ * CPUs. The 'pm_runcount' field is atomically incremented every time
+ * the PMC gets scheduled on a CPU and atomically decremented when it
+ * gets descheduled.  Deletion of a PMC is only permitted when this
+ * field is '0'.
+ *
+ */
+
+struct pmc {
+ LIST_HEAD(,pmc_target) pm_targets; /* list of target processes */
+
+ /*
+ * Global PMCs are allocated on a CPU and are not moved around.
+ * For global PMCs we need to record the CPU the PMC was allocated
+ * on.
+ *
+ * Virtual PMCs run on whichever CPU is currently executing
+ * their owner threads. For these PMCs we need to save their
+ * current PMC counter values when they are taken off CPU.
+ */
+
+ union {
+ uint32_t pm_cpu; /* System-wide PMCs */
+		pmc_value_t	pm_savedvalue;	/* Virtual PMCs */
+ } pm_gv;
+
+ /*
+	 * For sampling modes, we keep track of the PMC's "reload
+ * count", which is the counter value to be loaded in when
+ * arming the PMC for the next counting session. For counting
+ * modes on PMCs that are read-only (e.g., the x86 TSC), we
+ * keep track of the initial value at the start of
+ * counting-mode operation.
+ */
+
+ union {
+ pmc_value_t pm_reloadcount; /* sampling PMC modes */
+ pmc_value_t pm_initial; /* counting PMC modes */
+ } pm_sc;
+
+ uint32_t pm_caps; /* PMC capabilities */
+ enum pmc_class pm_class; /* class of PMC */
+ enum pmc_event pm_event; /* event being measured */
+ uint32_t pm_flags; /* additional flags PMC_F_... */
+ enum pmc_mode pm_mode; /* current mode */
+ struct pmc_owner *pm_owner; /* owner thread state */
+ uint32_t pm_rowindex; /* row index */
+ uint32_t pm_runcount; /* #cpus currently on */
+ enum pmc_state pm_state; /* state (active/inactive only) */
+
+ /* md extensions */
+#if __i386__
+ union {
+ /* AMD Athlon counters */
+ struct {
+ uint32_t pm_amd_evsel;
+ } pm_amd;
+
+ /* Intel P4 counters */
+ struct {
+ uint32_t pm_p4_cccrvalue;
+ uint32_t pm_p4_escrvalue;
+ uint32_t pm_p4_escr;
+ uint32_t pm_p4_escrmsr;
+ } pm_p4;
+
+ /* Intel P6 counters */
+ struct {
+ uint32_t pm_p6_evsel;
+ } pm_p6;
+ } pm_md;
+
+#elif __amd64__
+ union {
+ /* AMD Athlon counters */
+ struct {
+ uint32_t pm_amd_evsel;
+ } pm_amd;
+ } pm_md;
+
+#else
+
+#error Unsupported PMC architecture.
+
+#endif
+};
+
+/*
+ * struct pmc_list
+ *
+ * Describes a list of PMCs.
+ */
+
+struct pmc_list {
+ LIST_ENTRY(pmc_list) pl_next;
+ struct pmc *pl_pmc; /* PMC descriptor */
+};
+
+/*
+ * struct pmc_process
+ *
+ * Record a 'target' process being profiled.
+ *
+ * The target process being profiled could be different from the owner
+ * process which allocated the PMCs. Each target process descriptor
+ * is associated with NHWPMC 'struct pmc *' pointers. Each PMC at a
+ * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]'
+ * array. The size of this structure is thus PMC architecture
+ * dependent.
+ *
+ * TODO: Only process-private counting mode PMCs may be attached to a
+ * process different from the allocator process, since we do not yet
+ * have the infrastructure to make sense of an interrupted PC value
+ * from a 'target' process.
+ *
+ */
+
+struct pmc_targetstate {
+ struct pmc *pp_pmc; /* target PMC */
+ pmc_value_t pp_pmcval; /* per-process value */
+};
+
+struct pmc_process {
+ LIST_ENTRY(pmc_process) pp_next; /* hash chain */
+ int pp_refcnt; /* reference count */
+ struct proc *pp_proc; /* target thread */
+ struct pmc_targetstate pp_pmcs[]; /* NHWPMCs */
+};
+
+
+/*
+ * struct pmc_owner
+ *
+ * We associate a PMC with an 'owner' process.
+ *
+ * A process can be associated with 0..NCPUS*NHWPMC PMCs during its
+ * lifetime, where NCPUS is the number of CPUs in the system and
+ * NHWPMC is the number of hardware PMCs per CPU.  These are
+ * maintained in the list headed by 'po_pmcs' to save on space.
+ *
+ */
+
+struct pmc_owner {
+ LIST_ENTRY(pmc_owner) po_next; /* hash chain */
+ LIST_HEAD(, pmc_list) po_pmcs; /* list of owned PMCs */
+ uint32_t po_flags; /* PMC_FLAG_* */
+ struct proc *po_owner; /* owner proc */
+ int po_logfd; /* XXX for now */
+};
+
+#define PMC_FLAG_IS_OWNER 0x01
+#define PMC_FLAG_HAS_TS_PMC 0x02
+#define PMC_FLAG_OWNS_LOGFILE 0x04 /* owns system-sampling log file */
+
+/*
+ * struct pmc_hw -- describe the state of the PMC hardware
+ *
+ * When in use, a HW PMC is associated with one allocated 'struct pmc'
+ * pointed to by field 'phw_pmc'. When inactive, this field is NULL.
+ *
+ * On an SMP box, one or more HW PMCs in process virtual mode with
+ * the same 'phw_pmc' could be executing on different CPUs. In order
+ * to handle this case correctly, we need to ensure that only
+ * incremental counts get added to the saved value in the associated
+ * 'struct pmc'. The 'phw_save' field is used to keep the saved PMC
+ * value at the time the hardware is started during this context
+ * switch (i.e., the difference between the new (hardware) count and
+ * the saved count is atomically added to the count field in 'struct
+ * pmc' at context switch time).
+ *
+ */
+
+struct pmc_hw {
+ uint32_t phw_state; /* see PHW_* macros below */
+ struct pmc *phw_pmc; /* current thread PMC */
+};
+
+#define PMC_PHW_RI_MASK 0x000000FF
+#define PMC_PHW_CPU_SHIFT 8
+#define PMC_PHW_CPU_MASK 0x0000FF00
+#define PMC_PHW_FLAGS_SHIFT 16
+#define PMC_PHW_FLAGS_MASK 0xFFFF0000
+
+#define PMC_PHW_INDEX_TO_STATE(ri) ((ri) & PMC_PHW_RI_MASK)
+#define PMC_PHW_STATE_TO_INDEX(state) ((state) & PMC_PHW_RI_MASK)
+#define PMC_PHW_CPU_TO_STATE(cpu) (((cpu) << PMC_PHW_CPU_SHIFT) & \
+ PMC_PHW_CPU_MASK)
+#define PMC_PHW_STATE_TO_CPU(state) (((state) & PMC_PHW_CPU_MASK) >> \
+ PMC_PHW_CPU_SHIFT)
+#define PMC_PHW_FLAGS_TO_STATE(flags) (((flags) << PMC_PHW_FLAGS_SHIFT) & \
+ PMC_PHW_FLAGS_MASK)
+#define PMC_PHW_STATE_TO_FLAGS(state) (((state) & PMC_PHW_FLAGS_MASK) >> \
+ PMC_PHW_FLAGS_SHIFT)
+#define PMC_PHW_FLAG_IS_ENABLED (PMC_PHW_FLAGS_TO_STATE(0x01))
+#define PMC_PHW_FLAG_IS_SHAREABLE (PMC_PHW_FLAGS_TO_STATE(0x02))
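The row index, CPU number and flag bits share the single 'phw_state' word; the macros above compose and decompose it. A quick self-contained sketch:

    /* Sketch: build a state word for row 3 on CPU 1, then unpack it. */
    uint32_t state;

    state = PMC_PHW_INDEX_TO_STATE(3) |
        PMC_PHW_CPU_TO_STATE(1) | PMC_PHW_FLAG_IS_ENABLED;

    int ri  = PMC_PHW_STATE_TO_INDEX(state);    /* == 3 */
    int cpu = PMC_PHW_STATE_TO_CPU(state);      /* == 1 */
    int on  = (state & PMC_PHW_FLAG_IS_ENABLED) != 0;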
+
+/*
+ * struct pmc_cpustate
+ *
+ * A CPU is modelled as a collection of HW PMCs with space for additional
+ * flags.
+ */
+
+struct pmc_cpu {
+ uint32_t pc_state; /* physical cpu number + flags */
+ struct pmc_hw *pc_hwpmcs[]; /* 'npmc' pointers */
+ /* other machine dependent fields come here */
+};
+
+#define PMC_PCPU_CPU_MASK 0x000000FF
+#define PMC_PCPU_FLAGS_MASK 0xFFFFFF00
+#define PMC_PCPU_FLAGS_SHIFT 8
+#define PMC_PCPU_STATE_TO_CPU(S) ((S) & PMC_PCPU_CPU_MASK)
+#define PMC_PCPU_STATE_TO_FLAGS(S) (((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
+#define PMC_PCPU_FLAGS_TO_STATE(F) (((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
+#define PMC_PCPU_CPU_TO_STATE(C) ((C) & PMC_PCPU_CPU_MASK)
+#define PMC_PCPU_FLAG_HTT (PMC_PCPU_FLAGS_TO_STATE(0x1))
+
+/*
+ * struct pmc_binding
+ *
+ * CPU binding information.
+ */
+
+struct pmc_binding {
+ int pb_bound; /* is bound? */
+ int pb_cpu; /* if so, to which CPU */
+};
+
+/*
+ * struct pmc_mdep
+ *
+ * Machine dependent bits needed per CPU type.
+ */
+
+struct pmc_mdep {
+ enum pmc_class pmd_classes[PMC_CLASS_MAX];
+ int pmd_nclasspmcs[PMC_CLASS_MAX];
+
+ uint32_t pmd_cputype; /* from enum pmc_cputype */
+ uint32_t pmd_nclass; /* # PMC classes supported */
+ uint32_t pmd_npmc; /* max PMCs per CPU */
+
+ /*
+ * Methods
+ */
+
+ int (*pmd_init)(int _cpu); /* machine dependent initialization */
+ int (*pmd_cleanup)(int _cpu); /* machine dependent cleanup */
+
+ /* thread context switch in */
+ int (*pmd_switch_in)(struct pmc_cpu *_p);
+
+ /* thread context switch out */
+ int (*pmd_switch_out)(struct pmc_cpu *_p);
+
+ /* configuring/reading/writing the hardware PMCs */
+ int (*pmd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
+ int (*pmd_read_pmc)(int _cpu, int _ri, pmc_value_t *_value);
+ int (*pmd_write_pmc)(int _cpu, int _ri, pmc_value_t _value);
+
+ /* pmc allocation/release */
+ int (*pmd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
+ const struct pmc_op_pmcallocate *_a);
+ int (*pmd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);
+
+ /* starting and stopping PMCs */
+ int (*pmd_start_pmc)(int _cpu, int _ri);
+ int (*pmd_stop_pmc)(int _cpu, int _ri);
+
+ /* handle a PMC interrupt */
+ int (*pmd_intr)(int _cpu, uintptr_t _pc);
+
+ int (*pmd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
+ struct pmc **_ppmc);
+
+ /* Machine dependent methods */
+#if __i386__ || __amd64__
+ int (*pmd_get_msr)(int _ri, uint32_t *_msr);
+#endif
+
+};
+
+/*
+ * Per-CPU state. This is an array of 'mp_ncpu' pointers
+ * to struct pmc_cpu descriptors.
+ */
+
+extern struct pmc_cpu **pmc_pcpu;
+
+/* driver statistics */
+extern struct pmc_op_getdriverstats pmc_stats;
+
+#if DEBUG
+
+/* debug flags */
+extern unsigned int pmc_debugflags; /* [Maj:12bits] [Min:16bits] [level:4] */
+
+#define PMC_DEBUG_DEFAULT_FLAGS 0
+#define PMC_DEBUG_STRSIZE 128
+
+#define __PMCDFMAJ(M) (1 << (PMC_DEBUG_MAJ_##M+20))
+#define __PMCDFMIN(M) (1 << (PMC_DEBUG_MIN_##M+4))
+
+#define __PMCDF(M,N) (__PMCDFMAJ(M) | __PMCDFMIN(N))
+#define PMCDBG(M,N,L,F,...) do { \
+ if (((pmc_debugflags & __PMCDF(M,N)) == __PMCDF(M,N)) && \
+ ((pmc_debugflags & 0xF) > (L))) \
+ printf(#M ":" #N ": " F "\n", __VA_ARGS__); \
+} while (0)
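A message is printed only when both the selected major and minor bits are set in 'pmc_debugflags' and the low level nibble is strictly greater than the message's level argument. A usage sketch with illustrative values, assuming 'cpu' and 'ri' are in scope:

    /* Sketch: trace an allocation in the PMC major, ALL minor. */
    PMCDBG(PMC,ALL,1, "allocate cpu=%d ri=%d", cpu, ri);

With __PMCDF(PMC,ALL) enabled and a level nibble of 2 or more, this prints "PMC:ALL: allocate cpu=... ri=...".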
+
+/* Major numbers */
+#define PMC_DEBUG_MAJ_MOD 0 /* misc module infrastructure */
+#define PMC_DEBUG_MAJ_PMC 1 /* pmc management */
+#define PMC_DEBUG_MAJ_CTX 2 /* context switches */
+#define PMC_DEBUG_MAJ_OWN 3 /* owner */
+#define PMC_DEBUG_MAJ_PRC 4 /* processes */
+#define PMC_DEBUG_MAJ_MDP 5 /* machine dependent */
+#define PMC_DEBUG_MAJ_CPU 6 /* cpu switches */
+
+/* Minor numbers */
+
+/* Common (8 bits) */
+#define PMC_DEBUG_MIN_ALL 0 /* allocation */
+#define PMC_DEBUG_MIN_REL 1 /* release */
+#define PMC_DEBUG_MIN_OPS 2 /* ops: start, stop, ... */
+#define PMC_DEBUG_MIN_INI 3 /* init */
+#define PMC_DEBUG_MIN_FND 4 /* find */
+
+/* MODULE */
+#define PMC_DEBUG_MIN_PMH 14 /* pmc_hook */
+#define PMC_DEBUG_MIN_PMS 15 /* pmc_syscall */
+
+/* OWN */
+#define PMC_DEBUG_MIN_ORM 8 /* owner remove */
+#define PMC_DEBUG_MIN_OMR 9 /* owner maybe remove */
+
+/* PROCESSES */
+#define PMC_DEBUG_MIN_TLK 8 /* link target */
+#define PMC_DEBUG_MIN_TUL 9 /* unlink target */
+#define PMC_DEBUG_MIN_EXT 10 /* process exit */
+#define PMC_DEBUG_MIN_EXC 11 /* process exec */
+#define PMC_DEBUG_MIN_FRK 12 /* process fork */
+#define PMC_DEBUG_MIN_ATT 13 /* attach/detach */
+
+/* CONTEXT SWITCHES */
+#define PMC_DEBUG_MIN_SWI 8 /* switch in */
+#define PMC_DEBUG_MIN_SWO 9 /* switch out */
+
+/* PMC */
+#define PMC_DEBUG_MIN_REG 8 /* pmc register */
+#define PMC_DEBUG_MIN_ALR 9 /* allocate row */
+
+/* MACHINE DEPENDENT LAYER */
+#define PMC_DEBUG_MIN_REA 8 /* read */
+#define PMC_DEBUG_MIN_WRI 9 /* write */
+#define PMC_DEBUG_MIN_CFG 10 /* config */
+#define PMC_DEBUG_MIN_STA 11 /* start */
+#define PMC_DEBUG_MIN_STO 12 /* stop */
+
+/* CPU */
+#define PMC_DEBUG_MIN_BND 8 /* bind */
+#define PMC_DEBUG_MIN_SEL 9 /* select */
+
+#else
+#define PMCDBG(M,N,L,F,...) /* nothing */
+#endif
+
+/* declare a dedicated memory pool */
+MALLOC_DECLARE(M_PMC);
+
+/*
+ * Functions
+ */
+
+void pmc_update_histogram(struct pmc_hw *phw, uintptr_t pc);
+void pmc_send_signal(struct pmc *pmc);
+int pmc_getrowdisp(int ri);
+
+#endif /* _KERNEL */
+#endif /* _SYS_PMC_H_ */
diff --git a/sys/sys/pmckern.h b/sys/sys/pmckern.h
new file mode 100644
index 0000000..3c11172
--- /dev/null
+++ b/sys/sys/pmckern.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 2003, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * PMC interface used by the base kernel.
+ */
+
+#ifndef _SYS_PMCKERN_H_
+#define _SYS_PMCKERN_H_
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/sx.h>
+
+#define PMC_FN_PROCESS_EXIT 1
+#define PMC_FN_PROCESS_EXEC 2
+#define PMC_FN_PROCESS_FORK 3
+#define PMC_FN_CSW_IN 4
+#define PMC_FN_CSW_OUT 5
+
+/* hook */
+extern int (*pmc_hook)(struct thread *_td, int _function, void *_arg);
+extern int (*pmc_intr)(int cpu, uintptr_t pc);
+
+/* SX lock protecting the hook */
+extern struct sx pmc_sx;
+
+/* hook invocation; for use within the kernel */
+#define PMC_CALL_HOOK(t, cmd, arg) \
+do { \
+ sx_slock(&pmc_sx); \
+ if (pmc_hook != NULL) \
+ (pmc_hook)((t), (cmd), (arg)); \
+ sx_sunlock(&pmc_sx); \
+} while (0)
+
+/* hook invocation that needs an exclusive lock */
+#define PMC_CALL_HOOK_X(t, cmd, arg) \
+do { \
+ sx_xlock(&pmc_sx); \
+ if (pmc_hook != NULL) \
+ (pmc_hook)((t), (cmd), (arg)); \
+ sx_xunlock(&pmc_sx); \
+} while (0)
+
+/* context switches cannot take locks */
+#define PMC_SWITCH_CONTEXT(t, cmd) \
+do { \
+ if (pmc_hook != NULL) \
+ (pmc_hook)((t), (cmd), NULL); \
+} while (0)
+
+
+/*
+ * Check whether a process is using HWPMCs.
+ */
+
+#define PMC_PROC_IS_USING_PMCS(p) \
+ (__predict_false(atomic_load_acq_int(&(p)->p_flag) & \
+ P_HWPMC))
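Taken together, these macros keep the common case cheap: the base kernel tests a single process flag and only then crosses into the (possibly unloaded) module through the hook pointer under the sx lock. A sketch of how a kernel path such as execve() handling might use them, with 'td' and 'p' assumed in scope (the actual call sites live in the kern_exec.c and scheduler hunks of this change, not shown here):

    /* Sketch: notify hwpmc(4), if loaded, that 'p' is execing. */
    if (PMC_PROC_IS_USING_PMCS(p))
            PMC_CALL_HOOK(td, PMC_FN_PROCESS_EXEC, (void *) p);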
+
+/* helper functions */
+int pmc_cpu_is_disabled(int _cpu);
+int pmc_cpu_is_logical(int _cpu);
+
+#endif /* _SYS_PMCKERN_H_ */
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 7a151bb..b7703d2 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -634,6 +634,8 @@ struct proc {
#define P_PROTECTED 0x100000 /* Do not kill on memory overcommit. */
#define P_SIGEVENT 0x200000 /* Process pending signals changed. */
#define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */
+#define P_HWPMC 0x800000 /* Process is using HWPMCs */
+
#define P_JAILED 0x1000000 /* Process is in jail. */
#define P_INEXEC 0x4000000 /* Process is in execve(). */
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 349c1b5..ff555d2 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -87,6 +87,7 @@ void sched_bind(struct thread *td, int cpu);
static __inline void sched_pin(void);
void sched_unbind(struct thread *td);
static __inline void sched_unpin(void);
+int sched_is_bound(struct thread *td);
/*
* These procedures tell the process data structure allocation code how
diff --git a/usr.sbin/Makefile b/usr.sbin/Makefile
index e8b528b..071435e 100644
--- a/usr.sbin/Makefile
+++ b/usr.sbin/Makefile
@@ -112,6 +112,8 @@ SUBDIR= ac \
${_pcvt} \
periodic \
pkg_install \
+ pmccontrol \
+ pmcstat \
${_pnpinfo} \
powerd \
ppp \
diff --git a/usr.sbin/pmccontrol/Makefile b/usr.sbin/pmccontrol/Makefile
new file mode 100644
index 0000000..851b1c7
--- /dev/null
+++ b/usr.sbin/pmccontrol/Makefile
@@ -0,0 +1,17 @@
+#
+# $FreeBSD$
+#
+
+PROG= pmccontrol
+MAN= pmccontrol.8
+
+DPADD= ${LIBPMC}
+LDADD= -lpmc
+
+WARNS= 6
+
+CFLAGS+= -I${.CURDIR}/../../sys -I${.CURDIR}/../../lib/libpmc
+
+SRCS= pmccontrol.c
+
+.include <bsd.prog.mk>
diff --git a/usr.sbin/pmccontrol/pmccontrol.8 b/usr.sbin/pmccontrol/pmccontrol.8
new file mode 100644
index 0000000..61b61aa
--- /dev/null
+++ b/usr.sbin/pmccontrol/pmccontrol.8
@@ -0,0 +1,132 @@
+.\" Copyright (c) 2003 Joseph Koshy. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" This software is provided by Joseph Koshy ``as is'' and
+.\" any express or implied warranties, including, but not limited to, the
+.\" implied warranties of merchantability and fitness for a particular purpose
+.\" are disclaimed. in no event shall Joseph Koshy be liable
+.\" for any direct, indirect, incidental, special, exemplary, or consequential
+.\" damages (including, but not limited to, procurement of substitute goods
+.\" or services; loss of use, data, or profits; or business interruption)
+.\" however caused and on any theory of liability, whether in contract, strict
+.\" liability, or tort (including negligence or otherwise) arising in any way
+.\" out of the use of this software, even if advised of the possibility of
+.\" such damage.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd December 15, 2003
+.Dt PMCCONTROL 8
+.Os
+.Sh NAME
+.Nm pmccontrol
+.Nd control hardware performance monitoring counters
+.Sh SYNOPSIS
+.Nm
+.Oo
+.Op Fl c Ar cpu
+.Op Fl e Ar pmc
+.Op Fl d Ar pmc
+.Oc Ns ...
+.Nm
+.Op Fl lL
+.Nm
+.Op Fl s
+.Sh DESCRIPTION
+The
+.Nm
+utility controls the operation of the system's hardware performance
+monitoring counters.
+.Sh OPTIONS
+The
+.Nm
+utility processes options in command line order, so later options modify
+the effect of earlier ones.
+The following options are available:
+.Bl -tag -width indent
+.It Fl c Ar cpu
+Subsequent enable and disable options affect the CPU
+denoted by
+.Ar cpu .
+The argument
+.Ar cpu
+is either a number denoting a CPU number in the system, or the string
+.Dq Li \&* ,
+denoting all CPUs in the system.
+.It Fl d Ar pmc
+Disable PMC number
+.Ar pmc
+on the CPU specified by
+.Fl c ,
+preventing it from being used until it is subsequently re-enabled.
+The argument
+.Ar pmc
+is either a number denoting a specific PMC, or the string
+.Dq Li \&*
+denoting all the PMCs on the specified CPU.
+.Pp
+Only idle PMCs may be disabled.
+.\" XXX this probably needs to be fixed.
+.It Fl e Ar pmc
+Enable PMC number
+.Ar pmc ,
+on the CPU specified by
+.Fl c ,
+allowing it to be used in the future.
+The argument
+.Ar pmc
+is either a number denoting a specific PMC, or the string
+.Dq Li \&*
+denoting all the PMCs on the specified CPU.
+If PMC
+.Ar pmc
+is already enabled, this option has no effect.
+.It Fl l
+List available hardware performance counters and their current
+disposition.
+.It Fl L
+List available hardware performance counter classes and their
+supported event names.
+.It Fl s
+Print driver statistics maintained by
+.Xr hwpmc 4 .
+.El
+.Sh EXAMPLES
+To disable all PMCs on all CPUs, use the command:
+.Dl pmccontrol -d\&*
+.Pp
+To enable all PMCs on all CPUs, use:
+.Dl pmccontrol -e\&*
+.Pp
+To disable PMCs 0 and 1 on CPU 2, use:
+.Dl pmccontrol -c2 -d0 -d1
+.Pp
+To disable PMC 0 of CPU 0 only, and enable all other PMCs on all other
+CPUs, use:
+.Dl pmccontrol -c\&* -e\&* -c0 -d0
+.Sh DIAGNOSTICS
+.Ex -std pmccontrol
+.Sh HISTORY
+The
+.Nm
+utility is proposed to be integrated into
+.Fx
+sometime after
+.Fx 5.2 .
+.Nm
+.Bt
+.Sh AUTHORS
+.An Joseph Koshy Aq jkoshy@FreeBSD.org
+.Sh SEE ALSO
+.Xr pmc 3 ,
+.Xr hwpmc 4 ,
+.Xr pmcstat 8 ,
+.Xr sysctl 8
diff --git a/usr.sbin/pmccontrol/pmccontrol.c b/usr.sbin/pmccontrol/pmccontrol.c
new file mode 100644
index 0000000..a1ed2d5
--- /dev/null
+++ b/usr.sbin/pmccontrol/pmccontrol.c
@@ -0,0 +1,476 @@
+/*-
+ * Copyright (c) 2003,2004 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+
+#include <assert.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pmc.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+/* Commands and compile-time constants */
+
+#define PMCC_PRINT_USAGE 0
+#define PMCC_PRINT_EVENTS 1
+#define PMCC_LIST_STATE 2
+#define PMCC_ENABLE_DISABLE 3
+#define PMCC_SHOW_STATISTICS 4
+
+#define PMCC_CPU_ALL -1
+#define PMCC_CPU_WILDCARD '*'
+
+#define PMCC_PMC_ALL -1
+#define PMCC_PMC_WILDCARD '*'
+
+#define PMCC_OP_IGNORE 0
+#define PMCC_OP_DISABLE 1
+#define PMCC_OP_ENABLE 2
+
+#define PMCC_PROGRAM_NAME "pmccontrol"
+
+STAILQ_HEAD(pmcc_op_list, pmcc_op) head = STAILQ_HEAD_INITIALIZER(head);
+
+struct pmcc_op {
+ char op_cpu;
+ char op_pmc;
+ char op_op;
+ STAILQ_ENTRY(pmcc_op) op_next;
+};
+
+/* Function Prototypes */
+#if DEBUG
+static void pmcc_init_debug(void);
+#endif
+
+static int pmcc_do_list_state(void);
+static int pmcc_do_enable_disable(struct pmcc_op_list *);
+static int pmcc_do_list_events(void);
+
+/* Globals */
+
+static char usage_message[] =
+ "Usage:\n"
+ " " PMCC_PROGRAM_NAME " -l\n"
+ " " PMCC_PROGRAM_NAME " -s\n"
+ " " PMCC_PROGRAM_NAME " [-e pmc | -d pmc | -c cpu] ...";
+
+#if DEBUG
+FILE *debug_stream = NULL;
+#endif
+
+#if DEBUG
+#define DEBUG_MSG(...) \
+ (void) fprintf(debug_stream, "[pmccontrol] " __VA_ARGS__);
+#else
+#define	DEBUG_MSG(...)	/* */
+#endif /* !DEBUG */
+
+int pmc_syscall = -1;
+
+#define	PMC_CALL(cmd, params) do {				\
+	if ((error = syscall(pmc_syscall, PMC_OP_##cmd,		\
+	    (params))) != 0) {					\
+		DEBUG_MSG("ERROR: syscall [" #cmd "]");		\
+		exit(EX_OSERR);					\
+	}							\
+} while (0)
+
+#if DEBUG
+/* log debug messages to a separate file */
+static void
+pmcc_init_debug(void)
+{
+ char *fn;
+
+ fn = getenv("PMCCONTROL_DEBUG");
+ if (fn != NULL)
+ {
+ debug_stream = fopen(fn, "w");
+ if (debug_stream == NULL)
+ debug_stream = stderr;
+ } else
+ debug_stream = stderr;
+}
+#endif
+
+static int
+pmcc_do_enable_disable(struct pmcc_op_list *op_list)
+{
+ unsigned char op;
+ int c, error, i, j, ncpu, npmc, t;
+ int cpu, pmc;
+ struct pmcc_op *np;
+ unsigned char *map;
+
+ if ((ncpu = pmc_ncpu()) < 0)
+		err(EX_OSERR, "Unable to determine the number of CPUs");
+
+ /* determine the maximum number of PMCs in any CPU */
+ npmc = 0;
+ for (c = 0; c < ncpu; c++) {
+ if ((t = pmc_npmc(c)) < 0)
+ err(EX_OSERR, "Unable to determine the number of PMCs in "
+ "CPU %d", c);
+ npmc = t > npmc ? t : npmc;
+ }
+
+ if (npmc == 0)
+ errx(EX_CONFIG, "No PMCs found");
+
+ if ((map = malloc(npmc * ncpu)) == NULL)
+ err(EX_SOFTWARE, "Out of memory");
+
+ (void) memset(map, PMCC_OP_IGNORE, npmc*ncpu);
+
+ error = 0;
+ STAILQ_FOREACH(np, op_list, op_next) {
+
+ cpu = np->op_cpu;
+ pmc = np->op_pmc;
+ op = np->op_op;
+
+ if (cpu >= ncpu)
+ errx(EX_DATAERR, "CPU id too large: \"%d\"", cpu);
+
+ if (pmc >= npmc)
+ errx(EX_DATAERR, "PMC id too large: \"%d\"", pmc);
+
+/* Mark the requested operation for PMC 'P' of CPU 'C' in the map. */
+#define MARKMAP(M,C,P,V) do {					\
+ *((M) + (C)*npmc + (P)) = (V); \
+} while (0)
+
+#define SET_PMCS(C,P,V) do { \
+ if ((P) == PMCC_PMC_ALL) { \
+ for (j = 0; j < npmc; j++) \
+ MARKMAP(map, (C), j, (V)); \
+ } else \
+ MARKMAP(map, (C), (P), (V)); \
+} while (0)
+
+#define MAP(M,C,P) (*((M) + (C)*npmc + (P)))
+
+ if (cpu == PMCC_CPU_ALL)
+ for (i = 0; i < ncpu; i++)
+ SET_PMCS(i, pmc, op);
+ else
+ SET_PMCS(cpu, pmc, op);
+ }
+
+ /* Configure PMCS */
+ for (i = 0; i < ncpu; i++)
+ for (j = 0; j < npmc; j++) {
+ unsigned char b;
+
+ b = MAP(map, i, j);
+
+ error = 0;
+
+ if (b == PMCC_OP_ENABLE)
+ error = pmc_enable(i, j);
+ else if (b == PMCC_OP_DISABLE)
+ error = pmc_disable(i, j);
+
+ if (error < 0)
+ err(EX_OSERR, "%s of PMC %d on CPU %d failed",
+ b == PMCC_OP_ENABLE ? "Enable" :
+ "Disable", j, i);
+ }
+
+ return error;
+}
+
+static int
+pmcc_do_list_state(void)
+{
+ size_t dummy;
+ int c, cpu, n, npmc, ncpu;
+ unsigned int logical_cpus_mask;
+ struct pmc_info *pd;
+ struct pmc_op_getpmcinfo *pi;
+ const struct pmc_op_getcpuinfo *pc;
+
+ if (pmc_cpuinfo(&pc) != 0)
+ err(EX_OSERR, "Unable to determine CPU information");
+
+ dummy = sizeof(logical_cpus_mask);
+ if (sysctlbyname("machdep.logical_cpus_mask", &logical_cpus_mask,
+ &dummy, NULL, 0) < 0)
+ logical_cpus_mask = 0;
+
+ ncpu = pc->pm_ncpu;
+
+ for (c = cpu = 0; cpu < ncpu; cpu++) {
+#if defined(__i386__)
+ if (pc->pm_cputype == PMC_CPU_INTEL_PIV &&
+ (logical_cpus_mask & (1 << cpu)))
+ continue; /* skip P4-style 'logical' cpus */
+#endif
+ if (pmc_pmcinfo(cpu, &pi) < 0)
+ err(EX_OSERR, "Unable to get PMC status for CPU %d",
+ cpu);
+
+ printf("#CPU %d:\n", c++);
+ npmc = pmc_npmc(cpu);
+ printf("#N NAME CLASS STATE ROW-DISP\n");
+
+ for (n = 0; n < npmc; n++) {
+ pd = &pi->pm_pmcs[n];
+
+ printf(" %-2d %-16s %-6s %-8s %-10s",
+ n,
+ pd->pm_name,
+ pmc_name_of_class(pd->pm_class),
+ pd->pm_enabled ? "ENABLED" : "DISABLED",
+ pmc_name_of_disposition(pd->pm_rowdisp));
+
+ if (pd->pm_ownerpid != -1) {
+ printf(" (pid %d)", pd->pm_ownerpid);
+ printf(" %-32s",
+ pmc_name_of_event(pd->pm_event));
+ if (PMC_IS_SAMPLING_MODE(pd->pm_mode))
+					printf(" (reload count %ju)",
+					    (uintmax_t) pd->pm_reloadcount);
+ }
+ printf("\n");
+ }
+ free(pi);
+ }
+ return 0;
+}
+
+static int
+pmcc_do_list_events(void)
+{
+ enum pmc_class c;
+ unsigned int i, j, nevents;
+ const char **eventnamelist;
+ const struct pmc_op_getcpuinfo *ci;
+
+ if (pmc_cpuinfo(&ci) != 0)
+ err(EX_OSERR, "Unable to determine CPU information");
+
+ eventnamelist = NULL;
+
+ for (i = 0; i < ci->pm_nclass; i++) {
+ c = ci->pm_classes[i];
+
+ printf("%s\n", pmc_name_of_class(c));
+ if (pmc_event_names_of_class(c, &eventnamelist, &nevents) < 0)
+ err(EX_OSERR, "ERROR: Cannot find information for "
+ "event class \"%s\"", pmc_name_of_class(c));
+
+ for (j = 0; j < nevents; j++)
+ printf("\t%s\n", eventnamelist[j]);
+
+ free(eventnamelist);
+ }
+ return 0;
+}
+
+static int
+pmcc_show_statistics(void)
+{
+
+ struct pmc_op_getdriverstats gms;
+
+ if (pmc_get_driver_stats(&gms) < 0)
+ err(EX_OSERR, "ERROR: cannot retrieve driver statistics");
+
+ /*
+ * Print statistics.
+ */
+
+#define PRINT(N,V) (void) printf("%20s %d\n", (N), gms.pm_##V)
+
+ PRINT("interrupts-processed", intr_processed);
+ PRINT("interrupts-ignored", intr_ignored);
+ PRINT("system-calls", syscalls);
+ PRINT("system-calls-with-errors", syscall_errors);
+
+ return 0;
+}
+
+/*
+ * Main
+ */
+
+int
+main(int argc, char **argv)
+{
+ int error, command, currentcpu, option, pmc;
+ char *dummy;
+ struct pmcc_op *p;
+
+#if DEBUG
+ pmcc_init_debug();
+#endif
+
+ /* parse args */
+
+ currentcpu = PMCC_CPU_ALL;
+ command = PMCC_PRINT_USAGE;
+ error = 0;
+
+ STAILQ_INIT(&head);
+
+ while ((option = getopt(argc, argv, ":c:d:e:lLs")) != -1)
+ switch (option) {
+ case 'L':
+ if (command != PMCC_PRINT_USAGE) {
+ error = 1;
+ break;
+ }
+ command = PMCC_PRINT_EVENTS;
+ break;
+
+ case 'c':
+ if (command != PMCC_PRINT_USAGE &&
+ command != PMCC_ENABLE_DISABLE) {
+ error = 1;
+ break;
+ }
+ command = PMCC_ENABLE_DISABLE;
+
+ if (*optarg == PMCC_CPU_WILDCARD)
+ currentcpu = PMCC_CPU_ALL;
+ else {
+ currentcpu = strtoul(optarg, &dummy, 0);
+ if (*dummy != '\0' || currentcpu < 0)
+ errx(EX_DATAERR,
+ "\"%s\" is not a valid CPU id",
+ optarg);
+ }
+ break;
+
+ case 'd':
+ case 'e':
+ if (command != PMCC_PRINT_USAGE &&
+ command != PMCC_ENABLE_DISABLE) {
+ error = 1;
+ break;
+ }
+ command = PMCC_ENABLE_DISABLE;
+
+ if (*optarg == PMCC_PMC_WILDCARD)
+ pmc = PMCC_PMC_ALL;
+ else {
+ pmc = strtoul(optarg, &dummy, 0);
+ if (*dummy != '\0' || pmc < 0)
+ errx(EX_DATAERR,
+ "\"%s\" is not a valid PMC id",
+ optarg);
+ }
+
+ if ((p = malloc(sizeof(*p))) == NULL)
+ err(EX_SOFTWARE, "Out of memory");
+
+ p->op_cpu = currentcpu;
+ p->op_pmc = pmc;
+ p->op_op = option == 'd' ? PMCC_OP_DISABLE :
+ PMCC_OP_ENABLE;
+
+ STAILQ_INSERT_TAIL(&head, p, op_next);
+ break;
+
+ case 'l':
+ if (command != PMCC_PRINT_USAGE) {
+ error = 1;
+ break;
+ }
+ command = PMCC_LIST_STATE;
+ break;
+
+ case 's':
+ if (command != PMCC_PRINT_USAGE) {
+ error = 1;
+ break;
+ }
+ command = PMCC_SHOW_STATISTICS;
+ break;
+
+ case ':':
+ errx(EX_USAGE,
+ "Missing argument to option '-%c'", optopt);
+ break;
+
+ case '?':
+ warnx("Unrecognized option \"-%c\"", optopt);
+			errx(EX_USAGE, "%s", usage_message);
+ break;
+
+ default:
+ error = 1;
+ break;
+
+ }
+
+ if (command == PMCC_PRINT_USAGE)
+		errx(EX_USAGE, "%s", usage_message);
+
+ if (error)
+ exit(EX_USAGE);
+
+ if (pmc_init() < 0)
+ err(EX_UNAVAILABLE,
+ "Initialization of the pmc(3) library failed");
+
+ switch (command) {
+ case PMCC_LIST_STATE:
+ error = pmcc_do_list_state();
+ break;
+ case PMCC_PRINT_EVENTS:
+ error = pmcc_do_list_events();
+ break;
+ case PMCC_SHOW_STATISTICS:
+ error = pmcc_show_statistics();
+ break;
+ case PMCC_ENABLE_DISABLE:
+ if (STAILQ_EMPTY(&head))
+ errx(EX_USAGE, "No PMCs specified to enable or disable");
+ error = pmcc_do_enable_disable(&head);
+ break;
+ default:
+ assert(0);
+
+ }
+
+ if (error != 0)
+ err(EX_OSERR, "Command failed");
+ exit(0);
+}
diff --git a/usr.sbin/pmcstat/Makefile b/usr.sbin/pmcstat/Makefile
new file mode 100644
index 0000000..350c024
--- /dev/null
+++ b/usr.sbin/pmcstat/Makefile
@@ -0,0 +1,17 @@
+#
+# $FreeBSD$
+#
+
+PROG= pmcstat
+MAN= pmcstat.8
+
+DPADD= ${LIBPMC}
+LDADD= -lpmc -lm
+
+WARNS= 6
+
+CFLAGS+= -I${.CURDIR}/../../sys -I${.CURDIR}/../../lib/libpmc
+
+SRCS= pmcstat.c
+
+.include <bsd.prog.mk>
diff --git a/usr.sbin/pmcstat/pmcstat.8 b/usr.sbin/pmcstat/pmcstat.8
new file mode 100644
index 0000000..75f132b
--- /dev/null
+++ b/usr.sbin/pmcstat/pmcstat.8
@@ -0,0 +1,196 @@
+.\" Copyright (c) 2003 Joseph Koshy. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" This software is provided by Joseph Koshy ``as is'' and
+.\" any express or implied warranties, including, but not limited to, the
+.\" implied warranties of merchantability and fitness for a particular purpose
+.\" are disclaimed. in no event shall Joseph Koshy be liable
+.\" for any direct, indirect, incidental, special, exemplary, or consequential
+.\" damages (including, but not limited to, procurement of substitute goods
+.\" or services; loss of use, data, or profits; or business interruption)
+.\" however caused and on any theory of liability, whether in contract, strict
+.\" liability, or tort (including negligence or otherwise) arising in any way
+.\" out of the use of this software, even if advised of the possibility of
+.\" such damage.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd December 15, 2003
+.Dt PMCSTAT 8
+.Os
+.Sh NAME
+.Nm pmcstat
+.Nd performance measurement with performance monitoring hardware
+.Sh SYNOPSIS
+.Nm
+.Op Fl C
+.Op Fl O Ar logfilename
+.Op Fl P Ar event-spec
+.Op Fl S Ar event-spec
+.Op Fl c Ar cpu
+.Op Fl d
+.Op Fl n Ar rate
+.Op Fl o Ar outputfile
+.Op Fl p Ar event-spec
+.Op Fl s Ar event-spec
+.Op Fl t Ar pid
+.Op Fl w Ar secs
+.Op command Op args
+.Sh DESCRIPTION
+The
+.Nm
+utility measures system performance using the facilities provided by
+.Xr hwpmc 4 .
+.Pp
+The
+.Nm
+utility can measure both hardware events seen by the system as a
+whole, and those seen when a specified process is executing on the
+system's CPUs.
+If a specific process is being targeted (for example,
+if the
+.Fl t Ar pid
+option is specified, or if a command line is specified using
+.Ar command ) ,
+then measurement occurs until the target process exits or
+the
+.Nm
+utility is interrupted by the user.
+If a specific process is not targeted for measurement, then
+.Nm
+will perform system-wide measurements until interrupted by the
+user.
+.Pp
+A given invocation of
+.Nm
+can mix allocations of system-mode and process-mode PMCs, of both
+counting and sampling flavors.
+The values of all counting PMCs are printed in human readable form
+at regular intervals by
+.Nm .
+The output of sampling PMCs is directed to a log file for later
+analysis by tools such as
+.Xr pmcreport 8 .
+.Pp
+Hardware events to measure are specified to
+.Nm
+using event specifier strings
+.Ar event-spec .
+The syntax of these event specifiers is machine dependent and is
+documented in
+.Xr pmc 3 .
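+For example, on an AMD Athlon CPU the specifier
+.Dq Li k7-dc-misses,usr
+could be used to count data cache misses occurring in user mode;
+the exact set of event names and qualifiers accepted varies with
+the CPU type.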
+.Pp
+A process-mode PMC may be configured to be inheritable by the target
+process' current and future children.
+.Sh OPTIONS
+The following options are available:
+.Bl -tag -width indent
+.It Fl C
+Toggle between showing cumulative and incremental counts for
+subsequent counting mode PMCs specified on the command line.
+The default is to show incremental counts.
+.It Fl O Ar logfilename
+Send the output of sampling mode PMCs to
+.Ar logfilename .
+The default file name is
+.Pa pmcstat.out ,
+in the current directory.
+.It Fl P Ar event-spec
+Allocate a process mode sampling PMC measuring hardware events
+specified in
+.Ar event-spec .
+.It Fl S Ar event-spec
+Allocate a system mode sampling PMC measuring hardware events
+specified in
+.Ar event-spec .
+.It Fl c Ar cpu
+Set the CPU for subsequent system mode PMCs specified on the
+command line to
+.Ar cpu .
+The default is to allocate system mode PMCs on CPU zero.
+.It Fl d
+Toggle between process mode PMCs measuring events for the target
+process' current and future children or only measuring events for
+the attached process.
+The default is to measure events for the target process alone.
+.It Fl n Ar rate
+Set the default sampling rate for subsequent sampling mode
+PMCs specified on the command line.
+The default is to configure PMCs to sample the CPU's instruction
+pointer every 65536 events.
+.It Fl o Ar outputfile
+Send the periodic counter output of
+.Nm
+to file
+.Ar outputfile .
+The default is to send output to
+.Pa stderr .
+.It Fl p Ar event-spec
+Allocate a process mode counting PMC measuring hardware events
+specified in
+.Ar event-spec .
+.It Fl s Ar event-spec
+Allocate a system mode counting PMC measuring hardware events
+specified in
+.Ar event-spec .
+.It Fl t Ar pid
+Attach all process mode PMCs allocated to the process with PID
+.Ar pid .
+This option may not be used together with a command line
+specified using
+.Ar command .
+.It Fl w Ar secs
+Print the values of all counting mode PMCs every
+.Ar secs
+seconds.
+The argument
+.Ar secs
+may be a fractional value.
+The default interval is 5 seconds.
+.El
+.Pp
+If
+.Ar command
+is specified, it is executed using
+.Xr execvp 3 .
+.Sh EXAMPLES
+To perform system-wide statistical sampling on an AMD Athlon CPU with
+samples taken every 32768 retired instructions and sample data written
+to file
+.Dq sample.stat ,
+use:
+.Dl pmcstat -O sample.stat -n 32768 -S k7-retired-instructions
+.Pp
+To execute
+.Dq mozilla
+and measure the number of data cache misses suffered
+by it and its children every 12 seconds on an AMD Athlon, use:
+.Dl pmcstat -d -w 12 -p k7-dc-misses mozilla
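+.Pp
+To count instructions retired by the whole system on CPU 0, with the
+counts printed to file
+.Dq counts.txt
+every 2 seconds, one could use:
+.Dl pmcstat -c 0 -o counts.txt -w 2 -s k7-retired-instructions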
+.Sh DIAGNOSTICS
+.Ex -std pmcstat
+.Sh HISTORY
+The
+.Nm
+utility is proposed for integration into
+.Fx
+sometime after
+.Fx 5.2 .
+.Nm
+.Bt
+.Sh AUTHORS
+.An Joseph Koshy Aq jkoshy@FreeBSD.org
+.Sh SEE ALSO
+.Xr execvp 3 ,
+.Xr pmc 3 ,
+.Xr hwpmc 4 ,
+.Xr pmccontrol 8 ,
+.Xr pmcreport 8 ,
+.Xr sysctl 8
diff --git a/usr.sbin/pmcstat/pmcstat.c b/usr.sbin/pmcstat/pmcstat.c
new file mode 100644
index 0000000..8dc09dc
--- /dev/null
+++ b/usr.sbin/pmcstat/pmcstat.c
@@ -0,0 +1,728 @@
+/*-
+ * Copyright (c) 2003,2004 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/queue.h>
+#include <sys/time.h>
+#include <sys/ttycom.h>
+#include <sys/wait.h>
+
+#include <assert.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <math.h>
+#include <pmc.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+/* Operation modes */
+
+#define FLAG_HAS_PID 0x00000001
+#define FLAG_HAS_WAIT_INTERVAL 0x00000002
+#define FLAG_HAS_LOG_FILE 0x00000004
+#define FLAG_HAS_PROCESS 0x00000008
+#define FLAG_USING_SAMPLING 0x00000010
+#define FLAG_USING_COUNTING 0x00000020
+#define FLAG_USING_PROCESS_PMC 0x00000040
+
+#define DEFAULT_SAMPLE_COUNT 65536
+#define DEFAULT_WAIT_INTERVAL 5.0
+#define DEFAULT_DISPLAY_HEIGHT 23
+#define DEFAULT_LOGFILE_NAME "pmcstat.out"
+
+#define PRINT_HEADER_PREFIX "# "
+#define READPIPEFD 0
+#define WRITEPIPEFD 1
+#define NPIPEFD 2
+
+struct pmcstat_ev {
+ STAILQ_ENTRY(pmcstat_ev) ev_next;
+ char *ev_spec; /* event specification */
+ char *ev_name; /* (derived) event name */
+ enum pmc_mode ev_mode; /* desired mode */
+ int ev_count; /* associated count if in sampling mode */
+ int ev_cpu; /* specific cpu if requested */
+ int ev_descendants; /* attach to descendants */
+ int ev_cumulative; /* show cumulative counts */
+ int ev_fieldwidth; /* print width */
+ int ev_fieldskip; /* #leading spaces */
+ pmc_value_t ev_saved; /* saved value for incremental counts */
+ pmc_id_t ev_pmcid; /* allocated ID */
+};
+
+struct pmcstat_args {
+ int pa_flags;
+ pid_t pa_pid;
+ FILE *pa_outputfile;
+ FILE *pa_logfile;
+ double pa_interval;
+ int pa_argc;
+ char **pa_argv;
+ STAILQ_HEAD(, pmcstat_ev) pa_head;
+} args;
+
+int pmcstat_interrupt = 0;
+int pmcstat_displayheight = DEFAULT_DISPLAY_HEIGHT;
+int pmcstat_pipefd[NPIPEFD];
+int pmcstat_kq;
+
+/* Function prototypes */
+void pmcstat_cleanup(struct pmcstat_args *_a);
+void pmcstat_print_counters(struct pmcstat_args *_a);
+void pmcstat_print_headers(struct pmcstat_args *_a);
+void pmcstat_print_pmcs(struct pmcstat_args *_a);
+void pmcstat_setup_process(struct pmcstat_args *_a);
+void pmcstat_show_usage(void);
+void pmcstat_start_pmcs(struct pmcstat_args *_a);
+void pmcstat_start_process(struct pmcstat_args *_a);
+
+
+/*
+ * cleanup
+ */
+
+void
+pmcstat_cleanup(struct pmcstat_args *a)
+{
+ struct pmcstat_ev *ev, *tmp;
+
+ /* de-configure the log file if present. */
+ if (a->pa_flags & FLAG_USING_SAMPLING) {
+ (void) pmc_configure_logfile(-1);
+ (void) fclose(a->pa_logfile);
+ }
+
+ /* release allocated PMCs. */
+ STAILQ_FOREACH_SAFE(ev, &a->pa_head, ev_next, tmp)
+ if (ev->ev_pmcid != PMC_ID_INVALID) {
+ if (pmc_release(ev->ev_pmcid) < 0)
+ err(EX_OSERR, "ERROR: cannot release pmc "
+ "%d \"%s\"", ev->ev_pmcid, ev->ev_name);
+ free(ev->ev_name);
+ free(ev->ev_spec);
+ STAILQ_REMOVE(&a->pa_head, ev, pmcstat_ev, ev_next);
+ free(ev);
+ }
+}
+
+void
+pmcstat_start_pmcs(struct pmcstat_args *a)
+{
+ struct pmcstat_ev *ev;
+
+	STAILQ_FOREACH(ev, &a->pa_head, ev_next) {
+
+ assert(ev->ev_pmcid != PMC_ID_INVALID);
+
+ if (pmc_start(ev->ev_pmcid) < 0) {
+ warn("ERROR: Cannot start pmc %d \"%s\"",
+ ev->ev_pmcid, ev->ev_name);
+ pmcstat_cleanup(a);
+ }
+ }
+
+}
+
+void
+pmcstat_print_headers(struct pmcstat_args *a)
+{
+ struct pmcstat_ev *ev;
+ int c;
+
+ (void) fprintf(a->pa_outputfile, PRINT_HEADER_PREFIX);
+
+ STAILQ_FOREACH(ev, &a->pa_head, ev_next) {
+ if (PMC_IS_SAMPLING_MODE(ev->ev_mode))
+ continue;
+
+ c = PMC_IS_SYSTEM_MODE(ev->ev_mode) ? 's' : 'p';
+
+ if (ev->ev_fieldskip != 0) {
+ (void) fprintf(a->pa_outputfile, "%*s%c/%*s ",
+ ev->ev_fieldskip, "", c,
+ ev->ev_fieldwidth - ev->ev_fieldskip - 2,
+ ev->ev_name);
+ } else
+ (void) fprintf(a->pa_outputfile, "%c/%*s ",
+ c, ev->ev_fieldwidth - 2, ev->ev_name);
+ }
+
+ (void) fflush(a->pa_outputfile);
+}
+
+void
+pmcstat_print_counters(struct pmcstat_args *a)
+{
+ int extra_width;
+ struct pmcstat_ev *ev;
+ pmc_value_t value;
+
+ extra_width = sizeof(PRINT_HEADER_PREFIX) - 1;
+
+ STAILQ_FOREACH(ev, &a->pa_head, ev_next) {
+
+ /* skip sampling mode counters */
+ if (PMC_IS_SAMPLING_MODE(ev->ev_mode))
+ continue;
+
+ if (pmc_read(ev->ev_pmcid, &value) < 0)
+ err(EX_OSERR, "ERROR: Cannot read pmc "
+ "\"%s\"", ev->ev_name);
+
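+		/*
+		 * Print either the cumulative count, or the delta from
+		 * the value at the previous printout.
+		 */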
+		(void) fprintf(a->pa_outputfile, "%*ju ",
+		    ev->ev_fieldwidth + extra_width, (uintmax_t)
+		    (ev->ev_cumulative ? value : (value - ev->ev_saved)));
+ if (ev->ev_cumulative == 0)
+ ev->ev_saved = value;
+ extra_width = 0;
+ }
+
+ (void) fflush(a->pa_outputfile);
+}
+
+/*
+ * Print output
+ */
+
+void
+pmcstat_print_pmcs(struct pmcstat_args *a)
+{
+ static int linecount = 0;
+
+ if (++linecount > pmcstat_displayheight) {
+ (void) fprintf(a->pa_outputfile, "\n");
+ linecount = 1;
+ }
+
+ if (linecount == 1)
+ pmcstat_print_headers(a);
+
+ (void) fprintf(a->pa_outputfile, "\n");
+ pmcstat_print_counters(a);
+
+ return;
+}
+
+/*
+ * Do process profiling
+ *
+ * If a pid was specified, attach each allocated PMC to the target
+ * process. Otherwise, fork a child and attach the PMCs to the child,
+ * and have the child exec() the target program.
+ */
+
+void
+pmcstat_setup_process(struct pmcstat_args *a)
+{
+ char token;
+ struct pmcstat_ev *ev;
+ struct kevent kev;
+
+ if (a->pa_flags & FLAG_HAS_PID) {
+
+		STAILQ_FOREACH(ev, &a->pa_head, ev_next)
+ if (pmc_attach(ev->ev_pmcid, a->pa_pid) != 0)
+ err(EX_OSERR, "ERROR: cannot attach pmc \"%s\" to "
+ "process %d", ev->ev_name, (int) a->pa_pid);
+
+ } else {
+
+ /*
+ * We need to fork a new process and startup the child
+ * using execvp(). Before doing the exec() the child
+ * process reads its pipe for a token so that the parent
+ * can finish doing its pmc_attach() calls.
+ */
+
+ if (pipe(pmcstat_pipefd) < 0)
+ err(EX_OSERR, "ERROR: cannot create pipe");
+
+ switch (a->pa_pid = fork()) {
+ case -1:
+ err(EX_OSERR, "ERROR: cannot fork");
+ /*NOTREACHED*/
+
+ case 0: /* child */
+
+ /* wait for our parent to signal us */
+ (void) close(pmcstat_pipefd[WRITEPIPEFD]);
+ if (read(pmcstat_pipefd[READPIPEFD], &token, 1) < 0)
+ err(EX_OSERR, "ERROR (child): cannot read "
+ "token");
+ (void) close(pmcstat_pipefd[READPIPEFD]);
+
+ /* exec() the program requested */
+			execvp(*a->pa_argv, a->pa_argv);
+ err(EX_OSERR, "ERROR (child): execvp failed");
+ /*NOTREACHED*/
+
+ default: /* parent */
+
+ (void) close(pmcstat_pipefd[READPIPEFD]);
+
+ /* attach all our PMCs to the child */
+			STAILQ_FOREACH(ev, &a->pa_head, ev_next)
+ if (PMC_IS_VIRTUAL_MODE(ev->ev_mode) &&
+ pmc_attach(ev->ev_pmcid, a->pa_pid) != 0)
+ err(EX_OSERR, "ERROR: cannot attach pmc "
+ "\"%s\" to process %d", ev->ev_name,
+ (int) a->pa_pid);
+
+ }
+ }
+
+ /* Ask to be notified via a kevent when the child exits */
+ EV_SET(&kev, a->pa_pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, 0);
+
+ if (kevent(pmcstat_kq, &kev, 1, NULL, 0, NULL) < 0)
+ err(EX_OSERR, "ERROR: cannot monitor process %d",
+ a->pa_pid);
+
+ return;
+}
+
+void
+pmcstat_start_process(struct pmcstat_args *a)
+{
+
+ /* nothing to do: target is already running */
+ if (a->pa_flags & FLAG_HAS_PID)
+ return;
+
+ /* write token to child to state that we are ready */
+ if (write(pmcstat_pipefd[WRITEPIPEFD], "+", 1) != 1)
+ err(EX_OSERR, "ERROR: write failed");
+
+ (void) close(pmcstat_pipefd[WRITEPIPEFD]);
+}
+
+void
+pmcstat_show_usage(void)
+{
+ errx(EX_USAGE,
+ "[options] [commandline]\n"
+ "\t Measure process and/or system performance using hardware\n"
+ "\t performance monitoring counters.\n"
+ "\t Options include:\n"
+ "\t -C\t\t toggle showing cumulative counts\n"
+ "\t -O file\t set sampling log file to \"file\"\n"
+ "\t -P spec\t allocate process-private sampling PMC\n"
+ "\t -S spec\t allocate system-wide sampling PMC\n"
+ "\t -c cpu\t\t set default cpu\n"
+ "\t -d\t\t toggle tracking descendants\n"
+ "\t -n rate\t set sampling rate\n"
+ "\t -o file\t send print output to \"file\"\n"
+ "\t -p spec\t allocate process-private counting PMC\n"
+ "\t -s spec\t allocate system-wide counting PMC\n"
+ "\t -t pid\t attach to running process with pid \"pid\"\n"
+ "\t -w secs\t set printing time interval"
+ );
+}
+
+/*
+ * Main
+ */
+
+int
+main(int argc, char **argv)
+{
+ double interval;
+ int option, npmc, ncpu;
+ int c, current_cpu, current_sampling_count;
+ int running;
+ int do_descendants, use_cumulative_counts;
+ pid_t pid;
+ char *end;
+ struct pmcstat_ev *ev;
+ struct pmc_op_getpmcinfo *ppmci;
+ struct sigaction sa;
+ struct kevent kev;
+ struct winsize ws;
+
+ current_cpu = 0;
+ current_sampling_count = DEFAULT_SAMPLE_COUNT;
+ do_descendants = 0;
+ use_cumulative_counts = 0;
+ args.pa_flags = 0;
+ args.pa_pid = (pid_t) -1;
+ args.pa_logfile = NULL;
+ args.pa_outputfile = stderr;
+ args.pa_interval = DEFAULT_WAIT_INTERVAL;
+ STAILQ_INIT(&args.pa_head);
+
+ ev = NULL;
+
+ while ((option = getopt(argc, argv, "CO:P:S:c:dn:o:p:s:t:w:")) != -1)
+ switch (option) {
+ case 'C': /* cumulative values */
+ use_cumulative_counts = !use_cumulative_counts;
+ break;
+
+ case 'c': /* CPU */
+ current_cpu = strtol(optarg, &end, 0);
+ if (*end != '\0' || current_cpu < 0)
+ errx(EX_USAGE,
+ "ERROR: Illegal CPU number \"%s\"",
+ optarg);
+
+ break;
+
+ case 'd': /* toggle descendents */
+ do_descendants = !do_descendants;
+ break;
+
+ case 'p': /* process virtual counting PMC */
+ case 's': /* system-wide counting PMC */
+ case 'P': /* process virtual sampling PMC */
+ case 'S': /* system-wide sampling PMC */
+ if ((ev = malloc(sizeof(*ev))) == NULL)
+ errx(EX_SOFTWARE, "ERROR: Out of memory");
+
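+			/*
+			 * Lower-case options allocate counting PMCs and
+			 * upper-case ones sampling PMCs; 'p'/'P' request
+			 * process-virtual modes, 's'/'S' system-wide modes.
+			 */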
+ switch (option) {
+ case 'p': ev->ev_mode = PMC_MODE_TC; break;
+ case 's': ev->ev_mode = PMC_MODE_SC; break;
+ case 'P': ev->ev_mode = PMC_MODE_TS; break;
+ case 'S': ev->ev_mode = PMC_MODE_SS; break;
+ }
+
+ if (option == 'P' || option == 'p')
+ args.pa_flags |= FLAG_USING_PROCESS_PMC;
+
+ if (option == 'P' || option == 'S')
+ args.pa_flags |= FLAG_USING_SAMPLING;
+
+ if (option == 'p' || option == 's')
+ args.pa_flags |= FLAG_USING_COUNTING;
+
+			if ((ev->ev_spec = strdup(optarg)) == NULL)
+				errx(EX_SOFTWARE, "ERROR: Out of memory");
+
+ if (option == 'S' || option == 'P')
+ ev->ev_count = current_sampling_count;
+ else
+ ev->ev_count = -1;
+
+ if (option == 'S' || option == 's')
+ ev->ev_cpu = current_cpu;
+ else
+ ev->ev_cpu = PMC_CPU_ANY;
+
+ ev->ev_descendants = do_descendants;
+ ev->ev_cumulative = use_cumulative_counts;
+
+ ev->ev_saved = 0LL;
+ ev->ev_pmcid = PMC_ID_INVALID;
+
+ /* extract event name */
+ c = strcspn(optarg, ", \t");
+			if ((ev->ev_name = malloc(c + 1)) == NULL)
+				errx(EX_SOFTWARE, "ERROR: Out of memory");
+			(void) strncpy(ev->ev_name, optarg, c);
+			ev->ev_name[c] = '\0';
+
+ STAILQ_INSERT_TAIL(&args.pa_head, ev, ev_next);
+
+ break;
+
+ case 'n': /* sampling count */
+ current_sampling_count = strtol(optarg, &end, 0);
+ if (*end != '\0' || current_sampling_count <= 0)
+ errx(EX_USAGE,
+ "ERROR: Illegal count value \"%s\"",
+ optarg);
+ break;
+
+		case 'o':	/* outputfile */
+			if (args.pa_outputfile != NULL &&
+			    args.pa_outputfile != stderr)
+				(void) fclose(args.pa_outputfile);
+
+			if ((args.pa_outputfile = fopen(optarg, "w")) == NULL)
+				errx(EX_OSERR, "ERROR: cannot open \"%s\" for "
+				    "writing", optarg);
+			break;
+
+ case 'O': /* sampling output */
+ if (args.pa_logfile != NULL)
+ (void) fclose(args.pa_logfile);
+
+ if ((args.pa_logfile = fopen(optarg, "w")) == NULL)
+ errx(EX_OSERR, "ERROR: cannot open \"%s\" for "
+ "writing", optarg);
+ break;
+
+ case 't': /* target pid */
+ pid = strtol(optarg, &end, 0);
+ if (*end != '\0' || pid <= 0)
+ errx(EX_USAGE, "ERROR: Illegal pid value "
+ "\"%s\"", optarg);
+
+ args.pa_flags |= FLAG_HAS_PID;
+ args.pa_pid = pid;
+
+ break;
+
+ case 'w': /* wait interval */
+ interval = strtod(optarg, &end);
+ if (*end != '\0' || interval <= 0)
+ errx(EX_USAGE, "ERROR: Illegal wait interval "
+ "value \"%s\"", optarg);
+ args.pa_flags |= FLAG_HAS_WAIT_INTERVAL;
+ args.pa_interval = interval;
+
+ break;
+
+ case '?':
+ default:
+ pmcstat_show_usage();
+ break;
+
+ }
+
+ args.pa_argc = (argc -= optind);
+ args.pa_argv = (argv += optind);
+
+ if (argc)
+ args.pa_flags |= FLAG_HAS_PROCESS;
+
+ /*
+ * Check invocation syntax.
+ */
+
+ if (STAILQ_EMPTY(&args.pa_head)) {
+ warnx("ERROR: At least one PMC event must be specified");
+ pmcstat_show_usage();
+ }
+
+ if (argc == 0) {
+ if (args.pa_pid == -1) {
+ if (args.pa_flags & FLAG_USING_PROCESS_PMC)
+ errx(EX_USAGE, "ERROR: the -P or -p options "
+ "require a target process");
+ } else if ((args.pa_flags & FLAG_USING_PROCESS_PMC) == 0)
+ errx(EX_USAGE,
+ "ERROR: option -t requires a process-mode pmc "
+ "specification");
+ } else if (args.pa_pid != -1)
+ errx(EX_USAGE,
+ "ERROR: option -t cannot be specified with a command "
+ "name");
+
+ if (pmc_init() < 0)
+ err(EX_UNAVAILABLE,
+ "ERROR: Initialization of the pmc(3) library failed");
+
+ if ((ncpu = pmc_ncpu()) < 0)
+		err(EX_OSERR, "ERROR: Cannot determine the number of CPUs "
+ "on the system");
+
+ if ((npmc = pmc_npmc(0)) < 0) /* assume all CPUs are identical */
+ err(EX_OSERR, "ERROR: Cannot determine the number of PMCs "
+ "on CPU %d", 0);
+
+ /*
+ * Allocate PMCs.
+ */
+
+ if (pmc_pmcinfo(0, &ppmci) < 0)
+ err(EX_OSERR, "ERROR: cannot retrieve pmc information");
+
+ assert(ppmci != NULL);
+
+ STAILQ_FOREACH(ev, &args.pa_head, ev_next)
+ if (pmc_allocate(ev->ev_spec, ev->ev_mode,
+ (ev->ev_descendants ? PMC_F_DESCENDANTS : 0),
+ ev->ev_cpu, &ev->ev_pmcid) < 0)
+ err(EX_OSERR, "ERROR: Cannot allocate %s-mode pmc with "
+ "specification \"%s\"",
+ PMC_IS_SYSTEM_MODE(ev->ev_mode) ? "system" : "process",
+ ev->ev_spec);
+
+ /* compute printout widths */
+ STAILQ_FOREACH(ev, &args.pa_head, ev_next) {
+ int pmc_width;
+ int pmc_display_width;
+ int pmc_header_width;
+
+ pmc_width = ppmci->pm_pmcs[ev->ev_pmcid].pm_width;
+ pmc_header_width = strlen(ev->ev_name) + 2; /* prefix '%c|' */
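+		/*
+		 * A pmc_width bit counter needs at most
+		 * floor(pmc_width / log2(10)) + 1 decimal digits, where
+		 * 3.32193 approximates log2(10).
+		 */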
+ pmc_display_width = (int) floor(pmc_width / 3.32193) + 1;
+
+ if (pmc_header_width > pmc_display_width) {
+ ev->ev_fieldskip = 0;
+ ev->ev_fieldwidth = pmc_header_width;
+ } else {
+ ev->ev_fieldskip = pmc_display_width -
+ pmc_header_width;
+ ev->ev_fieldwidth = pmc_display_width;
+ }
+ }
+
+ /* Allocate a kqueue */
+ if ((pmcstat_kq = kqueue()) < 0)
+ err(EX_OSERR, "ERROR: Cannot allocate kqueue");
+
+ /*
+	 * If our output is going to a terminal, register a handler
+ * for window size changes.
+ */
+
+ if (isatty(fileno(args.pa_outputfile))) {
+
+ if (ioctl(fileno(args.pa_outputfile), TIOCGWINSZ, &ws) < 0)
+ err(EX_OSERR, "ERROR: Cannot determine window size");
+
+ pmcstat_displayheight = ws.ws_row - 1;
+
+ EV_SET(&kev, SIGWINCH, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
+
+ if (kevent(pmcstat_kq, &kev, 1, NULL, 0, NULL) < 0)
+ err(EX_OSERR, "ERROR: Cannot register kevent for "
+ "SIGWINCH");
+ }
+
+ EV_SET(&kev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
+
+ if (kevent(pmcstat_kq, &kev, 1, NULL, 0, NULL) < 0)
+ err(EX_OSERR, "ERROR: Cannot register kevent for SIGINT");
+
+ if (args.pa_flags & FLAG_USING_SAMPLING) {
+
+ /*
+ * configure log file
+ */
+
+ if (args.pa_logfile == NULL)
+ if ((args.pa_logfile =
+ fopen(DEFAULT_LOGFILE_NAME, "w")) == NULL)
+ err(EX_CANTCREAT, "ERROR: Cannot open sampling "
+ "log file \"%s\"", DEFAULT_LOGFILE_NAME);
+
+ if (pmc_configure_logfile(fileno(args.pa_logfile)) < 0)
+ err(EX_OSERR, "ERROR: Cannot configure sampling "
+ "log");
+
+ STAILQ_FOREACH(ev, &args.pa_head, ev_next)
+ if (PMC_IS_SAMPLING_MODE(ev->ev_mode) &&
+ pmc_set(ev->ev_pmcid, ev->ev_count) < 0)
+ err(EX_OSERR, "ERROR: Cannot set sampling count "
+ "for PMC \"%s\"", ev->ev_name);
+ }
+
+ /* setup a timer for any counting mode PMCs */
+ if (args.pa_flags & FLAG_USING_COUNTING) {
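+		/* EVFILT_TIMER periods are given in milliseconds */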
+ EV_SET(&kev, 0, EVFILT_TIMER, EV_ADD, 0,
+ args.pa_interval * 1000, NULL);
+
+ if (kevent(pmcstat_kq, &kev, 1, NULL, 0, NULL) < 0)
+ err(EX_OSERR, "ERROR: Cannot register kevent for "
+ "timer");
+ }
+
+ /* attach PMCs to the target process, starting it if specified */
+ if (args.pa_flags & FLAG_HAS_PROCESS)
+ pmcstat_setup_process(&args);
+
+ /* start the pmcs */
+ pmcstat_start_pmcs(&args);
+
+ /* start the (commandline) process if needed */
+ if (args.pa_flags & FLAG_HAS_PROCESS)
+ pmcstat_start_process(&args);
+
+ /* Handle SIGINT using the kqueue loop */
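+	/*
+	 * Note: kqueue still reports signals delivered while they are
+	 * marked SIG_IGN, so ignoring SIGINT here only suppresses its
+	 * default terminate action.
+	 */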
+ sa.sa_handler = SIG_IGN;
+ sa.sa_flags = 0;
+ (void) sigemptyset(&sa.sa_mask);
+
+ if (sigaction(SIGINT, &sa, NULL) < 0)
+ err(EX_OSERR, "ERROR: Cannot install signal handler");
+
+ /*
+	 * Loop until either the target process (if any) exits, or we
+	 * are interrupted by a SIGINT.
+ */
+
+ running = 1;
+ do {
+ if ((c = kevent(pmcstat_kq, NULL, 0, &kev, 1, NULL)) <= 0) {
+ if (errno != EINTR)
+ err(EX_OSERR, "ERROR: kevent failed");
+ else
+ continue;
+ }
+
+ if (kev.flags & EV_ERROR)
+			errc(EX_OSERR, (int) kev.data, "ERROR: kevent failed");
+
+ switch (kev.filter) {
+ case EVFILT_PROC: /* target process exited */
+ running = 0;
+ /* FALLTHROUGH */
+
+ case EVFILT_TIMER: /* print out counting PMCs */
+ pmcstat_print_pmcs(&args);
+
+ if (running == 0) /* final newline */
+ (void) fprintf(args.pa_outputfile, "\n");
+ break;
+
+ case EVFILT_SIGNAL:
+ if (kev.ident == SIGINT) {
+ /* pass the signal on to the child process */
+ if ((args.pa_flags & FLAG_HAS_PROCESS) &&
+ (args.pa_flags & FLAG_HAS_PID) == 0)
+ if (kill(args.pa_pid, SIGINT) != 0)
+ err(EX_OSERR, "cannot kill "
+ "child");
+ running = 0;
+ } else if (kev.ident == SIGWINCH) {
+ if (ioctl(fileno(args.pa_outputfile),
+ TIOCGWINSZ, &ws) < 0)
+ err(EX_OSERR, "ERROR: Cannot determine "
+ "window size");
+ pmcstat_displayheight = ws.ws_row - 1;
+ } else
+ assert(0);
+
+ break;
+ }
+
+ } while (running);
+
+ pmcstat_cleanup(&args);
+
+ return 0;
+}