summaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorjkoshy <jkoshy@FreeBSD.org>2005-06-09 19:45:09 +0000
committerjkoshy <jkoshy@FreeBSD.org>2005-06-09 19:45:09 +0000
commit1d3209ab83aac3089f15e00934e922d222a4ecf0 (patch)
tree4970329c2802c6329dd4f6e781d84b27dbf8f412 /lib
parent4421a087425df7cc08a5671152d0ec7410bdb33e (diff)
downloadFreeBSD-src-1d3209ab83aac3089f15e00934e922d222a4ecf0.zip
FreeBSD-src-1d3209ab83aac3089f15e00934e922d222a4ecf0.tar.gz
MFP4:
- Implement sampling modes and logging support in hwpmc(4). - Separate MI and MD parts of hwpmc(4) and allow sharing of PMC implementations across different architectures. Add support for P4 (EMT64) style PMCs to the amd64 code. - New pmcstat(8) options: -E (exit time counts) -W (counts every context switch), -R (print log file). - pmc(3) API changes, improve our ability to keep ABI compatibility in the future. Add more 'alias' names for commonly used events. - bug fixes & documentation.
Diffstat (limited to 'lib')
-rw-r--r--lib/libpmc/Makefile14
-rw-r--r--lib/libpmc/libpmc.c1464
-rw-r--r--lib/libpmc/pmc.3104
-rw-r--r--lib/libpmc/pmc.h50
-rw-r--r--lib/libpmc/pmclog.3276
-rw-r--r--lib/libpmc/pmclog.c532
-rw-r--r--lib/libpmc/pmclog.h146
7 files changed, 1833 insertions, 753 deletions
diff --git a/lib/libpmc/Makefile b/lib/libpmc/Makefile
index 7d24d85..c2560bd 100644
--- a/lib/libpmc/Makefile
+++ b/lib/libpmc/Makefile
@@ -2,12 +2,12 @@
LIB= pmc
-SRCS= libpmc.c
-INCS= pmc.h
+SRCS= libpmc.c pmclog.c
+INCS= pmc.h pmclog.h
WARNS?= 6
-MAN= pmc.3
+MAN= pmc.3 pmclog.3
MLINKS+= \
pmc.3 pmc_allocate.3 \
@@ -19,6 +19,7 @@ MLINKS+= \
pmc.3 pmc_disable.3 \
pmc.3 pmc_enable.3 \
pmc.3 pmc_event_names_of_class.3 \
+ pmc.3 pmc_flush_logfile.3 \
pmc.3 pmc_get_driver_stats.3 \
pmc.3 pmc_init.3 \
pmc.3 pmc_name_of_capability.3 \
@@ -38,6 +39,13 @@ MLINKS+= \
pmc.3 pmc_stop.3 \
pmc.3 pmc_width.3 \
pmc.3 pmc_write.3 \
+ pmc.3 pmc_writelog.3 \
pmc.3 pmc_x86_get_msr.3
+MLINKS+= \
+ pmclog.3 pmclog_open.3 \
+ pmclog.3 pmclog_close.3 \
+ pmclog.3 pmclog_feed.3 \
+ pmclog.3 pmclog_read.3
+
.include <bsd.lib.mk>
diff --git a/lib/libpmc/libpmc.c b/lib/libpmc/libpmc.c
index 272d25a..09cc2b4 100644
--- a/lib/libpmc/libpmc.c
+++ b/lib/libpmc/libpmc.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2003,2004 Joseph Koshy
+ * Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,14 +46,17 @@ __FBSDID("$FreeBSD$");
#if defined(__i386__)
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
-static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+#endif
+#if defined(__amd64__)
+static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
+#endif
+#if defined(__i386__)
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
-#elif defined(__amd64__)
-static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
#endif
@@ -212,7 +215,7 @@ k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
int c, has_unitmask;
uint32_t count, unitmask;
- pmc_config->pm_amd_config = 0;
+ pmc_config->pm_md.pm_amd.pm_amd_config = 0;
pmc_config->pm_caps |= PMC_CAP_READ;
if (pe == PMC_EV_TSC_TSC) {
@@ -226,7 +229,7 @@ k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
pe == PMC_EV_K7_DC_WRITEBACKS) {
has_unitmask = 1;
- unitmask = K7_PMC_UNITMASK_MOESI;
+ unitmask = AMD_PMC_UNITMASK_MOESI;
} else
unitmask = has_unitmask = 0;
@@ -243,7 +246,8 @@ k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
return -1;
pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
- pmc_config->pm_amd_config |= K7_PMC_TO_COUNTER(count);
+ pmc_config->pm_md.pm_amd.pm_amd_config |=
+ AMD_PMC_TO_COUNTER(count);
} else if (KWMATCH(p, K7_KW_EDGE)) {
pmc_config->pm_caps |= PMC_CAP_EDGE;
@@ -261,15 +265,15 @@ k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
while ((c = tolower(*q++)) != 0)
if (c == 'm')
- unitmask |= K7_PMC_UNITMASK_M;
+ unitmask |= AMD_PMC_UNITMASK_M;
else if (c == 'o')
- unitmask |= K7_PMC_UNITMASK_O;
+ unitmask |= AMD_PMC_UNITMASK_O;
else if (c == 'e')
- unitmask |= K7_PMC_UNITMASK_E;
+ unitmask |= AMD_PMC_UNITMASK_E;
else if (c == 's')
- unitmask |= K7_PMC_UNITMASK_S;
+ unitmask |= AMD_PMC_UNITMASK_S;
else if (c == 'i')
- unitmask |= K7_PMC_UNITMASK_I;
+ unitmask |= AMD_PMC_UNITMASK_I;
else if (c == '+')
continue;
else
@@ -286,14 +290,387 @@ k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
if (has_unitmask) {
pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
- pmc_config->pm_amd_config |=
- K7_PMC_TO_UNITMASK(unitmask);
+ pmc_config->pm_md.pm_amd.pm_amd_config |=
+ AMD_PMC_TO_UNITMASK(unitmask);
}
return 0;
}
+#endif
+
+#if defined(__amd64__)
+
+/*
+ * AMD K8 PMCs.
+ *
+ * These are very similar to AMD K7 PMCs, but support more kinds of
+ * events.
+ */
+
+static struct pmc_event_alias k8_aliases[] = {
+ EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
+ EV_ALIAS("branch-mispredicts",
+ "k8-fr-retired-taken-branches-mispredicted"),
+ EV_ALIAS("cycles", "tsc"),
+ EV_ALIAS("dc-misses", "k8-dc-miss"),
+ EV_ALIAS("ic-misses", "k8-ic-miss"),
+ EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
+ EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
+ EV_ALIAS(NULL, NULL)
+};
+
+#define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
+
+/*
+ * Parsing tables
+ */
+
+/* fp dispatched fpu ops */
+static const struct pmc_masks k8_mask_fdfo[] = {
+ __K8MASK(add-pipe-excluding-junk-ops, 0),
+ __K8MASK(multiply-pipe-excluding-junk-ops, 1),
+ __K8MASK(store-pipe-excluding-junk-ops, 2),
+ __K8MASK(add-pipe-junk-ops, 3),
+ __K8MASK(multiply-pipe-junk-ops, 4),
+ __K8MASK(store-pipe-junk-ops, 5),
+ NULLMASK
+};
+
+/* ls segment register loads */
+static const struct pmc_masks k8_mask_lsrl[] = {
+ __K8MASK(es, 0),
+ __K8MASK(cs, 1),
+ __K8MASK(ss, 2),
+ __K8MASK(ds, 3),
+ __K8MASK(fs, 4),
+ __K8MASK(gs, 5),
+ __K8MASK(hs, 6),
+ NULLMASK
+};
+
+/* ls locked operation */
+static const struct pmc_masks k8_mask_llo[] = {
+ __K8MASK(locked-instructions, 0),
+ __K8MASK(cycles-in-request, 1),
+ __K8MASK(cycles-to-complete, 2),
+ NULLMASK
+};
+
+/* dc refill from {l2,system} and dc copyback */
+static const struct pmc_masks k8_mask_dc[] = {
+ __K8MASK(invalid, 0),
+ __K8MASK(shared, 1),
+ __K8MASK(exclusive, 2),
+ __K8MASK(owner, 3),
+ __K8MASK(modified, 4),
+ NULLMASK
+};
+
+/* dc one bit ecc error */
+static const struct pmc_masks k8_mask_dobee[] = {
+ __K8MASK(scrubber, 0),
+ __K8MASK(piggyback, 1),
+ NULLMASK
+};
+
+/* dc dispatched prefetch instructions */
+static const struct pmc_masks k8_mask_ddpi[] = {
+ __K8MASK(load, 0),
+ __K8MASK(store, 1),
+ __K8MASK(nta, 2),
+ NULLMASK
+};
+
+/* dc dcache accesses by locks */
+static const struct pmc_masks k8_mask_dabl[] = {
+ __K8MASK(accesses, 0),
+ __K8MASK(misses, 1),
+ NULLMASK
+};
+
+/* bu internal l2 request */
+static const struct pmc_masks k8_mask_bilr[] = {
+ __K8MASK(ic-fill, 0),
+ __K8MASK(dc-fill, 1),
+ __K8MASK(tlb-reload, 2),
+ __K8MASK(tag-snoop, 3),
+ __K8MASK(cancelled, 4),
+ NULLMASK
+};
+
+/* bu fill request l2 miss */
+static const struct pmc_masks k8_mask_bfrlm[] = {
+ __K8MASK(ic-fill, 0),
+ __K8MASK(dc-fill, 1),
+ __K8MASK(tlb-reload, 2),
+ NULLMASK
+};
+
+/* bu fill into l2 */
+static const struct pmc_masks k8_mask_bfil[] = {
+ __K8MASK(dirty-l2-victim, 0),
+ __K8MASK(victim-from-l2, 1),
+ NULLMASK
+};
+
+/* fr retired fpu instructions */
+static const struct pmc_masks k8_mask_frfi[] = {
+ __K8MASK(x87, 0),
+ __K8MASK(mmx-3dnow, 1),
+ __K8MASK(packed-sse-sse2, 2),
+ __K8MASK(scalar-sse-sse2, 3),
+ NULLMASK
+};
+
+/* fr retired fastpath double op instructions */
+static const struct pmc_masks k8_mask_frfdoi[] = {
+ __K8MASK(low-op-pos-0, 0),
+ __K8MASK(low-op-pos-1, 1),
+ __K8MASK(low-op-pos-2, 2),
+ NULLMASK
+};
+
+/* fr fpu exceptions */
+static const struct pmc_masks k8_mask_ffe[] = {
+ __K8MASK(x87-reclass-microfaults, 0),
+ __K8MASK(sse-retype-microfaults, 1),
+ __K8MASK(sse-reclass-microfaults, 2),
+ __K8MASK(sse-and-x87-microtraps, 3),
+ NULLMASK
+};
+
+/* nb memory controller page access event */
+static const struct pmc_masks k8_mask_nmcpae[] = {
+ __K8MASK(page-hit, 0),
+ __K8MASK(page-miss, 1),
+ __K8MASK(page-conflict, 2),
+ NULLMASK
+};
+
+/* nb memory controller turnaround */
+static const struct pmc_masks k8_mask_nmct[] = {
+ __K8MASK(dimm-turnaround, 0),
+ __K8MASK(read-to-write-turnaround, 1),
+ __K8MASK(write-to-read-turnaround, 2),
+ NULLMASK
+};
+
+/* nb memory controller bypass saturation */
+static const struct pmc_masks k8_mask_nmcbs[] = {
+ __K8MASK(memory-controller-hi-pri-bypass, 0),
+ __K8MASK(memory-controller-lo-pri-bypass, 1),
+ __K8MASK(dram-controller-interface-bypass, 2),
+ __K8MASK(dram-controller-queue-bypass, 3),
+ NULLMASK
+};
+
+/* nb sized commands */
+static const struct pmc_masks k8_mask_nsc[] = {
+ __K8MASK(nonpostwrszbyte, 0),
+ __K8MASK(nonpostwrszdword, 1),
+ __K8MASK(postwrszbyte, 2),
+ __K8MASK(postwrszdword, 3),
+ __K8MASK(rdszbyte, 4),
+ __K8MASK(rdszdword, 5),
+ __K8MASK(rdmodwr, 6),
+ NULLMASK
+};
+
+/* nb probe result */
+static const struct pmc_masks k8_mask_npr[] = {
+ __K8MASK(probe-miss, 0),
+ __K8MASK(probe-hit, 1),
+ __K8MASK(probe-hit-dirty-no-memory-cancel, 2),
+ __K8MASK(probe-hit-dirty-with-memory-cancel, 3),
+ NULLMASK
+};
+
+/* nb hypertransport bus bandwidth */
+static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
+ __K8MASK(command, 0),
+ __K8MASK(data, 1),
+ __K8MASK(buffer-release, 2),
+ __K8MASK(nop, 3),
+ NULLMASK
+};
+
+#undef __K8MASK
+
+#define K8_KW_COUNT "count"
+#define K8_KW_EDGE "edge"
+#define K8_KW_INV "inv"
+#define K8_KW_MASK "mask"
+#define K8_KW_OS "os"
+#define K8_KW_USR "usr"
+
+static int
+k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ char *e, *p, *q;
+ int n;
+ uint32_t count, evmask;
+ const struct pmc_masks *pm, *pmask;
+
+ pmc_config->pm_caps |= PMC_CAP_READ;
+ pmc_config->pm_md.pm_amd.pm_amd_config = 0;
+
+ if (pe == PMC_EV_TSC_TSC) {
+ /* TSC events must be unqualified. */
+ if (ctrspec && *ctrspec != '\0')
+ return -1;
+ return 0;
+ }
+
+ pmask = NULL;
+ evmask = 0;
+
+#define __K8SETMASK(M) pmask = k8_mask_##M
+
+ /* setup parsing tables */
+ switch (pe) {
+ case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
+ __K8SETMASK(fdfo);
+ break;
+ case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
+ __K8SETMASK(lsrl);
+ break;
+ case PMC_EV_K8_LS_LOCKED_OPERATION:
+ __K8SETMASK(llo);
+ break;
+ case PMC_EV_K8_DC_REFILL_FROM_L2:
+ case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
+ case PMC_EV_K8_DC_COPYBACK:
+ __K8SETMASK(dc);
+ break;
+ case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
+ __K8SETMASK(dobee);
+ break;
+ case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
+ __K8SETMASK(ddpi);
+ break;
+ case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
+ __K8SETMASK(dabl);
+ break;
+ case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
+ __K8SETMASK(bilr);
+ break;
+ case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
+ __K8SETMASK(bfrlm);
+ break;
+ case PMC_EV_K8_BU_FILL_INTO_L2:
+ __K8SETMASK(bfil);
+ break;
+ case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
+ __K8SETMASK(frfi);
+ break;
+ case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
+ __K8SETMASK(frfdoi);
+ break;
+ case PMC_EV_K8_FR_FPU_EXCEPTIONS:
+ __K8SETMASK(ffe);
+ break;
+ case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
+ __K8SETMASK(nmcpae);
+ break;
+ case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
+ __K8SETMASK(nmct);
+ break;
+ case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
+ __K8SETMASK(nmcbs);
+ break;
+ case PMC_EV_K8_NB_SIZED_COMMANDS:
+ __K8SETMASK(nsc);
+ break;
+ case PMC_EV_K8_NB_PROBE_RESULT:
+ __K8SETMASK(npr);
+ break;
+ case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
+ case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
+ case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
+ __K8SETMASK(nhbb);
+ break;
+
+ default:
+ break; /* no options defined */
+ }
+
+ pmc_config->pm_caps |= PMC_CAP_WRITE;
+
+ while ((p = strsep(&ctrspec, ",")) != NULL) {
+ if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return -1;
+
+ count = strtol(q, &e, 0);
+ if (e == q || *e != '\0')
+ return -1;
+
+ pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
+ pmc_config->pm_md.pm_amd.pm_amd_config |=
+ AMD_PMC_TO_COUNTER(count);
+
+ } else if (KWMATCH(p, K8_KW_EDGE)) {
+ pmc_config->pm_caps |= PMC_CAP_EDGE;
+ } else if (KWMATCH(p, K8_KW_INV)) {
+ pmc_config->pm_caps |= PMC_CAP_INVERT;
+ } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
+ if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
+ return -1;
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ } else if (KWMATCH(p, K8_KW_OS)) {
+ pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+ } else if (KWMATCH(p, K8_KW_USR)) {
+ pmc_config->pm_caps |= PMC_CAP_USER;
+ } else
+ return -1;
+ }
+
+ /* other post processing */
+
+ switch (pe) {
+ case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
+ case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
+ case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
+ case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
+ case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
+ case PMC_EV_K8_FR_FPU_EXCEPTIONS:
+ /* XXX only available in rev B and later */
+ break;
+ case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
+ /* XXX only available in rev C and later */
+ break;
+ case PMC_EV_K8_LS_LOCKED_OPERATION:
+ /* XXX CPU Rev A,B evmask is to be zero */
+ if (evmask & (evmask - 1)) /* > 1 bit set */
+ return -1;
+ if (evmask == 0) {
+ evmask = 0x01; /* Rev C and later: #instrs */
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ }
+ break;
+ default:
+ if (evmask == 0 && pmask != NULL) {
+ for (pm = pmask; pm->pm_name; pm++)
+ evmask |= pm->pm_value;
+ pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
+ }
+ }
+
+ if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
+ pmc_config->pm_md.pm_amd.pm_amd_config =
+ AMD_PMC_TO_UNITMASK(evmask);
+
+ return 0;
+}
+
+#endif
+
+#if defined(__i386__)
+
/*
* Intel P4 PMCs
*/
@@ -629,7 +1006,8 @@ p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
const struct pmc_masks *pm, *pmask;
pmc_config->pm_caps |= PMC_CAP_READ;
- pmc_config->pm_p4_cccrconfig = pmc_config->pm_p4_escrconfig = 0;
+ pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
+ pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
if (pe == PMC_EV_TSC_TSC) {
/* TSC must not be further qualified */
@@ -838,7 +1216,7 @@ p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
return -1;
pmc_config->pm_caps |= PMC_CAP_TAGGING;
- pmc_config->pm_p4_escrconfig |=
+ pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
P4_ESCR_TO_TAG_VALUE(count);
} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
q = strchr(p, '=');
@@ -850,8 +1228,10 @@ p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
return -1;
pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
- pmc_config->pm_p4_cccrconfig &= ~P4_CCCR_THRESHOLD_MASK;
- pmc_config->pm_p4_cccrconfig |= P4_CCCR_TO_THRESHOLD(count);
+ pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
+ ~P4_CCCR_THRESHOLD_MASK;
+ pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
+ P4_CCCR_TO_THRESHOLD(count);
} else if (KWMATCH(p, P4_KW_USR))
pmc_config->pm_caps |= PMC_CAP_USER;
else
@@ -865,7 +1245,7 @@ p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
pmc_config->pm_caps |= PMC_CAP_EDGE;
/* fill in thread activity mask */
- pmc_config->pm_p4_cccrconfig |=
+ pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
if (evmask)
@@ -896,12 +1276,29 @@ p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
}
}
- pmc_config->pm_p4_escrconfig = P4_ESCR_TO_EVENT_MASK(evmask);
+ pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
+ P4_ESCR_TO_EVENT_MASK(evmask);
return 0;
}
/*
+ * Pentium style PMCs
+ */
+
+static struct pmc_event_alias p5_aliases[] = {
+ EV_ALIAS("cycles", "tsc"),
+ EV_ALIAS(NULL, NULL)
+};
+
+static int
+p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ return -1 || pe || ctrspec || pmc_config; /* shut up gcc */
+}
+
+/*
* Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III,
* and Pentium M CPUs.
*/
@@ -1034,7 +1431,7 @@ p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
const struct pmc_masks *pm, *pmask;
pmc_config->pm_caps |= PMC_CAP_READ;
- pmc_config->pm_p6_config = 0;
+ pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;
if (pe == PMC_EV_TSC_TSC) {
if (ctrspec && *ctrspec != '\0')
@@ -1113,7 +1510,8 @@ p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
if (e == q || *e != '\0')
return -1;
pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
- pmc_config->pm_p6_config |= P6_EVSEL_TO_CMASK(count);
+ pmc_config->pm_md.pm_ppro.pm_ppro_config |=
+ P6_EVSEL_TO_CMASK(count);
} else if (KWMATCH(p, P6_KW_EDGE)) {
pmc_config->pm_caps |= PMC_CAP_EDGE;
} else if (KWMATCH(p, P6_KW_INV)) {
@@ -1222,396 +1620,274 @@ p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
}
if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
- pmc_config->pm_p6_config |= P6_EVSEL_TO_UMASK(evmask);
+ pmc_config->pm_md.pm_ppro.pm_ppro_config |=
+ P6_EVSEL_TO_UMASK(evmask);
return 0;
}
+#endif
+
/*
- * Pentium style PMCs
+ * API entry points
*/
-static struct pmc_event_alias p5_aliases[] = {
- EV_ALIAS("cycles", "tsc"),
- EV_ALIAS(NULL, NULL)
-};
-static int
-p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
- struct pmc_op_pmcallocate *pmc_config)
+int
+pmc_allocate(const char *ctrspec, enum pmc_mode mode,
+ uint32_t flags, int cpu, pmc_id_t *pmcid)
{
- return -1 || pe || ctrspec || pmc_config; /* shut up gcc */
-}
+ int retval;
+ enum pmc_event pe;
+ char *r, *spec_copy;
+ const char *ctrname;
+ const struct pmc_event_alias *p;
+ struct pmc_op_pmcallocate pmc_config;
-#elif defined(__amd64__)
+ spec_copy = NULL;
+ retval = -1;
-/*
- * AMD K8 PMCs.
- *
- * These are very similar to AMD K7 PMCs, but support more kinds of
- * events.
- */
+ if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
+ mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
+ errno = EINVAL;
+ goto out;
+ }
-static struct pmc_event_alias k8_aliases[] = {
- EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
- EV_ALIAS("branch-mispredicts",
- "k8-fr-retired-taken-branches-mispredicted"),
- EV_ALIAS("cycles", "tsc"),
- EV_ALIAS("dc-misses", "k8-dc-miss"),
- EV_ALIAS("ic-misses", "k8-ic-miss"),
- EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
- EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
- EV_ALIAS(NULL, NULL)
-};
+ /* replace an event alias with the canonical event specifier */
+ if (pmc_mdep_event_aliases)
+ for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
+ if (!strcmp(ctrspec, p->pm_alias)) {
+ spec_copy = strdup(p->pm_spec);
+ break;
+ }
-#define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
+ if (spec_copy == NULL)
+ spec_copy = strdup(ctrspec);
-/*
- * Parsing tables
- */
+ r = spec_copy;
+ ctrname = strsep(&r, ",");
-/* fp dispatched fpu ops */
-static const struct pmc_masks k8_mask_fdfo[] = {
- __K8MASK(add-pipe-excluding-junk-ops, 0),
- __K8MASK(multiply-pipe-excluding-junk-ops, 1),
- __K8MASK(store-pipe-excluding-junk-ops, 2),
- __K8MASK(add-pipe-junk-ops, 3),
- __K8MASK(multiply-pipe-junk-ops, 4),
- __K8MASK(store-pipe-junk-ops, 5),
- NULLMASK
-};
+ /* look for the given counter name */
-/* ls segment register loads */
-static const struct pmc_masks k8_mask_lsrl[] = {
- __K8MASK(es, 0),
- __K8MASK(cs, 1),
- __K8MASK(ss, 2),
- __K8MASK(ds, 3),
- __K8MASK(fs, 4),
- __K8MASK(gs, 5),
- __K8MASK(hs, 6),
- NULLMASK
-};
+ for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
+ if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
+ break;
-/* ls locked operation */
-static const struct pmc_masks k8_mask_llo[] = {
- __K8MASK(locked-instructions, 0),
- __K8MASK(cycles-in-request, 1),
- __K8MASK(cycles-to-complete, 2),
- NULLMASK
-};
+ if (pe > PMC_EVENT_LAST) {
+ errno = EINVAL;
+ goto out;
+ }
-/* dc refill from {l2,system} and dc copyback */
-static const struct pmc_masks k8_mask_dc[] = {
- __K8MASK(invalid, 0),
- __K8MASK(shared, 1),
- __K8MASK(exclusive, 2),
- __K8MASK(owner, 3),
- __K8MASK(modified, 4),
- NULLMASK
-};
+ bzero(&pmc_config, sizeof(pmc_config));
+ pmc_config.pm_ev = pmc_event_table[pe].pm_ev_code;
+ pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
+ pmc_config.pm_cpu = cpu;
+ pmc_config.pm_mode = mode;
+ pmc_config.pm_flags = flags;
-/* dc one bit ecc error */
-static const struct pmc_masks k8_mask_dobee[] = {
- __K8MASK(scrubber, 0),
- __K8MASK(piggyback, 1),
- NULLMASK
-};
+ if (PMC_IS_SAMPLING_MODE(mode))
+ pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
-/* dc dispatched prefetch instructions */
-static const struct pmc_masks k8_mask_ddpi[] = {
- __K8MASK(load, 0),
- __K8MASK(store, 1),
- __K8MASK(nta, 2),
- NULLMASK
-};
+ if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
+ errno = EINVAL;
+ goto out;
+ }
-/* dc dcache accesses by locks */
-static const struct pmc_masks k8_mask_dabl[] = {
- __K8MASK(accesses, 0),
- __K8MASK(misses, 1),
- NULLMASK
-};
+ if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
+ goto out;
-/* bu internal l2 request */
-static const struct pmc_masks k8_mask_bilr[] = {
- __K8MASK(ic-fill, 0),
- __K8MASK(dc-fill, 1),
- __K8MASK(tlb-reload, 2),
- __K8MASK(tag-snoop, 3),
- __K8MASK(cancelled, 4),
- NULLMASK
-};
+ *pmcid = pmc_config.pm_pmcid;
-/* bu fill request l2 miss */
-static const struct pmc_masks k8_mask_bfrlm[] = {
- __K8MASK(ic-fill, 0),
- __K8MASK(dc-fill, 1),
- __K8MASK(tlb-reload, 2),
- NULLMASK
-};
+ retval = 0;
-/* bu fill into l2 */
-static const struct pmc_masks k8_mask_bfil[] = {
- __K8MASK(dirty-l2-victim, 0),
- __K8MASK(victim-from-l2, 1),
- NULLMASK
-};
+ out:
+ if (spec_copy)
+ free(spec_copy);
-/* fr retired fpu instructions */
-static const struct pmc_masks k8_mask_frfi[] = {
- __K8MASK(x87, 0),
- __K8MASK(mmx-3dnow, 1),
- __K8MASK(packed-sse-sse2, 2),
- __K8MASK(scalar-sse-sse2, 3),
- NULLMASK
-};
+ return retval;
+}
-/* fr retired fastpath double op instructions */
-static const struct pmc_masks k8_mask_frfdoi[] = {
- __K8MASK(low-op-pos-0, 0),
- __K8MASK(low-op-pos-1, 1),
- __K8MASK(low-op-pos-2, 2),
- NULLMASK
-};
+int
+pmc_attach(pmc_id_t pmc, pid_t pid)
+{
+ struct pmc_op_pmcattach pmc_attach_args;
-/* fr fpu exceptions */
-static const struct pmc_masks k8_mask_ffe[] = {
- __K8MASK(x87-reclass-microfaults, 0),
- __K8MASK(sse-retype-microfaults, 1),
- __K8MASK(sse-reclass-microfaults, 2),
- __K8MASK(sse-and-x87-microtraps, 3),
- NULLMASK
-};
+ pmc_attach_args.pm_pmc = pmc;
+ pmc_attach_args.pm_pid = pid;
-/* nb memory controller page access event */
-static const struct pmc_masks k8_mask_nmcpae[] = {
- __K8MASK(page-hit, 0),
- __K8MASK(page-miss, 1),
- __K8MASK(page-conflict, 2),
- NULLMASK
-};
+ return PMC_CALL(PMCATTACH, &pmc_attach_args);
+}
-/* nb memory controller turnaround */
-static const struct pmc_masks k8_mask_nmct[] = {
- __K8MASK(dimm-turnaround, 0),
- __K8MASK(read-to-write-turnaround, 1),
- __K8MASK(write-to-read-turnaround, 2),
- NULLMASK
-};
+int
+pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
+{
+ unsigned int i;
+ enum pmc_class cl;
-/* nb memory controller bypass saturation */
-static const struct pmc_masks k8_mask_nmcbs[] = {
- __K8MASK(memory-controller-hi-pri-bypass, 0),
- __K8MASK(memory-controller-lo-pri-bypass, 1),
- __K8MASK(dram-controller-interface-bypass, 2),
- __K8MASK(dram-controller-queue-bypass, 3),
- NULLMASK
-};
+ cl = PMC_ID_TO_CLASS(pmcid);
+ for (i = 0; i < cpu_info.pm_nclass; i++)
+ if (cpu_info.pm_classes[i].pm_class == cl) {
+ *caps = cpu_info.pm_classes[i].pm_caps;
+ return 0;
+ }
+ return EINVAL;
+}
-/* nb sized commands */
-static const struct pmc_masks k8_mask_nsc[] = {
- __K8MASK(nonpostwrszbyte, 0),
- __K8MASK(nonpostwrszdword, 1),
- __K8MASK(postwrszbyte, 2),
- __K8MASK(postwrszdword, 3),
- __K8MASK(rdszbyte, 4),
- __K8MASK(rdszdword, 5),
- __K8MASK(rdmodwr, 6),
- NULLMASK
-};
+int
+pmc_configure_logfile(int fd)
+{
+ struct pmc_op_configurelog cla;
-/* nb probe result */
-static const struct pmc_masks k8_mask_npr[] = {
- __K8MASK(probe-miss, 0),
- __K8MASK(probe-hit, 1),
- __K8MASK(probe-hit-dirty-no-memory-cancel, 2),
- __K8MASK(probe-hit-dirty-with-memory-cancel, 3),
- NULLMASK
-};
+ cla.pm_logfd = fd;
+ if (PMC_CALL(CONFIGURELOG, &cla) < 0)
+ return -1;
+ return 0;
+}
-/* nb hypertransport bus bandwidth */
-static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
- __K8MASK(command, 0),
- __K8MASK(data, 1),
- __K8MASK(buffer-release, 2),
- __K8MASK(nop, 3),
- NULLMASK
-};
+int
+pmc_cpuinfo(const struct pmc_cpuinfo **pci)
+{
+ if (pmc_syscall == -1) {
+ errno = ENXIO;
+ return -1;
+ }
-#undef __K8MASK
+ /* kernel<->library, library<->userland interfaces are identical */
+ *pci = (struct pmc_cpuinfo *) &cpu_info;
+ return 0;
+}
-#define K8_KW_COUNT "count"
-#define K8_KW_EDGE "edge"
-#define K8_KW_INV "inv"
-#define K8_KW_MASK "mask"
-#define K8_KW_OS "os"
-#define K8_KW_USR "usr"
+int
+pmc_detach(pmc_id_t pmc, pid_t pid)
+{
+ struct pmc_op_pmcattach pmc_detach_args;
-static int
-k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
- struct pmc_op_pmcallocate *pmc_config)
+ pmc_detach_args.pm_pmc = pmc;
+ pmc_detach_args.pm_pid = pid;
+
+ return PMC_CALL(PMCDETACH, &pmc_detach_args);
+}
+
+int
+pmc_disable(int cpu, int pmc)
{
- char *e, *p, *q;
- int n;
- uint32_t count, evmask;
- const struct pmc_masks *pm, *pmask;
+ struct pmc_op_pmcadmin ssa;
- pmc_config->pm_caps |= PMC_CAP_READ;
- pmc_config->pm_amd_config = 0;
+ ssa.pm_cpu = cpu;
+ ssa.pm_pmc = pmc;
+ ssa.pm_state = PMC_STATE_DISABLED;
+ return PMC_CALL(PMCADMIN, &ssa);
+}
- if (pe == PMC_EV_TSC_TSC) {
- /* TSC events must be unqualified. */
- if (ctrspec && *ctrspec != '\0')
- return -1;
- return 0;
- }
+int
+pmc_enable(int cpu, int pmc)
+{
+ struct pmc_op_pmcadmin ssa;
- pmask = NULL;
- evmask = 0;
+ ssa.pm_cpu = cpu;
+ ssa.pm_pmc = pmc;
+ ssa.pm_state = PMC_STATE_FREE;
+ return PMC_CALL(PMCADMIN, &ssa);
+}
-#define __K8SETMASK(M) pmask = k8_mask_##M
+/*
+ * Return a list of events known to a given PMC class. 'cl' is the
+ * PMC class identifier, 'eventnames' is the returned list of 'const
+ * char *' pointers pointing to the names of the events. 'nevents' is
+ * the number of event name pointers returned.
+ *
+ * The space for 'eventnames' is allocated using malloc(3). The caller
+ * is responsible for freeing this space when done.
+ */
- /* setup parsing tables */
- switch (pe) {
- case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
- __K8SETMASK(fdfo);
- break;
- case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
- __K8SETMASK(lsrl);
- break;
- case PMC_EV_K8_LS_LOCKED_OPERATION:
- __K8SETMASK(llo);
- break;
- case PMC_EV_K8_DC_REFILL_FROM_L2:
- case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
- case PMC_EV_K8_DC_COPYBACK:
- __K8SETMASK(dc);
- break;
- case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
- __K8SETMASK(dobee);
- break;
- case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
- __K8SETMASK(ddpi);
- break;
- case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
- __K8SETMASK(dabl);
- break;
- case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
- __K8SETMASK(bilr);
- break;
- case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
- __K8SETMASK(bfrlm);
- break;
- case PMC_EV_K8_BU_FILL_INTO_L2:
- __K8SETMASK(bfil);
- break;
- case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
- __K8SETMASK(frfi);
- break;
- case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
- __K8SETMASK(frfdoi);
- break;
- case PMC_EV_K8_FR_FPU_EXCEPTIONS:
- __K8SETMASK(ffe);
- break;
- case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
- __K8SETMASK(nmcpae);
+int
+pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
+ int *nevents)
+{
+ int count;
+ const char **names;
+ const struct pmc_event_descr *ev;
+
+ switch (cl)
+ {
+ case PMC_CLASS_TSC:
+ ev = &pmc_event_table[PMC_EV_TSC_TSC];
+ count = 1;
break;
- case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
- __K8SETMASK(nmct);
+ case PMC_CLASS_K7:
+ ev = &pmc_event_table[PMC_EV_K7_FIRST];
+ count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
break;
- case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
- __K8SETMASK(nmcbs);
+ case PMC_CLASS_K8:
+ ev = &pmc_event_table[PMC_EV_K8_FIRST];
+ count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
break;
- case PMC_EV_K8_NB_SIZED_COMMANDS:
- __K8SETMASK(nsc);
+ case PMC_CLASS_P5:
+ ev = &pmc_event_table[PMC_EV_P5_FIRST];
+ count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
break;
- case PMC_EV_K8_NB_PROBE_RESULT:
- __K8SETMASK(npr);
+ case PMC_CLASS_P6:
+ ev = &pmc_event_table[PMC_EV_P6_FIRST];
+ count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
break;
- case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
- case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
- case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
- __K8SETMASK(nhbb);
+ case PMC_CLASS_P4:
+ ev = &pmc_event_table[PMC_EV_P4_FIRST];
+ count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
break;
-
default:
- break; /* no options defined */
+ errno = EINVAL;
+ return -1;
}
- pmc_config->pm_caps |= PMC_CAP_WRITE;
-
- while ((p = strsep(&ctrspec, ",")) != NULL) {
- if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
- q = strchr(p, '=');
- if (*++q == '\0') /* skip '=' */
- return -1;
+ if ((names = malloc(count * sizeof(const char *))) == NULL)
+ return -1;
- count = strtol(q, &e, 0);
- if (e == q || *e != '\0')
- return -1;
+ *eventnames = names;
+ *nevents = count;
- pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
- pmc_config->pm_amd_config |= K8_PMC_TO_COUNTER(count);
+ for (;count--; ev++, names++)
+ *names = ev->pm_ev_name;
+ return 0;
+}
- } else if (KWMATCH(p, K8_KW_EDGE)) {
- pmc_config->pm_caps |= PMC_CAP_EDGE;
- } else if (KWMATCH(p, K8_KW_INV)) {
- pmc_config->pm_caps |= PMC_CAP_INVERT;
- } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
- if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
- return -1;
- pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
- } else if (KWMATCH(p, K8_KW_OS)) {
- pmc_config->pm_caps |= PMC_CAP_SYSTEM;
- } else if (KWMATCH(p, K8_KW_USR)) {
- pmc_config->pm_caps |= PMC_CAP_USER;
- } else
- return -1;
- }
+int
+pmc_flush_logfile(void)
+{
+ return PMC_CALL(FLUSHLOG,0);
+}
- /* other post processing */
+int
+pmc_get_driver_stats(struct pmc_driverstats *ds)
+{
+ struct pmc_op_getdriverstats gms;
- switch (pe) {
- case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
- case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
- case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
- case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
- case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
- case PMC_EV_K8_FR_FPU_EXCEPTIONS:
- /* XXX only available in rev B and later */
- break;
- case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
- /* XXX only available in rev C and later */
- break;
- case PMC_EV_K8_LS_LOCKED_OPERATION:
- /* XXX CPU Rev A,B evmask is to be zero */
- if (evmask & (evmask - 1)) /* > 1 bit set */
- return -1;
- if (evmask == 0) {
- evmask = 0x01; /* Rev C and later: #instrs */
- pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
- }
- break;
- default:
- if (evmask == 0 && pmask != NULL) {
- for (pm = pmask; pm->pm_name; pm++)
- evmask |= pm->pm_value;
- pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
- }
- }
+ if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
+ return -1;
- if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
- pmc_config->pm_amd_config = K8_PMC_TO_UNITMASK(evmask);
+ /* copy out fields in the current userland<->library interface */
+ ds->pm_intr_ignored = gms.pm_intr_ignored;
+ ds->pm_intr_processed = gms.pm_intr_processed;
+ ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
+ ds->pm_syscalls = gms.pm_syscalls;
+ ds->pm_syscall_errors = gms.pm_syscall_errors;
+ ds->pm_buffer_requests = gms.pm_buffer_requests;
+ ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
+ ds->pm_log_sweeps = gms.pm_log_sweeps;
return 0;
}
-#endif
-/*
- * API entry points
- */
+int
+pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
+{
+ struct pmc_op_getmsr gm;
+
+ gm.pm_pmcid = pmc;
+ if (PMC_CALL(PMCGETMSR, &gm) < 0)
+ return -1;
+ *msr = gm.pm_msr;
+ return 0;
+}
int
pmc_init(void)
@@ -1633,12 +1909,13 @@ pmc_init(void)
pmc_syscall = pmc_modstat.data.intval;
- /* check ABI version against compiled-in version */
+ /* check the kernel module's ABI against our compiled-in version */
+ abi_version = PMC_VERSION;
if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
return (pmc_syscall = -1);
- /* ignore patch numbers for the comparision */
- if ((abi_version & 0xFFFF0000) != (PMC_VERSION & 0xFFFF0000)) {
+	/* ignore patch & minor numbers for the comparison */
+ if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
errno = EPROGMISMATCH;
return (pmc_syscall = -1);
}
@@ -1688,128 +1965,145 @@ pmc_init(void)
return 0;
}
-int
-pmc_allocate(const char *ctrspec, enum pmc_mode mode,
- uint32_t flags, int cpu, pmc_id_t *pmcid)
+const char *
+pmc_name_of_capability(enum pmc_caps cap)
{
- int retval;
- enum pmc_event pe;
- char *r, *spec_copy;
- const char *ctrname;
- const struct pmc_event_alias *p;
- struct pmc_op_pmcallocate pmc_config;
+ int i;
- spec_copy = NULL;
- retval = -1;
+ /*
+ * 'cap' should have a single bit set and should be in
+ * range.
+ */
- if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
- mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
+ if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
+ cap > PMC_CAP_LAST) {
errno = EINVAL;
- goto out;
+ return NULL;
}
- /* replace an event alias with the canonical event specifier */
- if (pmc_mdep_event_aliases)
- for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
- if (!strcmp(ctrspec, p->pm_alias)) {
- spec_copy = strdup(p->pm_spec);
- break;
- }
-
- if (spec_copy == NULL)
- spec_copy = strdup(ctrspec);
+ i = ffs(cap);
- r = spec_copy;
- ctrname = strsep(&r, ",");
+ return pmc_capability_names[i - 1];
+}
- /* look for the given counter name */
+const char *
+pmc_name_of_class(enum pmc_class pc)
+{
+ if ((int) pc >= PMC_CLASS_FIRST &&
+ pc <= PMC_CLASS_LAST)
+ return pmc_class_names[pc];
- for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
- if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
- break;
+ errno = EINVAL;
+ return NULL;
+}
- if (pe > PMC_EVENT_LAST) {
- errno = EINVAL;
- goto out;
- }
+const char *
+pmc_name_of_cputype(enum pmc_cputype cp)
+{
+ if ((int) cp >= PMC_CPU_FIRST &&
+ cp <= PMC_CPU_LAST)
+ return pmc_cputype_names[cp];
+ errno = EINVAL;
+ return NULL;
+}
- bzero(&pmc_config, sizeof(pmc_config));
- pmc_config.pm_ev = pmc_event_table[pe].pm_ev_code;
- pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
- pmc_config.pm_cpu = cpu;
- pmc_config.pm_mode = mode;
- pmc_config.pm_flags = flags;
+const char *
+pmc_name_of_disposition(enum pmc_disp pd)
+{
+ if ((int) pd >= PMC_DISP_FIRST &&
+ pd <= PMC_DISP_LAST)
+ return pmc_disposition_names[pd];
- if (PMC_IS_SAMPLING_MODE(mode))
- pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
+ errno = EINVAL;
+ return NULL;
+}
- if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
- errno = EINVAL;
- goto out;
- }
+const char *
+pmc_name_of_event(enum pmc_event pe)
+{
+ if ((int) pe >= PMC_EVENT_FIRST &&
+ pe <= PMC_EVENT_LAST)
+ return pmc_event_table[pe].pm_ev_name;
- if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
- goto out;
+ errno = EINVAL;
+ return NULL;
+}
- *pmcid = pmc_config.pm_pmcid;
+const char *
+pmc_name_of_mode(enum pmc_mode pm)
+{
+ if ((int) pm >= PMC_MODE_FIRST &&
+ pm <= PMC_MODE_LAST)
+ return pmc_mode_names[pm];
- retval = 0;
+ errno = EINVAL;
+ return NULL;
+}
- out:
- if (spec_copy)
- free(spec_copy);
+const char *
+pmc_name_of_state(enum pmc_state ps)
+{
+ if ((int) ps >= PMC_STATE_FIRST &&
+ ps <= PMC_STATE_LAST)
+ return pmc_state_names[ps];
- return retval;
+ errno = EINVAL;
+ return NULL;
}
int
-pmc_attach(pmc_id_t pmc, pid_t pid)
+pmc_ncpu(void)
{
- struct pmc_op_pmcattach pmc_attach_args;
-
- pmc_attach_args.pm_pmc = pmc;
- pmc_attach_args.pm_pid = pid;
+ if (pmc_syscall == -1) {
+ errno = ENXIO;
+ return -1;
+ }
- return PMC_CALL(PMCATTACH, &pmc_attach_args);
+ return cpu_info.pm_ncpu;
}
int
-pmc_detach(pmc_id_t pmc, pid_t pid)
+pmc_npmc(int cpu)
{
- struct pmc_op_pmcattach pmc_detach_args;
+ if (pmc_syscall == -1) {
+ errno = ENXIO;
+ return -1;
+ }
- pmc_detach_args.pm_pmc = pmc;
- pmc_detach_args.pm_pid = pid;
+ if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
+ errno = EINVAL;
+ return -1;
+ }
- return PMC_CALL(PMCDETACH, &pmc_detach_args);
+ return cpu_info.pm_npmc;
}
int
-pmc_release(pmc_id_t pmc)
+pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
- struct pmc_op_simple pmc_release_args;
+ int nbytes, npmc;
+ struct pmc_op_getpmcinfo *pmci;
- pmc_release_args.pm_pmcid = pmc;
+ if ((npmc = pmc_npmc(cpu)) < 0)
+ return -1;
- return PMC_CALL(PMCRELEASE, &pmc_release_args);
-}
+ nbytes = sizeof(struct pmc_op_getpmcinfo) +
+ npmc * sizeof(struct pmc_info);
-int
-pmc_start(pmc_id_t pmc)
-{
- struct pmc_op_simple pmc_start_args;
+ if ((pmci = calloc(1, nbytes)) == NULL)
+ return -1;
- pmc_start_args.pm_pmcid = pmc;
- return PMC_CALL(PMCSTART, &pmc_start_args);
-}
+ pmci->pm_cpu = cpu;
-int
-pmc_stop(pmc_id_t pmc)
-{
- struct pmc_op_simple pmc_stop_args;
+ if (PMC_CALL(GETPMCINFO, pmci) < 0) {
+ free(pmci);
+ return -1;
+ }
- pmc_stop_args.pm_pmcid = pmc;
- return PMC_CALL(PMCSTOP, &pmc_stop_args);
+ /* kernel<->library, library<->userland interfaces are identical */
+ *ppmci = (struct pmc_pmcinfo *) pmci;
+
+ return 0;
}
int
@@ -1830,15 +2124,13 @@ pmc_read(pmc_id_t pmc, pmc_value_t *value)
}
int
-pmc_write(pmc_id_t pmc, pmc_value_t value)
+pmc_release(pmc_id_t pmc)
{
- struct pmc_op_pmcrw pmc_write_op;
+ struct pmc_op_simple pmc_release_args;
- pmc_write_op.pm_pmcid = pmc;
- pmc_write_op.pm_flags = PMC_F_NEWVALUE;
- pmc_write_op.pm_value = value;
+ pmc_release_args.pm_pmcid = pmc;
- return PMC_CALL(PMCRW, &pmc_write_op);
+ return PMC_CALL(PMCRELEASE, &pmc_release_args);
}
int
@@ -1874,111 +2166,21 @@ pmc_set(pmc_id_t pmc, pmc_value_t value)
}
int
-pmc_configure_logfile(int fd)
-{
- struct pmc_op_configurelog cla;
-
- cla.pm_logfd = fd;
- if (PMC_CALL(CONFIGURELOG, &cla) < 0)
- return -1;
-
- return 0;
-}
-
-int
-pmc_get_driver_stats(struct pmc_op_getdriverstats *gms)
-{
- return PMC_CALL(GETDRIVERSTATS, gms);
-}
-
-int
-pmc_ncpu(void)
-{
- if (pmc_syscall == -1) {
- errno = ENXIO;
- return -1;
- }
-
- return cpu_info.pm_ncpu;
-}
-
-int
-pmc_npmc(int cpu)
-{
- if (pmc_syscall == -1) {
- errno = ENXIO;
- return -1;
- }
-
- if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
- errno = EINVAL;
- return -1;
- }
-
- return cpu_info.pm_npmc;
-}
-
-int
-pmc_enable(int cpu, int pmc)
-{
- struct pmc_op_pmcadmin ssa;
-
- ssa.pm_cpu = cpu;
- ssa.pm_pmc = pmc;
- ssa.pm_state = PMC_STATE_FREE;
- return PMC_CALL(PMCADMIN, &ssa);
-}
-
-int
-pmc_disable(int cpu, int pmc)
-{
- struct pmc_op_pmcadmin ssa;
-
- ssa.pm_cpu = cpu;
- ssa.pm_pmc = pmc;
- ssa.pm_state = PMC_STATE_DISABLED;
- return PMC_CALL(PMCADMIN, &ssa);
-}
-
-
-int
-pmc_pmcinfo(int cpu, struct pmc_op_getpmcinfo **ppmci)
+pmc_start(pmc_id_t pmc)
{
- int nbytes, npmc, saved_errno;
- struct pmc_op_getpmcinfo *pmci;
-
- if ((npmc = pmc_npmc(cpu)) < 0)
- return -1;
-
- nbytes = sizeof(struct pmc_op_getpmcinfo) +
- npmc * sizeof(struct pmc_info);
-
- if ((pmci = calloc(1, nbytes)) == NULL)
- return -1;
-
- pmci->pm_cpu = cpu;
-
- if (PMC_CALL(GETPMCINFO, pmci) < 0) {
- saved_errno = errno;
- free(pmci);
- errno = saved_errno;
- return -1;
- }
+ struct pmc_op_simple pmc_start_args;
- *ppmci = pmci;
- return 0;
+ pmc_start_args.pm_pmcid = pmc;
+ return PMC_CALL(PMCSTART, &pmc_start_args);
}
int
-pmc_cpuinfo(const struct pmc_op_getcpuinfo **pci)
+pmc_stop(pmc_id_t pmc)
{
- if (pmc_syscall == -1) {
- errno = ENXIO;
- return -1;
- }
+ struct pmc_op_simple pmc_stop_args;
- *pci = &cpu_info;
- return 0;
+ pmc_stop_args.pm_pmcid = pmc;
+ return PMC_CALL(PMCSTOP, &pmc_stop_args);
}
int
@@ -1997,182 +2199,22 @@ pmc_width(pmc_id_t pmcid, uint32_t *width)
}
int
-pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
-{
- unsigned int i;
- enum pmc_class cl;
-
- cl = PMC_ID_TO_CLASS(pmcid);
- for (i = 0; i < cpu_info.pm_nclass; i++)
- if (cpu_info.pm_classes[i].pm_class == cl) {
- *caps = cpu_info.pm_classes[i].pm_caps;
- return 0;
- }
- return EINVAL;
-}
-
-const char *
-pmc_name_of_cputype(enum pmc_cputype cp)
-{
- if ((int) cp >= PMC_CPU_FIRST &&
- cp <= PMC_CPU_LAST)
- return pmc_cputype_names[cp];
- errno = EINVAL;
- return NULL;
-}
-
-const char *
-pmc_name_of_class(enum pmc_class pc)
-{
- if ((int) pc >= PMC_CLASS_FIRST &&
- pc <= PMC_CLASS_LAST)
- return pmc_class_names[pc];
-
- errno = EINVAL;
- return NULL;
-}
-
-const char *
-pmc_name_of_mode(enum pmc_mode pm)
-{
- if ((int) pm >= PMC_MODE_FIRST &&
- pm <= PMC_MODE_LAST)
- return pmc_mode_names[pm];
-
- errno = EINVAL;
- return NULL;
-}
-
-const char *
-pmc_name_of_event(enum pmc_event pe)
-{
- if ((int) pe >= PMC_EVENT_FIRST &&
- pe <= PMC_EVENT_LAST)
- return pmc_event_table[pe].pm_ev_name;
-
- errno = EINVAL;
- return NULL;
-}
-
-const char *
-pmc_name_of_state(enum pmc_state ps)
-{
- if ((int) ps >= PMC_STATE_FIRST &&
- ps <= PMC_STATE_LAST)
- return pmc_state_names[ps];
-
- errno = EINVAL;
- return NULL;
-}
-
-const char *
-pmc_name_of_disposition(enum pmc_disp pd)
-{
- if ((int) pd >= PMC_DISP_FIRST &&
- pd <= PMC_DISP_LAST)
- return pmc_disposition_names[pd];
-
- errno = EINVAL;
- return NULL;
-}
-
-const char *
-pmc_name_of_capability(enum pmc_caps cap)
-{
- int i;
-
- /*
- * 'cap' should have a single bit set and should be in
- * range.
- */
-
- if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
- cap > PMC_CAP_LAST) {
- errno = EINVAL;
- return NULL;
- }
-
- i = ffs(cap);
-
- return pmc_capability_names[i - 1];
-}
-
-/*
- * Return a list of events known to a given PMC class. 'cl' is the
- * PMC class identifier, 'eventnames' is the returned list of 'const
- * char *' pointers pointing to the names of the events. 'nevents' is
- * the number of event name pointers returned.
- *
- * The space for 'eventnames' is allocated using malloc(3). The caller
- * is responsible for freeing this space when done.
- */
-
-int
-pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
- int *nevents)
+pmc_write(pmc_id_t pmc, pmc_value_t value)
{
- int count;
- const char **names;
- const struct pmc_event_descr *ev;
-
- switch (cl)
- {
- case PMC_CLASS_TSC:
- ev = &pmc_event_table[PMC_EV_TSC_TSC];
- count = 1;
- break;
- case PMC_CLASS_K7:
- ev = &pmc_event_table[PMC_EV_K7_FIRST];
- count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
- break;
- case PMC_CLASS_K8:
- ev = &pmc_event_table[PMC_EV_K8_FIRST];
- count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
- break;
- case PMC_CLASS_P5:
- ev = &pmc_event_table[PMC_EV_P5_FIRST];
- count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
- break;
- case PMC_CLASS_P6:
- ev = &pmc_event_table[PMC_EV_P6_FIRST];
- count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
- break;
- case PMC_CLASS_P4:
- ev = &pmc_event_table[PMC_EV_P4_FIRST];
- count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
- break;
- default:
- errno = EINVAL;
- return -1;
- }
-
- if ((names = malloc(count * sizeof(const char *))) == NULL)
- return -1;
+ struct pmc_op_pmcrw pmc_write_op;
- *eventnames = names;
- *nevents = count;
+ pmc_write_op.pm_pmcid = pmc;
+ pmc_write_op.pm_flags = PMC_F_NEWVALUE;
+ pmc_write_op.pm_value = value;
- for (;count--; ev++, names++)
- *names = ev->pm_ev_name;
- return 0;
+ return PMC_CALL(PMCRW, &pmc_write_op);
}
-/*
- * Architecture specific APIs
- */
-
-#if defined(__i386__) || defined(__amd64__)
-
int
-pmc_x86_get_msr(pmc_id_t pmc, uint32_t *msr)
+pmc_writelog(uint32_t userdata)
{
- struct pmc_op_x86_getmsr gm;
+ struct pmc_op_writelog wl;
- gm.pm_pmcid = pmc;
- if (PMC_CALL(PMCX86GETMSR, &gm) < 0)
- return -1;
- *msr = gm.pm_msr;
- return 0;
+ wl.pm_userdata = userdata;
+ return PMC_CALL(WRITELOG, &wl);
}
-
-#endif
diff --git a/lib/libpmc/pmc.3 b/lib/libpmc/pmc.3
index 0612ce7..7a771d4 100644
--- a/lib/libpmc/pmc.3
+++ b/lib/libpmc/pmc.3
@@ -1,4 +1,4 @@
-.\" Copyright (c) 2003 Joseph Koshy. All rights reserved.
+.\" Copyright (c) 2003-2005 Joseph Koshy. All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
@@ -36,7 +36,9 @@
.Nm pmc_disable ,
.Nm pmc_enable ,
.Nm pmc_event_names_of_class ,
+.Nm pmc_flush_logfile ,
.Nm pmc_get_driver_stats ,
+.Nm pmc_get_msr ,
.Nm pmc_init ,
.Nm pmc_name_of_capability ,
.Nm pmc_name_of_class ,
@@ -53,9 +55,9 @@
.Nm pmc_set ,
.Nm pmc_start ,
.Nm pmc_stop ,
-.Nm pmc_write ,
.Nm pmc_width ,
-.Nm pmc_x86_get_msr
+.Nm pmc_write ,
+.Nm pmc_writelog
.Nd programming API for using hardware performance monitoring counters
.Sh LIBRARY
.Lb libpmc
@@ -79,7 +81,7 @@
.Ft int
.Fn pmc_configure_logfile "int fd"
.Ft int
-.Fn pmc_cpuinfo "const struct pmc_op_getcpuinfo **cpu_info"
+.Fn pmc_cpuinfo "const struct pmc_cpuinfo **cpu_info"
.Ft int
.Fo pmc_detach
.Fa "pmc_id_t pmcid"
@@ -96,7 +98,11 @@
.Fa "int *nevents"
.Fc
.Ft int
-.Fn pmc_get_driver_stats "struct pmc_op_getdriverstats *gms"
+.Fn pmc_flush_logfile "void"
+.Ft int
+.Fn pmc_get_driver_stats "struct pmc_driverstats *gms"
+.Ft int
+.Fn pmc_get_msr "pmc_id_t pmc" "uint32_t *msr"
.Ft int
.Fn pmc_init "void"
.Ft "const char *"
@@ -118,7 +124,7 @@
.Ft int
.Fn pmc_npmc "uint32_t cpu"
.Ft int
-.Fn pmc_pmcinfo "uint32_t cpu" "struct pmc_op_getpmcinfo **pmc_info"
+.Fn pmc_pmcinfo "uint32_t cpu" "struct pmc_pmcinfo **pmc_info"
.Ft int
.Fn pmc_read "pmc_id_t pmc" "pmc_value_t *value"
.Ft int
@@ -134,9 +140,9 @@
.Ft int
.Fn pmc_write "pmc_id_t pmc" "pmc_value_t value"
.Ft int
-.Fn pmc_width "pmc_id_t pmc" "uint32_t *width"
+.Fn pmc_writelog "uint32_t userdata"
.Ft int
-.Fn pmc_x86_get_msr "int pmc" "uint32_t *msr"
+.Fn pmc_width "pmc_id_t pmc" "uint32_t *width"
.Sh DESCRIPTION
These functions implement a high-level library for using the
system's hardware performance counters.
@@ -276,9 +282,24 @@ The
.Fn pmc_configure_logfile
function causes the
.Xr hwpmc 4
-driver to log system wide performance data to file corresponding
+driver to log performance data to file corresponding
to the process' file handle
.Fa fd .
+If argument
+.Fa fd
+is -1, then any previously configured logging is reset
+and all data queued to be written are discarded.
+.Pp
+The
+.Fn pmc_flush_logfile
+function will send all data queued inside the
+.Xr hwpmc 4
+driver to the configured log file before returning.
+The
+.Fn pmc_writelog
+function will append a log entry containing the argument
+.Fa userdata
+to the log file.
.Pp
.Fn pmc_set
configures an sampling PMC
@@ -307,8 +328,19 @@ module is unloaded using
processes that have PMCs allocated to them will be sent a
SIGBUS signal.
.It SIGIO
-Attempting to read a PMC that is not currently attached to a running
-process will cause a SIGIO signal to be sent to the reader.
+The
+.Xr hwpmc 4
+driver will send a PMC owning process a SIGIO signal if:
+.Bl -bullet
+.It
+Any process-mode PMC allocated by it loses all its
+target processes.
+.It
+The driver encounters an error when writing log data to a
+configured log file.
+This error may be retrieved by a subsequent call to
+.Fn pmc_flush_logfile .
+.El
.El
.Ss CONVENIENCE FUNCTIONS
.Fn pmc_ncpu
@@ -321,10 +353,18 @@ returns the number of PMCs supported on CPU
sets argument
.Fa cpu_info
to point to a structure with information about the system's CPUs.
+Function
.Fn pmc_pmcinfo
returns information about the current state of CPU
.Fa cpu Ap s
PMCs.
+This function sets argument
+.Fa *pmc_info
+to point to a memory area allocated with
+.Xr calloc 3 .
+The caller is expected to
+.Fn free
+the area when done.
.Pp
The functions
.Fn pmc_name_of_capability ,
@@ -370,7 +410,7 @@ is the index of the PMC to be operated on.
Only the super-user is allowed to enable and disable PMCs.
.Ss X86 ARCHITECTURE SPECIFIC API
The
-.Fn pmc_x86_get_msr
+.Fn pmc_get_msr
function returns the processor model specific register number
associated with
.Fa pmc .
@@ -3096,25 +3136,39 @@ was unrecognized for this cpu type.
.Pp
Calls to
.Fn pmc_attach ,
+.Fn pmc_configure_logfile ,
.Fn pmc_detach ,
+.Fn pmc_disable ,
+.Fn pmc_enable ,
+.Fn pmc_get_driver_stats ,
+.Fn pmc_get_msr ,
+.Fn pmc_read ,
.Fn pmc_release ,
+.Fn pmc_rw ,
+.Fn pmc_set ,
.Fn pmc_start ,
.Fn pmc_stop ,
-.Fn pmc_read ,
.Fn pmc_write ,
-.Fn pmc_rw ,
-.Fn pmc_set ,
-.Fn pmc_configure_logfile ,
-.Fn pmc_get_driver_stats ,
-.Fn pmc_enable ,
-.Fn pmc_disable ,
and
-.Fn pmc_x86_get_msr
+.Fn pmc_writelog
may fail with the errors described in
.Xr hwpmc 4 .
+.Pp
+If a log file was configured using
+.Fn pmc_configure_logfile
+and the
+.Xr hwpmc 4
+driver encountered an error while logging data to it, then
+logging will be stopped and a subsequent call to
+.Fn pmc_flush_logfile
+will fail with the error code seen by the
+.Xr hwpmc 4
+driver.
.Sh SEE ALSO
.Xr modfind 2 ,
.Xr modstat 2 ,
+.Xr calloc 3 ,
+.Xr pmclog 3 ,
.Xr hwpmc 4 ,
.Xr pmccontrol 8 ,
.Xr pmcreport 8 ,
@@ -3126,12 +3180,6 @@ The information returned by
and possibly
.Fn pmc_npmc
should really be available all the time, through a better designed
-interface.
-.Pp
-The API for
-.Fn pmc_cpuinfo
-and
-.Fn pmc_pmcinfo
-expose too much of the underlying
+interface and not just when
.Xr hwpmc 4
-driver's internals to userland.
+is present in the kernel.
diff --git a/lib/libpmc/pmc.h b/lib/libpmc/pmc.h
index 7ee257b..ee3f772 100644
--- a/lib/libpmc/pmc.h
+++ b/lib/libpmc/pmc.h
@@ -32,6 +32,39 @@
#include <sys/pmc.h>
/*
+ * Driver statistics.
+ */
+struct pmc_driverstats {
+ int pm_intr_ignored; /* #interrupts ignored */
+ int pm_intr_processed; /* #interrupts processed */
+ int pm_intr_bufferfull; /* #interrupts with ENOSPC */
+ int pm_syscalls; /* #syscalls */
+ int pm_syscall_errors; /* #syscalls with errors */
+ int pm_buffer_requests; /* #buffer requests */
+ int pm_buffer_requests_failed; /* #failed buffer requests */
+ int pm_log_sweeps; /* #sample buffer processing passes */
+};
+
+/*
+ * CPU information.
+ */
+struct pmc_cpuinfo {
+ enum pmc_cputype pm_cputype; /* the kind of CPU */
+ uint32_t pm_ncpu; /* number of CPUs */
+ uint32_t pm_npmc; /* #PMCs per CPU */
+ uint32_t pm_nclass; /* #classes of PMCs */
+ struct pmc_classinfo pm_classes[PMC_CLASS_MAX];
+};
+
+/*
+ * Current PMC state.
+ */
+struct pmc_pmcinfo {
+ int32_t pm_cpu; /* CPU number */
+ struct pmc_info pm_pmcs[]; /* NPMC structs */
+};
+
+/*
* Prototypes
*/
@@ -40,10 +73,12 @@ int pmc_allocate(const char *_ctrspec, enum pmc_mode _mode, uint32_t _flags,
int pmc_attach(pmc_id_t _pmcid, pid_t _pid);
int pmc_capabilities(pmc_id_t _pmc, uint32_t *_caps);
int pmc_configure_logfile(int _fd);
+int pmc_flush_logfile(void);
int pmc_detach(pmc_id_t _pmcid, pid_t _pid);
int pmc_disable(int _cpu, int _pmc);
int pmc_enable(int _cpu, int _pmc);
-int pmc_get_driver_stats(struct pmc_op_getdriverstats *_gms);
+int pmc_get_driver_stats(struct pmc_driverstats *_gms);
+int pmc_get_msr(pmc_id_t _pmc, uint32_t *_msr);
int pmc_init(void);
int pmc_read(pmc_id_t _pmc, pmc_value_t *_value);
int pmc_release(pmc_id_t _pmc);
@@ -53,11 +88,12 @@ int pmc_start(pmc_id_t _pmc);
int pmc_stop(pmc_id_t _pmc);
int pmc_width(pmc_id_t _pmc, uint32_t *_width);
int pmc_write(pmc_id_t _pmc, pmc_value_t _value);
+int pmc_writelog(uint32_t _udata);
int pmc_ncpu(void);
int pmc_npmc(int _cpu);
-int pmc_cpuinfo(const struct pmc_op_getcpuinfo **_cpu_info);
-int pmc_pmcinfo(int _cpu, struct pmc_op_getpmcinfo **_pmc_info);
+int pmc_cpuinfo(const struct pmc_cpuinfo **_cpu_info);
+int pmc_pmcinfo(int _cpu, struct pmc_pmcinfo **_pmc_info);
const char *pmc_name_of_capability(uint32_t _c);
const char *pmc_name_of_class(enum pmc_class _pc);
@@ -70,12 +106,4 @@ const char *pmc_name_of_state(enum pmc_state _ps);
int pmc_event_names_of_class(enum pmc_class _cl, const char ***_eventnames,
int *_nevents);
-/*
- * Architecture specific extensions
- */
-
-#if __i386__ || __amd64__
-int pmc_x86_get_msr(pmc_id_t _pmc, uint32_t *_msr);
-#endif
-
#endif
diff --git a/lib/libpmc/pmclog.3 b/lib/libpmc/pmclog.3
new file mode 100644
index 0000000..1487e90
--- /dev/null
+++ b/lib/libpmc/pmclog.3
@@ -0,0 +1,276 @@
+.\" Copyright (c) 2005 Joseph Koshy. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" This software is provided by Joseph Koshy ``as is'' and
+.\" any express or implied warranties, including, but not limited to, the
+.\" implied warranties of merchantability and fitness for a particular purpose
+.\" are disclaimed. in no event shall Joseph Koshy be liable
+.\" for any direct, indirect, incidental, special, exemplary, or consequential
+.\" damages (including, but not limited to, procurement of substitute goods
+.\" or services; loss of use, data, or profits; or business interruption)
+.\" however caused and on any theory of liability, whether in contract, strict
+.\" liability, or tort (including negligence or otherwise) arising in any way
+.\" out of the use of this software, even if advised of the possibility of
+.\" such damage.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd June 1, 2005
+.Os
+.Dt PMCLOG 3
+.Sh NAME
+.Nm pmclog_open ,
+.Nm pmclog_close ,
+.Nm pmclog_read ,
+.Nm pmclog_feed
+.Nd parse event log data generated by
+.Xr hwpmc 4
+.Sh LIBRARY
+.Lb libpmc
+.Sh SYNOPSIS
+.In pmclog.h
+.Ft "void *"
+.Fn pmclog_open "int fd"
+.Ft void
+.Fn pmclog_close "void *cookie"
+.Ft int
+.Fn pmclog_read "void *cookie" "struct pmclog_ev *ev"
+.Ft int
+.Fn pmclog_feed "void *cookie" "char *data" "int len"
+.Sh DESCRIPTION
+These functions provide a way for application programs to extract
+events from an event stream generated by
+.Xr hwpmc 4 .
+.Pp
+A new event log parser is allocated using
+.Fn pmclog_open .
+Argument
+.Fa fd
+may be a file descriptor opened for reading if the event stream is
+present in a file, or the constant
+.Dv PMCLOG_FD_NONE
+for an event stream present in memory.
+This function returns a cookie that is passed into the other functions
+in this API set.
+.Pp
+Function
+.Fn pmclog_read
+returns the next available event in the event stream associated with
+argument
+.Fa cookie .
+Argument
+.Fa ev
+points to an event descriptor which will contain the result of a
+successfully parsed event.
+.Pp
+An event descriptor returned by
+.Fn pmclog_read
+has the following structure:
+.Bd -literal
+struct pmclog_ev {
+ enum pmclog_state pl_state; /* parser state after 'get_event()' */
+ off_t pl_offset; /* byte offset in stream */
+ size_t pl_count; /* count of records so far */
+ struct timespec pl_ts; /* log entry timestamp */
+ enum pmclog_type pl_type; /* log entry kind */
+ union { /* log entry data */
+ struct pmclog_ev_allocate pl_a;
+ struct pmclog_ev_proccsw pl_c;
+ struct pmclog_ev_dropnotify pl_d;
+ struct pmclog_ev_procexit pl_e;
+ struct pmclog_ev_initialize pl_i;
+ struct pmclog_ev_pcsample pl_s;
+ struct pmclog_ev_pmcattach pl_t;
+ struct pmclog_ev_userdata pl_u;
+ struct pmclog_ev_procexec pl_x;
+ } pl_u;
+};
+.Ed
+.Pp
+The current state of the parser is recorded in
+.Va pl_state .
+This field can take on the following values:
+.Bl -tag -width "PMCLOG_REQUIRE_DATA" -compact
+.It Dv PMCLOG_EOF
+.Pq For file based parsers only
+An end-of-file condition was encountered on the configured file
+descriptor.
+.It Dv PMCLOG_ERROR
+An error occurred during parsing.
+.It Dv PMCLOG_OK
+A complete event record was read into
+.Fa "*ev" .
+.It Dv PMCLOG_REQUIRE_DATA
+There was insufficient data in the event stream to assemble a complete
+event record.
+For memory based parsers, more data can be fed to the
+parser using function
+.Fn pmclog_feed .
+For file based parsers, function
+.Fn pmclog_read
+may be retried when data is available on the configured file
+descriptor.
+.El
+.Pp
+The rest of the event structure is valid only if field
+.Va pl_state
+contains
+.Dv PMCLOG_OK .
+Field
+.Va pl_offset
+contains the offset of the current record in the byte stream.
+Field
+.Va pl_count
+contains the serial number of this event.
+Field
+.Va pl_ts
+contains a timestamp with the system time when the event occurred.
+Field
+.Va pl_type
+denotes the kind of the event returned in argument
+.Fa *ev
+and is one of the following:
+.Bl -tag -width XXXXXXXXXXXXXXXXXXXXXXX -compact
+.It Dv PMCLOG_TYPE_DROPNOTIFY
+a marker indicating that
+.Xr hwpmc 4
+had to drop data due to a resource constraint.
+.It Dv PMCLOG_TYPE_INITIALIZE
+an initialization record.
+This is usually the first record in a log file.
+.It Dv PMCLOG_TYPE_PCSAMPLE
+A record containing an instruction pointer sample.
+.It Dv PMCLOG_TYPE_PMCALLOCATE
+A record describing a PMC allocation operation.
+.It Dv PMCLOG_TYPE_PMCATTACH
+A record describing a PMC attach operation.
+.It Dv PMCLOG_TYPE_PROCCSW
+A record describing a PMC reading at the time of a process context switch.
+.It Dv PMCLOG_TYPE_PROCEXIT
+A record describing the accumulated PMC reading for a process at the
+time of
+.Xr _exit 2 .
+.It Dv PMCLOG_TYPE_PROCEXEC
+A record describing an
+.Xr execve 2
+by a target process.
+.It Dv PMCLOG_TYPE_USERDATA
+A record containing user data.
+.El
+.Pp
+Function
+.Fn pmclog_feed
+is used with parsers configured to parse memory based event streams.
+It is intended to be called when function
+.Fn pmclog_read
+indicates the need for more data by returning
+.Dv PMCLOG_REQUIRE_DATA
+in field
+.Va pl_state
+of its event structure argument.
+Argument
+.Fa data
+points to the start of a memory buffer containing fresh event data.
+Argument
+.Fa len
+indicates the number of bytes of data available.
+The memory range
+.Bq data , data+len
+must remain valid till the next time
+.Fn pmclog_read
+returns an error.
+It is an error to use
+.Fn pmclog_feed
+on a parser configured to parse file data.
+.Pp
+Function
+.Fn pmclog_close
+releases the internal state allocated by a prior call
+to
+.Fn pmclog_open .
+.Sh RETURN VALUES
+Function
+.Fn pmclog_open
+will return a non-NULL value if successful or NULL otherwise.
+.Pp
+Function
+.Fn pmclog_read
+will return 0 in case a complete event record was successfully read,
+or will return -1 and will set the
+.Va pl_state
+field of the event record to the appropriate code in case of an error.
+.Pp
+Function
+.Fn pmclog_feed
+will return 0 on success or -1 in case of failure.
+.Sh EXAMPLES
+A template for using the log file parsing API is shown below in pseudocode:
+.Bd -literal
+void *parser; /* cookie */
+struct pmclog_ev ev; /* parsed event */
+int fd; /* file descriptor */
+
+fd = open(filename, O_RDONLY); /* open log file */
+parser = pmclog_open(fd); /* initialize parser */
+if (parser == NULL)
+ --handle an out of memory error--;
+
+/* read and parse data */
+while (pmclog_read(parser, &ev) == 0) {
+ assert(ev.pl_state == PMCLOG_OK);
+ /* process the event */
+ switch (ev.pl_type) {
+	case PMCLOG_TYPE_PMCALLOCATE:
+ --process a pmc allocation record--
+ break;
+ case PMCLOG_TYPE_PROCCSW:
+ --process a thread context switch record--
+ break;
+ case PMCLOG_TYPE_PCSAMPLE:
+ --process a PC sample--
+ break;
+ --and so on--
+ }
+}
+
+/* examine parser state */
+switch (ev.pl_state) {
+case PMCLOG_EOF:
+ --normal termination--
+ break;
+case PMCLOG_ERROR:
+ --look at errno here--
+ break;
+case PMCLOG_REQUIRE_DATA:
+ --arrange for more data to be available for parsing--
+ break;
+default:
+ assert(0);
+ /*NOTREACHED*/
+}
+
+pmclog_close(parser); /* cleanup */
+.Ed
+.Sh ERRORS
+A call to
+.Fn pmclog_open
+may fail with any of the errors returned by
+.Xr malloc 3 .
+.Pp
+A call to
+.Fn pmclog_read
+for a file based parser may fail with any of the errors returned by
+.Xr read 2 .
+.Sh SEE ALSO
+.Xr read 2 ,
+.Xr malloc 3 ,
+.Xr pmc 3 ,
+.Xr hwpmc 4
diff --git a/lib/libpmc/pmclog.c b/lib/libpmc/pmclog.c
new file mode 100644
index 0000000..8772c58
--- /dev/null
+++ b/lib/libpmc/pmclog.c
@@ -0,0 +1,532 @@
+/*-
+ * Copyright (c) 2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmclog.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <pmc.h>
+#include <pmclog.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include <machine/pmc_mdep.h>
+
+#define PMCLOG_BUFFER_SIZE 4096
+
+/*
+ * API NOTES
+ *
+ * The pmclog(3) API is oriented towards parsing an event stream in
+ * "realtime", i.e., from an data source that may or may not preserve
+ * record boundaries -- for example when the data source is elsewhere
+ * on a network. The API allows data to be fed into the parser zero
+ * or more bytes at a time.
+ *
+ * The state for a log file parser is maintained in a 'struct
+ * pmclog_parse_state'. Parser invocations are done by calling
+ * 'pmclog_read()'; this function will inform the caller when a
+ * complete event is parsed.
+ *
+ * The parser first assembles a complete log file event in an internal
+ * work area (see "ps_saved" below). Once a complete log file event
+ * is read, the parser then parses it and converts it to an event
+ * descriptor usable by the client. We could possibly avoid this two
+ * step process by directly parsing the input log to set fields in the
+ * event record. However the parser's state machine would get
+ * insanely complicated, and this code is unlikely to be used in
+ * performance critical paths.
+ */
+
+/*
+ * Internal states of the record-assembly state machine implemented by
+ * pmclog_get_record().
+ */
+enum pmclog_parser_state {
+	PL_STATE_NEW_RECORD,		/* in-between records */
+	PL_STATE_EXPECTING_HEADER,	/* header being read */
+	PL_STATE_PARTIAL_RECORD,	/* header present but not the record */
+	PL_STATE_ERROR			/* parsing error encountered */
+};
+
+/*
+ * Parser state for one log stream.  Allocated by pmclog_open() and
+ * handed back to callers as an opaque cookie.
+ *
+ * Note: 'ps_len' is declared ssize_t (not size_t) because its address
+ * is passed as the 'ssize_t *len' argument of pmclog_get_event() /
+ * pmclog_get_record() and it is assigned the (signed) return value of
+ * read(2) in pmclog_read().
+ */
+struct pmclog_parse_state {
+	enum pmclog_parser_state ps_state;
+	enum pmc_cputype ps_arch;	/* log file architecture */
+	uint32_t	ps_version;	/* hwpmc version */
+	int		ps_initialized;	/* whether initialized */
+	int		ps_count;	/* count of records processed */
+	off_t		ps_offset;	/* stream byte offset */
+	union pmclog_entry ps_saved;	/* saved partial log entry */
+	int		ps_svcount;	/* #bytes saved */
+	int		ps_fd;		/* active fd or -1 */
+	char		*ps_buffer;	/* scratch buffer if fd != -1 */
+	char		*ps_data;	/* current parse pointer */
+	ssize_t		ps_len;		/* length of buffered data */
+};
+
+#define PMCLOG_HEADER_FROM_SAVED_STATE(PS) \
+ (* ((uint32_t *) &(PS)->ps_saved))
+
+#define PMCLOG_INITIALIZE_READER(LE,A) LE = (uint32_t *) &(A)
+#define PMCLOG_READ32(LE,V) do { \
+ (V) = *(LE)++; \
+ } while (0)
+#define PMCLOG_READ64(LE,V) do { \
+ uint64_t _v; \
+ _v = (uint64_t) *(LE)++; \
+ _v |= ((uint64_t) *(LE)++) << 32; \
+ (V) = _v; \
+ } while (0)
+
+#define PMCLOG_READSTRING(LE,DST,LEN) strlcpy((DST), (char *) (LE), (LEN))
+
+/*
+ * Assemble a log record from '*len' octets starting from address '*data'.
+ * Update 'data' and 'len' to reflect the number of bytes consumed.
+ *
+ * '*data' is potentially an unaligned address and '*len' octets may
+ * not be enough to complete a event record.
+ */
+
+static enum pmclog_parser_state
+pmclog_get_record(struct pmclog_parse_state *ps, char **data, ssize_t *len)
+{
+	int avail, copylen, recordsize, used;
+	uint32_t h;
+	const int HEADERSIZE = sizeof(uint32_t);
+	char *src, *dst;
+
+	/* being handed no data at all is treated as a stream error */
+	if ((avail = *len) <= 0)
+		return (ps->ps_state = PL_STATE_ERROR);
+
+	src = *data;
+	h = used = 0;
+
+	/* a fresh record starts with an empty save area */
+	if (ps->ps_state == PL_STATE_NEW_RECORD)
+		ps->ps_svcount = 0;
+
+	/* continue filling the save area after any bytes already saved */
+	dst = (char *) &ps->ps_saved + ps->ps_svcount;
+
+	switch (ps->ps_state) {
+	case PL_STATE_NEW_RECORD:
+
+		/*
+		 * Transitions:
+		 *
+		 * Case A: avail < headersize
+		 *	-> 'expecting header'
+		 *
+		 * Case B: avail >= headersize
+		 *	B.1: avail < recordsize
+		 *		-> 'partial record'
+		 *	B.2: avail >= recordsize
+		 *		-> 'new record'
+		 */
+
+		copylen = avail < HEADERSIZE ? avail : HEADERSIZE;
+		bcopy(src, dst, copylen);
+		ps->ps_svcount = used = copylen;
+
+		/* not even a full header yet: wait for more input */
+		if (copylen < HEADERSIZE) {
+			ps->ps_state = PL_STATE_EXPECTING_HEADER;
+			goto done;
+		}
+
+		src += copylen;
+		dst += copylen;
+
+		h = PMCLOG_HEADER_FROM_SAVED_STATE(ps);
+		recordsize = PMCLOG_HEADER_TO_LENGTH(h);
+
+		/* a non-positive record length indicates a corrupted stream */
+		if (recordsize <= 0)
+			goto error;
+
+		if (recordsize <= avail) { /* full record available */
+			bcopy(src, dst, recordsize - copylen);
+			ps->ps_svcount = used = recordsize;
+			goto done;
+		}
+
+		/* header + a partial record is available */
+		bcopy(src, dst, avail - copylen);
+		ps->ps_svcount = used = avail;
+		ps->ps_state = PL_STATE_PARTIAL_RECORD;
+
+		break;
+
+	case PL_STATE_EXPECTING_HEADER:
+
+		/*
+		 * Transitions:
+		 *
+		 * Case C: avail+saved < headersize
+		 *	-> 'expecting header'
+		 *
+		 * Case D: avail+saved >= headersize
+		 *	D.1: avail+saved < recordsize
+		 *		-> 'partial record'
+		 *	D.2: avail+saved >= recordsize
+		 *		-> 'new record'
+		 *    (see PARTIAL_RECORD handling below)
+		 */
+
+		if (avail + ps->ps_svcount < HEADERSIZE) {
+			bcopy(src, dst, avail);
+			ps->ps_svcount += avail;
+			used = avail;
+			break;
+		}
+
+		/* complete the header, then fall into partial-record handling */
+		used = copylen = HEADERSIZE - ps->ps_svcount;
+		bcopy(src, dst, copylen);
+		src += copylen;
+		dst += copylen;
+		avail -= copylen;
+		ps->ps_svcount += copylen;
+
+		/*FALLTHROUGH*/
+
+	case PL_STATE_PARTIAL_RECORD:
+
+		/*
+		 * Transitions:
+		 *
+		 * Case E: avail+saved < recordsize
+		 *	-> 'partial record'
+		 *
+		 * Case F: avail+saved >= recordsize
+		 *	-> 'new record'
+		 */
+
+		h = PMCLOG_HEADER_FROM_SAVED_STATE(ps);
+		recordsize = PMCLOG_HEADER_TO_LENGTH(h);
+
+		/* a non-positive record length indicates a corrupted stream */
+		if (recordsize <= 0)
+			goto error;
+
+		if (avail + ps->ps_svcount < recordsize) {
+			copylen = avail;
+			ps->ps_state = PL_STATE_PARTIAL_RECORD;
+		} else {
+			copylen = recordsize - ps->ps_svcount;
+			ps->ps_state = PL_STATE_NEW_RECORD;
+		}
+
+		bcopy(src, dst, copylen);
+		ps->ps_svcount += copylen;
+		used += copylen;
+		break;
+
+	default:
+		goto error;
+	}
+
+ done:
+	/* advance the caller's cursor past the bytes we consumed */
+	*data += used;
+	*len  -= used;
+	return ps->ps_state;
+
+ error:
+	ps->ps_state = PL_STATE_ERROR;
+	return ps->ps_state;
+}
+
+/*
+ * Get an event from the stream pointed to by '*data'. '*len'
+ * indicates the number of bytes available to parse. Arguments
+ * '*data' and '*len' are updated to indicate the number of bytes
+ * consumed.
+ */
+
+static int
+pmclog_get_event(void *cookie, char **data, ssize_t *len,
+    struct pmclog_ev *ev)
+{
+	int evlen, pathlen;
+	uint32_t h, *le;
+	enum pmclog_parser_state e;
+	struct pmclog_parse_state *ps;
+
+	ps = (struct pmclog_parse_state *) cookie;
+
+	/* the caller must not invoke us again after a hard parse error */
+	assert(ps->ps_state != PL_STATE_ERROR);
+
+	if ((e = pmclog_get_record(ps,data,len)) == PL_STATE_ERROR) {
+		ev->pl_state = PMCLOG_ERROR;
+		return -1;
+	}
+
+	/* anything other than a completed record means we need more input */
+	if (e != PL_STATE_NEW_RECORD) {
+		ev->pl_state = PMCLOG_REQUIRE_DATA;
+		return -1;
+	}
+
+	/* a complete record is now assembled in ps_saved; decode it */
+	PMCLOG_INITIALIZE_READER(le, ps->ps_saved);
+
+	PMCLOG_READ32(le,h);
+
+	if (!PMCLOG_HEADER_CHECK_MAGIC(h)) {
+		ps->ps_state = PL_STATE_ERROR;
+		ev->pl_state = PMCLOG_ERROR;
+		return -1;
+	}
+
+	/* copy out the time stamp (stored as two 32 bit words in the log) */
+	PMCLOG_READ32(le,ev->pl_ts.tv_sec);
+	PMCLOG_READ32(le,ev->pl_ts.tv_nsec);
+
+	evlen = PMCLOG_HEADER_TO_LENGTH(h);
+
+/*
+ * Compute the length of the pathname embedded in a record of type
+ * 'TYPE' and reject out-of-range values.
+ */
+#define	PMCLOG_GET_PATHLEN(P,E,TYPE) do {				\
+		(P) = (E) - offsetof(struct TYPE, pl_pathname);	\
+		if ((P) > PATH_MAX || (P) < 0)				\
+			goto error;					\
+	} while (0)
+
+	switch (ev->pl_type = PMCLOG_HEADER_TO_TYPE(h)) {
+	case PMCLOG_TYPE_CLOSELOG:
+	case PMCLOG_TYPE_DROPNOTIFY:
+		/* nothing to do */
+		break;
+	case PMCLOG_TYPE_INITIALIZE:
+		PMCLOG_READ32(le,ev->pl_u.pl_i.pl_version);
+		PMCLOG_READ32(le,ev->pl_u.pl_i.pl_arch);
+		/* remember the stream's version/arch for later consumers */
+		ps->ps_version = ev->pl_u.pl_i.pl_version;
+		ps->ps_arch = ev->pl_u.pl_i.pl_arch;
+		ps->ps_initialized = 1;
+		break;
+	case PMCLOG_TYPE_MAPPINGCHANGE:
+		PMCLOG_GET_PATHLEN(pathlen,evlen,pmclog_mappingchange);
+		PMCLOG_READ32(le,ev->pl_u.pl_m.pl_type);
+		PMCLOG_READADDR(le,ev->pl_u.pl_m.pl_start);
+		PMCLOG_READADDR(le,ev->pl_u.pl_m.pl_end);
+		PMCLOG_READ32(le,ev->pl_u.pl_m.pl_pid);
+		PMCLOG_READSTRING(le, ev->pl_u.pl_m.pl_pathname, pathlen);
+		break;
+	case PMCLOG_TYPE_PCSAMPLE:
+		PMCLOG_READ32(le,ev->pl_u.pl_s.pl_pid);
+		PMCLOG_READADDR(le,ev->pl_u.pl_s.pl_pc);
+		PMCLOG_READ32(le,ev->pl_u.pl_s.pl_pmcid);
+		break;
+	case PMCLOG_TYPE_PMCALLOCATE:
+		PMCLOG_READ32(le,ev->pl_u.pl_a.pl_pmcid);
+		PMCLOG_READ32(le,ev->pl_u.pl_a.pl_event);
+		PMCLOG_READ32(le,ev->pl_u.pl_a.pl_flags);
+		/* resolve the event code to a printable name */
+		if ((ev->pl_u.pl_a.pl_evname =
+		    pmc_name_of_event(ev->pl_u.pl_a.pl_event)) == NULL)
+			goto error;
+		break;
+	case PMCLOG_TYPE_PMCATTACH:
+		PMCLOG_GET_PATHLEN(pathlen,evlen,pmclog_pmcattach);
+		PMCLOG_READ32(le,ev->pl_u.pl_t.pl_pmcid);
+		PMCLOG_READ32(le,ev->pl_u.pl_t.pl_pid);
+		PMCLOG_READSTRING(le,ev->pl_u.pl_t.pl_pathname,pathlen);
+		break;
+	case PMCLOG_TYPE_PMCDETACH:
+		PMCLOG_READ32(le,ev->pl_u.pl_d.pl_pmcid);
+		PMCLOG_READ32(le,ev->pl_u.pl_d.pl_pid);
+		break;
+	case PMCLOG_TYPE_PROCCSW:
+		PMCLOG_READ32(le,ev->pl_u.pl_c.pl_pmcid);
+		PMCLOG_READ64(le,ev->pl_u.pl_c.pl_value);
+		PMCLOG_READ32(le,ev->pl_u.pl_c.pl_pid);
+		break;
+	case PMCLOG_TYPE_PROCEXEC:
+		PMCLOG_GET_PATHLEN(pathlen,evlen,pmclog_procexec);
+		PMCLOG_READ32(le,ev->pl_u.pl_x.pl_pid);
+		PMCLOG_READSTRING(le,ev->pl_u.pl_x.pl_pathname,pathlen);
+		break;
+	case PMCLOG_TYPE_PROCEXIT:
+		PMCLOG_READ32(le,ev->pl_u.pl_e.pl_pmcid);
+		PMCLOG_READ64(le,ev->pl_u.pl_e.pl_value);
+		PMCLOG_READ32(le,ev->pl_u.pl_e.pl_pid);
+		break;
+	case PMCLOG_TYPE_PROCFORK:
+		PMCLOG_READ32(le,ev->pl_u.pl_f.pl_oldpid);
+		PMCLOG_READ32(le,ev->pl_u.pl_f.pl_newpid);
+		break;
+	case PMCLOG_TYPE_SYSEXIT:
+		PMCLOG_READ32(le,ev->pl_u.pl_se.pl_pid);
+		break;
+	case PMCLOG_TYPE_USERDATA:
+		PMCLOG_READ32(le,ev->pl_u.pl_u.pl_userdata);
+		break;
+	default:	/* unknown record type */
+		ps->ps_state = PL_STATE_ERROR;
+		ev->pl_state = PMCLOG_ERROR;
+		return -1;
+	}
+
+	/* update stream statistics and hand the event back */
+	ev->pl_offset = (ps->ps_offset += evlen);
+	ev->pl_count = (ps->ps_count += 1);
+	ev->pl_state = PMCLOG_OK;
+	return 0;
+
+ error:
+	ev->pl_state = PMCLOG_ERROR;
+	ps->ps_state = PL_STATE_ERROR;
+	return -1;
+}
+
+/*
+ * Extract and return the next event from the byte stream.
+ *
+ * Returns 0 and sets the event's state to PMCLOG_OK in case an event
+ * was successfully parsed. Otherwise this function returns -1 and
+ * sets the event's state to one of PMCLOG_REQUIRE_DATA (if more data
+ * is needed) or PMCLOG_EOF (if an EOF was seen) or PMCLOG_ERROR if
+ * a parse error was encountered.
+ */
+
+int
+pmclog_read(void *cookie, struct pmclog_ev *ev)
+{
+	ssize_t nread;
+	struct pmclog_parse_state *ps;
+
+	ps = (struct pmclog_parse_state *) cookie;
+
+	/* a parser that has seen an error stays in the error state */
+	if (ps->ps_state == PL_STATE_ERROR) {
+		ev->pl_state = PMCLOG_ERROR;
+		return -1;
+	}
+
+	/*
+	 * If there isn't enough data left for a new event try and get
+	 * more data.
+	 */
+	if (ps->ps_len == 0) {
+		ev->pl_state = PMCLOG_REQUIRE_DATA;
+
+		/*
+		 * If we have a valid file descriptor to read from, attempt
+		 * to read from that.  This read may return with an error,
+		 * (which may be EAGAIN or other recoverable error), or
+		 * can return EOF.
+		 */
+		if (ps->ps_fd != PMCLOG_FD_NONE) {
+			nread = read(ps->ps_fd, ps->ps_buffer,
+			    PMCLOG_BUFFER_SIZE);
+
+			/* map read(2) EOF (0) and errors (<0) to parser states */
+			if (nread <= 0) {
+				ev->pl_state = nread < 0 ? PMCLOG_ERROR :
+				    PMCLOG_EOF;
+				return -1;
+			}
+
+			ps->ps_len = nread;
+			ps->ps_data = ps->ps_buffer;
+		} else	/* a memory-fed parser must wait for pmclog_feed() */
+			return -1;
+	}
+
+	assert(ps->ps_len > 0);
+
+	/*
+	 * Retrieve one event from the byte stream.
+	 */
+	return pmclog_get_event(ps, &ps->ps_data, &ps->ps_len, ev);
+}
+
+/*
+ * Feed data to a memory based parser.
+ *
+ * The memory area pointed to by 'data' needs to be valid until the
+ * next error return from pmclog_read().
+ */
+
+int
+pmclog_feed(void *cookie, char *data, int len)
+{
+	struct pmclog_parse_state *ps;
+
+	ps = (struct pmclog_parse_state *) cookie;
+
+	/* returns 0 on success, -1 if the call is inconsistent with state */
+	if (len < 0 ||		/* invalid length */
+	    ps->ps_buffer ||	/* called for a file parser */
+	    ps->ps_len != 0)	/* unnecessary call */
+		return -1;
+
+	/* note: 'data' is referenced, not copied, by the parser */
+	ps->ps_data = data;
+	ps->ps_len = len;
+
+	return 0;
+}
+
+/*
+ * Allocate and initialize parser state.
+ */
+
+/*
+ * 'fd' is the descriptor to read log data from, or PMCLOG_FD_NONE for
+ * a parser that is fed bytes via pmclog_feed().  Returns an opaque
+ * cookie, or NULL if memory could not be allocated.
+ */
+void *
+pmclog_open(int fd)
+{
+	struct pmclog_parse_state *ps;
+
+	if ((ps = (struct pmclog_parse_state *) malloc(sizeof(*ps))) == NULL)
+		return NULL;
+
+	ps->ps_state = PL_STATE_NEW_RECORD;
+	ps->ps_arch = -1;
+	ps->ps_initialized = 0;
+	ps->ps_count = 0;
+	ps->ps_offset = (off_t) 0;
+	bzero(&ps->ps_saved, sizeof(ps->ps_saved));
+	ps->ps_svcount = 0;
+	ps->ps_fd    = fd;
+	ps->ps_data  = NULL;
+	ps->ps_buffer = NULL;
+	ps->ps_len   = 0;
+
+	/*
+	 * Allocate space for a work area.  On failure, free 'ps' before
+	 * returning so that the partially constructed state is not leaked.
+	 */
+	if (ps->ps_fd != PMCLOG_FD_NONE &&
+	    (ps->ps_buffer = malloc(PMCLOG_BUFFER_SIZE)) == NULL) {
+		free(ps);
+		return NULL;
+	}
+
+	return ps;
+}
+
+
+/*
+ * Free up parser state.
+ */
+
+/*
+ * Release all state associated with parser cookie 'cookie'.
+ */
+void
+pmclog_close(void *cookie)
+{
+	struct pmclog_parse_state *ps;
+
+	ps = (struct pmclog_parse_state *) cookie;
+
+	/* free(NULL) is a no-op, so ps_buffer needs no guard */
+	free(ps->ps_buffer);
+
+	free(ps);
+}
diff --git a/lib/libpmc/pmclog.h b/lib/libpmc/pmclog.h
new file mode 100644
index 0000000..3e3119e
--- /dev/null
+++ b/lib/libpmc/pmclog.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _PMCLOG_H_
+#define _PMCLOG_H_
+
+#include <sys/pmclog.h>
+
+/* Parser status, reported to callers in struct pmclog_ev.pl_state. */
+enum pmclog_state {
+	PMCLOG_OK,		/* a complete event record was parsed */
+	PMCLOG_EOF,		/* end-of-file on the underlying descriptor */
+	PMCLOG_REQUIRE_DATA,	/* more input is needed to make progress */
+	PMCLOG_ERROR		/* a parse or read error was encountered */
+};
+
+/*
+ * Per-record payloads.  Each struct below corresponds to one
+ * PMCLOG_TYPE_* record kind.
+ *
+ * NOTE(review): empty structs are a GNU C extension, not standard C.
+ */
+
+/* PMCLOG_TYPE_DROPNOTIFY: no payload */
+struct pmclog_ev_dropnotify {
+};
+
+/* PMCLOG_TYPE_CLOSELOG: no payload */
+struct pmclog_ev_closelog {
+};
+
+/* PMCLOG_TYPE_INITIALIZE: stream version and architecture */
+struct pmclog_ev_initialize {
+	uint32_t	pl_version;
+	uint32_t	pl_arch;
+};
+
+/* PMCLOG_TYPE_MAPPINGCHANGE: an address mapping changed in a process */
+struct pmclog_ev_mappingchange {
+	uint32_t	pl_type;
+	pid_t		pl_pid;
+	uintfptr_t	pl_start;
+	uintfptr_t	pl_end;
+	char		pl_pathname[PATH_MAX];
+};
+
+/* PMCLOG_TYPE_PCSAMPLE: a program counter sample */
+struct pmclog_ev_pcsample {
+	uintfptr_t	pl_pc;
+	pid_t		pl_pid;
+	pmc_id_t	pl_pmcid;
+};
+
+/* PMCLOG_TYPE_PMCALLOCATE: a PMC was allocated */
+struct pmclog_ev_pmcallocate {
+	uint32_t	pl_event;
+	const char *	pl_evname;	/* resolved via pmc_name_of_event() */
+	uint32_t	pl_flags;
+	pmc_id_t	pl_pmcid;
+};
+
+/* PMCLOG_TYPE_PMCATTACH: a PMC was attached to a process */
+struct pmclog_ev_pmcattach {
+	pmc_id_t	pl_pmcid;
+	pid_t		pl_pid;
+	char		pl_pathname[PATH_MAX];
+};
+
+/* PMCLOG_TYPE_PMCDETACH: a PMC was detached from a process */
+struct pmclog_ev_pmcdetach {
+	pmc_id_t	pl_pmcid;
+	pid_t		pl_pid;
+};
+
+/* PMCLOG_TYPE_PROCCSW: PMC value at a thread context switch */
+struct pmclog_ev_proccsw {
+	pid_t		pl_pid;
+	pmc_id_t	pl_pmcid;
+	pmc_value_t	pl_value;
+};
+
+/* PMCLOG_TYPE_PROCEXEC: a process performed an exec */
+struct pmclog_ev_procexec {
+	pid_t		pl_pid;
+	char		pl_pathname[PATH_MAX];
+};
+
+/* PMCLOG_TYPE_PROCEXIT: PMC value at process exit */
+struct pmclog_ev_procexit {
+	uint32_t	pl_pid;	/* NOTE(review): sibling structs use pid_t here -- confirm */
+	pmc_id_t	pl_pmcid;
+	pmc_value_t	pl_value;
+};
+
+/* PMCLOG_TYPE_PROCFORK: a process forked */
+struct pmclog_ev_procfork {
+	pid_t		pl_oldpid;
+	pid_t		pl_newpid;
+};
+
+/* PMCLOG_TYPE_SYSEXIT: a process exited the system */
+struct pmclog_ev_sysexit {
+	pid_t		pl_pid;
+};
+
+/* PMCLOG_TYPE_USERDATA: user-supplied data written with pmc_writelog() */
+struct pmclog_ev_userdata {
+	uint32_t	pl_userdata;
+};
+
+/*
+ * A parsed event, filled in by pmclog_read().  'pl_type' selects which
+ * member of the 'pl_u' union is valid.
+ */
+struct pmclog_ev {
+	enum pmclog_state pl_state;	/* state after 'get_event()' */
+	off_t		pl_offset;	/* byte offset in stream */
+	size_t		pl_count;	/* count of records so far */
+	struct timespec pl_ts;		/* log entry timestamp */
+	enum pmclog_type pl_type;	/* type of log entry */
+	union { 			/* log entry data */
+		struct pmclog_ev_closelog	pl_cl;
+		struct pmclog_ev_dropnotify	pl_dn;
+		struct pmclog_ev_initialize	pl_i;
+		struct pmclog_ev_mappingchange	pl_m;
+		struct pmclog_ev_pcsample	pl_s;
+		struct pmclog_ev_pmcallocate	pl_a;
+		struct pmclog_ev_pmcattach	pl_t;
+		struct pmclog_ev_pmcdetach	pl_d;
+		struct pmclog_ev_proccsw	pl_c;
+		struct pmclog_ev_procexec	pl_x;
+		struct pmclog_ev_procexit	pl_e;
+		struct pmclog_ev_procfork	pl_f;
+		struct pmclog_ev_sysexit	pl_se;
+		struct pmclog_ev_userdata	pl_u;
+	} pl_u;
+};
+
+#define	PMCLOG_FD_NONE	(-1)	/* no fd: parser is fed via pmclog_feed() */
+
+void	*pmclog_open(int _fd);		/* allocate parser state */
+int	pmclog_feed(void *_cookie, char *_data, int _len); /* supply bytes */
+int	pmclog_read(void *_cookie, struct pmclog_ev *_ev); /* parse one event */
+void	pmclog_close(void *_cookie);	/* release parser state */
+
+#endif
+
OpenPOWER on IntegriCloud