summaryrefslogtreecommitdiffstats
path: root/sys/dev
diff options
context:
space:
mode:
authormarkm <markm@FreeBSD.org>2013-09-06 17:42:12 +0000
committermarkm <markm@FreeBSD.org>2013-09-06 17:42:12 +0000
commit9d67aa8bffecbac35da57b6e638e6ae76f81a4be (patch)
tree0090524dd3d818125109031a4cf05e46ec2d2355 /sys/dev
parent2fd409fcd71c91841eee3f09280c21b2031c8450 (diff)
parentd13d69ef17e933f4e8a1be14f0558e25dad171c7 (diff)
downloadFreeBSD-src-9d67aa8bffecbac35da57b6e638e6ae76f81a4be.zip
FreeBSD-src-9d67aa8bffecbac35da57b6e638e6ae76f81a4be.tar.gz
MFC
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/aac/aac_linux.c4
-rw-r--r--sys/dev/aacraid/aacraid_linux.c13
-rw-r--r--sys/dev/amr/amr_linux.c4
-rw-r--r--sys/dev/atkbdc/psm.c131
-rw-r--r--sys/dev/cfi/cfi_bus_nexus.c5
-rw-r--r--sys/dev/cfi/cfi_core.c329
-rw-r--r--sys/dev/cfi/cfi_dev.c12
-rw-r--r--sys/dev/cfi/cfi_disk.c6
-rw-r--r--sys/dev/cfi/cfi_reg.h14
-rw-r--r--sys/dev/cfi/cfi_var.h21
-rw-r--r--sys/dev/cxgbe/tom/t4_listen.c2
-rw-r--r--sys/dev/filemon/filemon.c16
-rw-r--r--sys/dev/gpio/gpiobus.c2
-rw-r--r--sys/dev/gxemul/cons/gxemul_cons.c10
-rw-r--r--sys/dev/gxemul/disk/gxemul_disk.c15
-rw-r--r--sys/dev/gxemul/disk/gxemul_diskreg.h14
-rw-r--r--sys/dev/gxemul/ether/gxreg.h7
-rw-r--r--sys/dev/hwpmc/hwpmc_logging.c4
-rw-r--r--sys/dev/hwpmc/hwpmc_mod.c6
-rw-r--r--sys/dev/hwpmc/hwpmc_mpc7xxx.c748
-rw-r--r--sys/dev/hwpmc/hwpmc_powerpc.c763
-rw-r--r--sys/dev/hwpmc/hwpmc_powerpc.h59
-rw-r--r--sys/dev/ipmi/ipmi_linux.c4
-rw-r--r--sys/dev/iscsi_initiator/iscsi.c7
-rw-r--r--sys/dev/mfi/mfi_linux.c4
-rw-r--r--sys/dev/ntb/if_ntb/if_ntb.c50
-rw-r--r--sys/dev/ntb/ntb_hw/ntb_hw.c432
-rw-r--r--sys/dev/ntb/ntb_hw/ntb_hw.h1
-rw-r--r--sys/dev/ntb/ntb_hw/ntb_regs.h38
-rw-r--r--sys/dev/tdfx/tdfx_linux.c4
-rw-r--r--sys/dev/usb/serial/uftdi.c1
-rw-r--r--sys/dev/usb/usbdevs2
-rw-r--r--sys/dev/usb/wlan/if_run.c1
-rw-r--r--sys/dev/virtio/network/if_vtnet.c11
-rw-r--r--sys/dev/virtio/network/if_vtnetvar.h1
-rw-r--r--sys/dev/virtio/virtqueue.c4
-rw-r--r--sys/dev/xen/blkback/blkback.c18
37 files changed, 1725 insertions, 1038 deletions
diff --git a/sys/dev/aac/aac_linux.c b/sys/dev/aac/aac_linux.c
index 049e2be..591dfbb 100644
--- a/sys/dev/aac/aac_linux.c
+++ b/sys/dev/aac/aac_linux.c
@@ -75,11 +75,13 @@ MODULE_DEPEND(aac_linux, linux, 1, 1, 1);
static int
aac_linux_ioctl(struct thread *td, struct linux_ioctl_args *args)
{
+ cap_rights_t rights;
struct file *fp;
u_long cmd;
int error;
- if ((error = fget(td, args->fd, CAP_IOCTL, &fp)) != 0)
+ error = fget(td, args->fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
+ if (error != 0)
return (error);
cmd = args->cmd;
diff --git a/sys/dev/aacraid/aacraid_linux.c b/sys/dev/aacraid/aacraid_linux.c
index 3d85445..e58d0a4 100644
--- a/sys/dev/aacraid/aacraid_linux.c
+++ b/sys/dev/aacraid/aacraid_linux.c
@@ -34,6 +34,9 @@ __FBSDID("$FreeBSD$");
*/
#include <sys/param.h>
+#if __FreeBSD_version >= 900000
+#include <sys/capability.h>
+#endif
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
@@ -77,15 +80,19 @@ static int
aacraid_linux_ioctl(struct thread *td, struct linux_ioctl_args *args)
{
struct file *fp;
+#if __FreeBSD_version >= 900000
+ cap_rights_t rights;
+#endif
u_long cmd;
int error;
+ if ((error = fget(td, args->fd,
#if __FreeBSD_version >= 900000
- if ((error = fget(td, args->fd, 0, &fp)) != 0)
-#else
- if ((error = fget(td, args->fd, &fp)) != 0)
+ cap_rights_init(&rights, CAP_IOCTL),
#endif
+ &fp)) != 0) {
return (error);
+ }
cmd = args->cmd;
/*
diff --git a/sys/dev/amr/amr_linux.c b/sys/dev/amr/amr_linux.c
index 44e858b..5b1a17f 100644
--- a/sys/dev/amr/amr_linux.c
+++ b/sys/dev/amr/amr_linux.c
@@ -72,10 +72,12 @@ MODULE_DEPEND(amr, linux, 1, 1, 1);
static int
amr_linux_ioctl(struct thread *p, struct linux_ioctl_args *args)
{
+ cap_rights_t rights;
struct file *fp;
int error;
- if ((error = fget(p, args->fd, CAP_IOCTL, &fp)) != 0)
+ error = fget(p, args->fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
+ if (error != 0)
return (error);
error = fo_ioctl(fp, args->cmd, (caddr_t)args->arg, p->td_ucred, p);
fdrop(fp, p);
diff --git a/sys/dev/atkbdc/psm.c b/sys/dev/atkbdc/psm.c
index 541624f..9a6ae72 100644
--- a/sys/dev/atkbdc/psm.c
+++ b/sys/dev/atkbdc/psm.c
@@ -2601,14 +2601,14 @@ proc_synaptics(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *ms,
static int guest_buttons;
int w, x0, y0;
- /* TouchPad PS/2 absolute mode message format
+ /* TouchPad PS/2 absolute mode message format with capFourButtons:
*
* Bits: 7 6 5 4 3 2 1 0 (LSB)
* ------------------------------------------------
* ipacket[0]: 1 0 W3 W2 0 W1 R L
* ipacket[1]: Yb Ya Y9 Y8 Xb Xa X9 X8
* ipacket[2]: Z7 Z6 Z5 Z4 Z3 Z2 Z1 Z0
- * ipacket[3]: 1 1 Yc Xc 0 W0 D U
+ * ipacket[3]: 1 1 Yc Xc 0 W0 D^R U^L
* ipacket[4]: X7 X6 X5 X4 X3 X2 X1 X0
* ipacket[5]: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
*
@@ -2622,6 +2622,21 @@ proc_synaptics(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *ms,
* Y: y position
* Z: pressure
*
+ * Without capFourButtons but with nExtendeButtons and/or capMiddle
+ *
+ * Bits: 7 6 5 4 3 2 1 0 (LSB)
+ * ------------------------------------------------------
+ * ipacket[3]: 1 1 Yc Xc 0 W0 E^R M^L
+ * ipacket[4]: X7 X6 X5 X4 X3|b7 X2|b5 X1|b3 X0|b1
+ * ipacket[5]: Y7 Y6 Y5 Y4 Y3|b8 Y2|b6 Y1|b4 Y0|b2
+ *
+ * Legend:
+ * M: Middle physical mouse button
+ * E: Extended mouse buttons reported instead of low bits of X and Y
+ * b1-b8: Extended mouse buttons
+ * Only ((nExtendedButtons + 1) >> 1) bits are used in packet
+ * 4 and 5, for reading X and Y value they should be zeroed.
+ *
* Absolute reportable limits: 0 - 6143.
* Typical bezel limits: 1472 - 5472.
* Typical edge marings: 1632 - 5312.
@@ -2675,8 +2690,10 @@ proc_synaptics(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *ms,
w = 4;
}
- /* Handle packets from the guest device */
- /* XXX Documentation? */
+ /*
+ * Handle packets from the guest device. See:
+ * Synaptics PS/2 TouchPad Interfacing Guide, Section 5.1
+ */
if (w == 3 && sc->synhw.capPassthrough) {
*x = ((pb->ipacket[1] & 0x10) ?
pb->ipacket[4] - 256 : pb->ipacket[4]);
@@ -2704,36 +2721,49 @@ proc_synaptics(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *ms,
touchpad_buttons |= MOUSE_BUTTON3DOWN;
if (sc->synhw.capExtended && sc->synhw.capFourButtons) {
- if ((pb->ipacket[3] & 0x01) && (pb->ipacket[0] & 0x01) == 0)
+ if ((pb->ipacket[3] ^ pb->ipacket[0]) & 0x01)
touchpad_buttons |= MOUSE_BUTTON4DOWN;
- if ((pb->ipacket[3] & 0x02) && (pb->ipacket[0] & 0x02) == 0)
+ if ((pb->ipacket[3] ^ pb->ipacket[0]) & 0x02)
touchpad_buttons |= MOUSE_BUTTON5DOWN;
- }
-
- /*
- * In newer pads - bit 0x02 in the third byte of
- * the packet indicates that we have an extended
- * button press.
- */
- /* XXX Documentation? */
- if (pb->ipacket[3] & 0x02) {
- /*
- * if directional_scrolls is not 1, we treat any of
- * the scrolling directions as middle-click.
- */
- if (sc->syninfo.directional_scrolls) {
- if (pb->ipacket[4] & 0x01)
- touchpad_buttons |= MOUSE_BUTTON4DOWN;
- if (pb->ipacket[5] & 0x01)
- touchpad_buttons |= MOUSE_BUTTON5DOWN;
- if (pb->ipacket[4] & 0x02)
- touchpad_buttons |= MOUSE_BUTTON6DOWN;
- if (pb->ipacket[5] & 0x02)
- touchpad_buttons |= MOUSE_BUTTON7DOWN;
- } else {
- if ((pb->ipacket[4] & 0x0F) ||
- (pb->ipacket[5] & 0x0F))
+ } else if (sc->synhw.capExtended && sc->synhw.capMiddle) {
+ /* Middle Button */
+ if ((pb->ipacket[0] ^ pb->ipacket[3]) & 0x01)
+ touchpad_buttons |= MOUSE_BUTTON2DOWN;
+ } else if (sc->synhw.capExtended && (sc->synhw.nExtendedButtons > 0)) {
+ /* Extended Buttons */
+ if ((pb->ipacket[0] ^ pb->ipacket[3]) & 0x02) {
+ if (sc->syninfo.directional_scrolls) {
+ if (pb->ipacket[4] & 0x01)
+ touchpad_buttons |= MOUSE_BUTTON4DOWN;
+ if (pb->ipacket[5] & 0x01)
+ touchpad_buttons |= MOUSE_BUTTON5DOWN;
+ if (pb->ipacket[4] & 0x02)
+ touchpad_buttons |= MOUSE_BUTTON6DOWN;
+ if (pb->ipacket[5] & 0x02)
+ touchpad_buttons |= MOUSE_BUTTON7DOWN;
+ } else {
touchpad_buttons |= MOUSE_BUTTON2DOWN;
+ }
+
+ /*
+ * Zero out bits used by extended buttons to avoid
+ * misinterpretation of the data absolute position.
+ *
+ * The bits represented by
+ *
+ * (nExtendedButtons + 1) >> 1
+ *
+ * will be masked out in both bytes.
+ * The mask for n bits is computed with the formula
+ *
+ * (1 << n) - 1
+ */
+ int maskedbits = 0;
+ int mask = 0;
+ maskedbits = (sc->synhw.nExtendedButtons + 1) >> 1;
+ mask = (1 << maskedbits) - 1;
+ pb->ipacket[4] &= ~(mask);
+ pb->ipacket[5] &= ~(mask);
}
}
@@ -4440,15 +4470,20 @@ enable_synaptics(KBDC kbdc, struct psm_softc *sc)
buttons = 0;
synhw.capExtended = (status[0] & 0x80) != 0;
if (synhw.capExtended) {
- synhw.capPassthrough = (status[2] & 0x80) != 0;
- synhw.capSleep = (status[2] & 0x10) != 0;
- synhw.capFourButtons = (status[2] & 0x08) != 0;
- synhw.capMultiFinger = (status[2] & 0x02) != 0;
- synhw.capPalmDetect = (status[2] & 0x01) != 0;
+ synhw.nExtendedQueries = (status[0] & 0x70) != 0;
+ synhw.capMiddle = (status[0] & 0x04) != 0;
+ synhw.capPassthrough = (status[2] & 0x80) != 0;
+ synhw.capSleep = (status[2] & 0x10) != 0;
+ synhw.capFourButtons = (status[2] & 0x08) != 0;
+ synhw.capMultiFinger = (status[2] & 0x02) != 0;
+ synhw.capPalmDetect = (status[2] & 0x01) != 0;
if (verbose >= 2) {
printf(" Extended capabilities:\n");
printf(" capExtended: %d\n", synhw.capExtended);
+ printf(" capMiddle: %d\n", synhw.capMiddle);
+ printf(" nExtendedQueries: %d\n",
+ synhw.nExtendedQueries);
printf(" capPassthrough: %d\n", synhw.capPassthrough);
printf(" capSleep: %d\n", synhw.capSleep);
printf(" capFourButtons: %d\n", synhw.capFourButtons);
@@ -4457,16 +4492,27 @@ enable_synaptics(KBDC kbdc, struct psm_softc *sc)
}
/*
- * If we have bits set in status[0] & 0x70, then we can load
+ * If nExtendedQueries is 1 or greater, then the TouchPad
+ * supports this number of extended queries. We can load
* more information about buttons using query 0x09.
*/
- if ((status[0] & 0x70) != 0) {
+ if (synhw.capExtended && synhw.nExtendedQueries) {
if (mouse_ext_command(kbdc, 0x09) == 0)
return (FALSE);
if (get_mouse_status(kbdc, status, 0, 3) != 3)
return (FALSE);
- buttons = (status[1] & 0xf0) >> 4;
+ synhw.nExtendedButtons = (status[1] & 0xf0) >> 4;
+ /*
+ * Add the number of extended buttons to the total
+ * button support count, including the middle button
+ * if capMiddle support bit is set.
+ */
+ buttons = synhw.nExtendedButtons + synhw.capMiddle;
} else
+ /*
+ * If the capFourButtons support bit is set,
+ * add a fourth button to the total button count.
+ */
buttons = synhw.capFourButtons ? 1 : 0;
}
if (verbose >= 2) {
@@ -4477,6 +4523,12 @@ enable_synaptics(KBDC kbdc, struct psm_softc *sc)
}
/*
+ * Add the default number of 3 buttons to the total
+ * count of supported buttons reported above.
+ */
+ buttons += 3;
+
+ /*
* Read the mode byte.
*
* XXX: Note the Synaptics documentation also defines the first
@@ -4503,7 +4555,6 @@ enable_synaptics(KBDC kbdc, struct psm_softc *sc)
/* "Commit" the Set Mode Byte command sent above. */
set_mouse_sampling_rate(kbdc, 20);
- buttons += 3;
VLOG(3, (LOG_DEBUG, "synaptics: END init (%d buttons)\n", buttons));
if (sc != NULL) {
diff --git a/sys/dev/cfi/cfi_bus_nexus.c b/sys/dev/cfi/cfi_bus_nexus.c
index 1b317e6..4e1fa4e 100644
--- a/sys/dev/cfi/cfi_bus_nexus.c
+++ b/sys/dev/cfi/cfi_bus_nexus.c
@@ -4,6 +4,11 @@
* Copyright (c) 2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
+ * programme.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
diff --git a/sys/dev/cfi/cfi_core.c b/sys/dev/cfi/cfi_core.c
index 083f5fc..f318ebc 100644
--- a/sys/dev/cfi/cfi_core.c
+++ b/sys/dev/cfi/cfi_core.c
@@ -1,7 +1,13 @@
/*-
* Copyright (c) 2007, Juniper Networks, Inc.
+ * Copyright (c) 2012-2013, SRI International
* All rights reserved.
*
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
+ * programme.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -49,6 +55,8 @@ __FBSDID("$FreeBSD$");
#include <dev/cfi/cfi_reg.h>
#include <dev/cfi/cfi_var.h>
+static void cfi_add_sysctls(struct cfi_softc *);
+
extern struct cdevsw cfi_cdevsw;
char cfi_driver_name[] = "cfi";
@@ -262,6 +270,7 @@ cfi_attach(device_t dev)
struct cfi_softc *sc;
u_int blksz, blocks;
u_int r, u;
+ uint64_t mtoexp, ttoexp;
#ifdef CFI_SUPPORT_STRATAFLASH
uint64_t ppr;
char name[KENV_MNAMELEN], value[32];
@@ -279,11 +288,79 @@ cfi_attach(device_t dev)
sc->sc_tag = rman_get_bustag(sc->sc_res);
sc->sc_handle = rman_get_bushandle(sc->sc_res);
- /* Get time-out values for erase and write. */
- sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
- sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
- sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
- sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
+ /* Get time-out values for erase, write, and buffer write. */
+ ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
+ mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
+ if (ttoexp == 0) {
+ device_printf(dev, "erase timeout == 0, using 2^16ms\n");
+ ttoexp = 16;
+ }
+ if (ttoexp > 41) {
+ device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
+ return (EINVAL);
+ }
+ if (mtoexp == 0) {
+ device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
+ ttoexp + 4);
+ mtoexp = 4;
+ }
+ if (ttoexp + mtoexp > 41) {
+ device_printf(dev, "insane max erase timeout: 2^%jd\n",
+ ttoexp + mtoexp);
+ return (EINVAL);
+ }
+ sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
+ sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
+ sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);
+
+ ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
+ mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
+ if (ttoexp == 0) {
+ device_printf(dev, "write timeout == 0, using 2^18ns\n");
+ ttoexp = 18;
+ }
+ if (ttoexp > 51) {
+ device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
+ return (EINVAL);
+ }
+ if (mtoexp == 0) {
+ device_printf(dev, "max write timeout == 0, using 2^%jdms\n",
+ ttoexp + 4);
+ mtoexp = 4;
+ }
+ if (ttoexp + mtoexp > 51) {
+ device_printf(dev, "insane max write timeout: 2^%jdus\n",
+ ttoexp + mtoexp);
+ return (EINVAL);
+ }
+ sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
+ sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
+ sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);
+
+ ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
+ mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
+ /* Don't check for 0, it means not-supported. */
+ if (ttoexp > 51) {
+ device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
+ return (EINVAL);
+ }
+ if (ttoexp + mtoexp > 51) {
+ device_printf(dev, "insane max write timeout: 2^%jdus\n",
+ ttoexp + mtoexp);
+ return (EINVAL);
+ }
+ sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
+ SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
+ sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
+ sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
+ (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));
+
+ /* Get the maximum size of a multibyte program */
+ if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
+ sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
+ cfi_read_qry(sc, CFI_QRY_MAXBUF) << 8);
+ else
+ sc->sc_maxbuf = 0;
/* Get erase regions. */
sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
@@ -317,6 +394,8 @@ cfi_attach(device_t dev)
"%s%u", cfi_driver_name, u);
sc->sc_nod->si_drv1 = sc;
+ cfi_add_sysctls(sc);
+
#ifdef CFI_SUPPORT_STRATAFLASH
/*
* Store the Intel factory PPR in the environment. In some
@@ -337,6 +416,45 @@ cfi_attach(device_t dev)
return (0);
}
+static void
+cfi_add_sysctls(struct cfi_softc *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *children;
+
+ ctx = device_get_sysctl_ctx(sc->sc_dev);
+ children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
+
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ "typical_erase_timout_count",
+ CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
+ 0, "Number of times the typical erase timeout was exceeded");
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ "max_erase_timout_count",
+ CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
+ "Number of times the maximum erase timeout was exceeded");
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ "typical_write_timout_count",
+ CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
+ "Number of times the typical write timeout was exceeded");
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ "max_write_timout_count",
+ CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
+ "Number of times the maximum write timeout was exceeded");
+ if (sc->sc_maxbuf > 0) {
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ "typical_bufwrite_timout_count",
+ CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
+ "Number of times the typical buffered write timeout was "
+ "exceeded");
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ "max_bufwrite_timout_count",
+ CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
+ "Number of times the maximum buffered write timeout was "
+ "exceeded");
+ }
+}
+
int
cfi_detach(device_t dev)
{
@@ -351,17 +469,22 @@ cfi_detach(device_t dev)
}
static int
-cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
+cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
+ enum cfi_wait_cmd cmd)
{
- int done, error;
+ int done, error, tto_exceeded;
uint32_t st0 = 0, st = 0;
+ sbintime_t now;
done = 0;
error = 0;
- timeout *= 10;
- while (!done && !error && timeout) {
- DELAY(100);
- timeout--;
+ tto_exceeded = 0;
+ while (!done && !error) {
+ /*
+ * Save time before we start so we always do one check
+ * after the timeout has expired.
+ */
+ now = sbinuptime();
switch (sc->sc_cmdset) {
case CFI_VEND_INTEL_ECS:
@@ -390,6 +513,25 @@ cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
break;
}
+
+ if (tto_exceeded ||
+ now > start + sc->sc_typical_timeouts[cmd]) {
+ if (!tto_exceeded) {
+ tto_exceeded = 1;
+ sc->sc_tto_counts[cmd]++;
+#ifdef CFI_DEBUG_TIMEOUT
+ device_printf(sc->sc_dev,
+ "typical timeout exceeded (cmd %d)", cmd);
+#endif
+ }
+ if (now > start + sc->sc_max_timeouts[cmd]) {
+ sc->sc_mto_counts[cmd]++;
+#ifdef CFI_DEBUG_TIMEOUT
+ device_printf(sc->sc_dev,
+ "max timeout exceeded (cmd %d)", cmd);
+#endif
+ }
+ }
}
if (!done && !error)
error = ETIMEDOUT;
@@ -405,9 +547,12 @@ cfi_write_block(struct cfi_softc *sc)
uint8_t *x8;
uint16_t *x16;
uint32_t *x32;
- } ptr;
+ } ptr, cpyprt;
register_t intr;
- int error, i;
+ int error, i, neederase = 0;
+ uint32_t st;
+ u_int wlen;
+ sbintime_t start;
/* Intel flash must be unlocked before modification */
switch (sc->sc_cmdset) {
@@ -419,31 +564,124 @@ cfi_write_block(struct cfi_softc *sc)
break;
}
- /* Erase the block. */
- switch (sc->sc_cmdset) {
- case CFI_VEND_INTEL_ECS:
- case CFI_VEND_INTEL_SCS:
- cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
- cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
- break;
- case CFI_VEND_AMD_SCS:
- case CFI_VEND_AMD_ECS:
- cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
- CFI_AMD_ERASE_SECTOR);
- cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
- break;
- default:
- /* Better safe than sorry... */
- return (ENODEV);
- }
- error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
- if (error)
- goto out;
+ /* Check if an erase is required. */
+ for (i = 0; i < sc->sc_wrbufsz; i++)
+ if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
+ neederase = 1;
+ break;
+ }
+
+ if (neederase) {
+ intr = intr_disable();
+ start = sbinuptime();
+ /* Erase the block. */
+ switch (sc->sc_cmdset) {
+ case CFI_VEND_INTEL_ECS:
+ case CFI_VEND_INTEL_SCS:
+ cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
+ cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
+ break;
+ case CFI_VEND_AMD_SCS:
+ case CFI_VEND_AMD_ECS:
+ cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
+ CFI_AMD_ERASE_SECTOR);
+ cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
+ break;
+ default:
+ /* Better safe than sorry... */
+ intr_restore(intr);
+ return (ENODEV);
+ }
+ intr_restore(intr);
+ error = cfi_wait_ready(sc, sc->sc_wrofs, start,
+ CFI_TIMEOUT_ERASE);
+ if (error)
+ goto out;
+ } else
+ error = 0;
- /* Write the block. */
+ /* Write the block using a multibyte write if supported. */
ptr.x8 = sc->sc_wrbuf;
+ cpyprt.x8 = sc->sc_wrbufcpy;
+ if (sc->sc_maxbuf > sc->sc_width) {
+ switch (sc->sc_cmdset) {
+ case CFI_VEND_INTEL_ECS:
+ case CFI_VEND_INTEL_SCS:
+ for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
+ wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);
+
+ intr = intr_disable();
+
+ start = sbinuptime();
+ do {
+ cfi_write(sc, sc->sc_wrofs + i,
+ CFI_BCS_BUF_PROG_SETUP);
+ if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
+ error = ETIMEDOUT;
+ goto out;
+ }
+ st = cfi_read(sc, sc->sc_wrofs + i);
+ } while (! (st & CFI_INTEL_STATUS_WSMS));
+
+ cfi_write(sc, sc->sc_wrofs + i,
+ (wlen / sc->sc_width) - 1);
+ switch (sc->sc_width) {
+ case 1:
+ bus_space_write_region_1(sc->sc_tag,
+ sc->sc_handle, sc->sc_wrofs + i,
+ ptr.x8 + i, wlen);
+ break;
+ case 2:
+ bus_space_write_region_2(sc->sc_tag,
+ sc->sc_handle, sc->sc_wrofs + i,
+ ptr.x16 + i / 2, wlen / 2);
+ break;
+ case 4:
+ bus_space_write_region_4(sc->sc_tag,
+ sc->sc_handle, sc->sc_wrofs + i,
+ ptr.x32 + i / 4, wlen / 4);
+ break;
+ }
+
+ cfi_write(sc, sc->sc_wrofs + i,
+ CFI_BCS_CONFIRM);
+
+ intr_restore(intr);
+
+ error = cfi_wait_ready(sc, sc->sc_wrofs + i,
+ start, CFI_TIMEOUT_BUFWRITE);
+ if (error != 0)
+ goto out;
+ }
+ goto out;
+ default:
+ /* Fall through to single word case */
+ break;
+ }
+
+ }
+
+ /* Write the block one byte/word at a time. */
for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
+ /* Avoid writing unless we are actually changing bits */
+ if (!neederase) {
+ switch (sc->sc_width) {
+ case 1:
+ if(*(ptr.x8 + i) == *(cpyprt.x8 + i))
+ continue;
+ break;
+ case 2:
+ if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
+ continue;
+ break;
+ case 4:
+ if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
+ continue;
+ break;
+ }
+ }
+
/*
* Make sure the command to start a write and the
* actual write happens back-to-back without any
@@ -451,6 +689,7 @@ cfi_write_block(struct cfi_softc *sc)
*/
intr = intr_disable();
+ start = sbinuptime();
switch (sc->sc_cmdset) {
case CFI_VEND_INTEL_ECS:
case CFI_VEND_INTEL_SCS:
@@ -464,21 +703,22 @@ cfi_write_block(struct cfi_softc *sc)
switch (sc->sc_width) {
case 1:
bus_space_write_1(sc->sc_tag, sc->sc_handle,
- sc->sc_wrofs + i, *(ptr.x8)++);
+ sc->sc_wrofs + i, *(ptr.x8 + i));
break;
case 2:
bus_space_write_2(sc->sc_tag, sc->sc_handle,
- sc->sc_wrofs + i, *(ptr.x16)++);
+ sc->sc_wrofs + i, *(ptr.x16 + i / 2));
break;
case 4:
bus_space_write_4(sc->sc_tag, sc->sc_handle,
- sc->sc_wrofs + i, *(ptr.x32)++);
+ sc->sc_wrofs + i, *(ptr.x32 + i / 4));
break;
}
-
+
intr_restore(intr);
- error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_write_timeout);
+ error = cfi_wait_ready(sc, sc->sc_wrofs, start,
+ CFI_TIMEOUT_WRITE);
if (error)
goto out;
}
@@ -576,6 +816,7 @@ cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
#ifdef CFI_ARMEDANDDANGEROUS
register_t intr;
int i, error;
+ sbintime_t start;
#endif
if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
@@ -585,11 +826,12 @@ cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
#ifdef CFI_ARMEDANDDANGEROUS
for (i = 7; i >= 4; i--, id >>= 16) {
intr = intr_disable();
+ start = sbinuptime();
cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
intr_restore(intr);
- error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
- sc->sc_write_timeout);
+ error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
+ CFI_TIMEOUT_WRITE);
if (error)
break;
}
@@ -629,6 +871,7 @@ cfi_intel_set_plr(struct cfi_softc *sc)
#ifdef CFI_ARMEDANDDANGEROUS
register_t intr;
int error;
+ sbintime_t start;
#endif
if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
return EOPNOTSUPP;
@@ -638,10 +881,12 @@ cfi_intel_set_plr(struct cfi_softc *sc)
/* worthy of console msg */
device_printf(sc->sc_dev, "set PLR\n");
intr = intr_disable();
+ binuptime(&start);
cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
intr_restore(intr);
- error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
+ error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
+ CFI_TIMEOUT_WRITE);
cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
return error;
#else
diff --git a/sys/dev/cfi/cfi_dev.c b/sys/dev/cfi/cfi_dev.c
index d511eac..7d1f92b 100644
--- a/sys/dev/cfi/cfi_dev.c
+++ b/sys/dev/cfi/cfi_dev.c
@@ -1,7 +1,13 @@
/*-
* Copyright (c) 2007, Juniper Networks, Inc.
+ * Copyright (c) 2012-2013, SRI International
* All rights reserved.
*
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
+ * programme.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -72,7 +78,8 @@ struct cdevsw cfi_cdevsw = {
* Begin writing into a new block/sector. We read the sector into
* memory and keep updating that, until we move into another sector
* or the process stops writing. At that time we write the whole
- * sector to flash (see cfi_block_finish).
+ * sector to flash (see cfi_block_finish). To avoid unneeded erase
+ * cycles, keep a pristine copy of the sector on hand.
*/
int
cfi_block_start(struct cfi_softc *sc, u_int ofs)
@@ -116,6 +123,8 @@ cfi_block_start(struct cfi_softc *sc, u_int ofs)
break;
}
}
+ sc->sc_wrbufcpy = malloc(sc->sc_wrbufsz, M_TEMP, M_WAITOK);
+ memcpy(sc->sc_wrbufcpy, sc->sc_wrbuf, sc->sc_wrbufsz);
sc->sc_writing = 1;
return (0);
}
@@ -131,6 +140,7 @@ cfi_block_finish(struct cfi_softc *sc)
error = cfi_write_block(sc);
free(sc->sc_wrbuf, M_TEMP);
+ free(sc->sc_wrbufcpy, M_TEMP);
sc->sc_wrbuf = NULL;
sc->sc_wrbufsz = 0;
sc->sc_wrofs = 0;
diff --git a/sys/dev/cfi/cfi_disk.c b/sys/dev/cfi/cfi_disk.c
index f5bcb1b..7980722 100644
--- a/sys/dev/cfi/cfi_disk.c
+++ b/sys/dev/cfi/cfi_disk.c
@@ -1,7 +1,13 @@
/*-
* Copyright (c) 2009 Sam Leffler, Errno Consulting
+ * Copyright (c) 2012-2013, SRI International
* All rights reserved.
*
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
+ * programme.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
diff --git a/sys/dev/cfi/cfi_reg.h b/sys/dev/cfi/cfi_reg.h
index 7c22211..c810e3f 100644
--- a/sys/dev/cfi/cfi_reg.h
+++ b/sys/dev/cfi/cfi_reg.h
@@ -1,7 +1,13 @@
/*-
* Copyright (c) 2007, Juniper Networks, Inc.
+ * Copyright (c) 2012-2013, SRI International
* All rights reserved.
*
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
+ * programme.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -44,8 +50,8 @@ struct cfi_qry {
u_char max_vcc;
u_char min_vpp;
u_char max_vpp;
- u_char tto_byte_write; /* 2**n milliseconds. */
- u_char tto_buf_write; /* 2**n milliseconds. */
+ u_char tto_byte_write; /* 2**n microseconds. */
+ u_char tto_buf_write; /* 2**n microseconds. */
u_char tto_block_erase; /* 2**n milliseconds. */
u_char tto_chip_erase; /* 2**n milliseconds. */
u_char mto_byte_write; /* 2**n times typical t/o. */
@@ -70,12 +76,15 @@ struct cfi_qry {
#define CFI_QRY_VEND offsetof(struct cfi_qry, pri_vend)
#define CFI_QRY_TTO_WRITE offsetof(struct cfi_qry, tto_byte_write)
+#define CFI_QRY_TTO_BUFWRITE offsetof(struct cfi_qry, tto_buf_write)
#define CFI_QRY_TTO_ERASE offsetof(struct cfi_qry, tto_block_erase)
#define CFI_QRY_MTO_WRITE offsetof(struct cfi_qry, mto_byte_write)
+#define CFI_QRY_MTO_BUFWRITE offsetof(struct cfi_qry, mto_buf_write)
#define CFI_QRY_MTO_ERASE offsetof(struct cfi_qry, mto_block_erase)
#define CFI_QRY_SIZE offsetof(struct cfi_qry, size)
#define CFI_QRY_IFACE offsetof(struct cfi_qry, iface)
+#define CFI_QRY_MAXBUF offsetof(struct cfi_qry, max_buf_write_size)
#define CFI_QRY_NREGIONS offsetof(struct cfi_qry, nregions)
#define CFI_QRY_REGION0 offsetof(struct cfi_qry, region)
#define CFI_QRY_REGION(x) (CFI_QRY_REGION0 + (x) * 4)
@@ -102,6 +111,7 @@ struct cfi_qry {
#define CFI_BCS_ERASE_SUSPEND 0xb0
#define CFI_BCS_ERASE_RESUME 0xd0 /* Equals CONFIRM */
#define CFI_BCS_CONFIRM 0xd0
+#define CFI_BCS_BUF_PROG_SETUP 0xe8
#define CFI_BCS_READ_ARRAY 0xff
/* Intel commands. */
diff --git a/sys/dev/cfi/cfi_var.h b/sys/dev/cfi/cfi_var.h
index 15c7769..e218a4d 100644
--- a/sys/dev/cfi/cfi_var.h
+++ b/sys/dev/cfi/cfi_var.h
@@ -1,7 +1,13 @@
/*-
* Copyright (c) 2007, Juniper Networks, Inc.
+ * Copyright (c) 2012-2013, SRI International
* All rights reserved.
*
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
+ * programme.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -32,6 +38,12 @@
#ifndef _DEV_CFI_VAR_H_
#define _DEV_CFI_VAR_H_
+enum cfi_wait_cmd {
+ CFI_TIMEOUT_ERASE,
+ CFI_TIMEOUT_WRITE,
+ CFI_TIMEOUT_BUFWRITE
+};
+
struct cfi_region {
u_int r_blocks;
u_int r_blksz;
@@ -51,13 +63,18 @@ struct cfi_softc {
struct cfi_region *sc_region; /* Array of region info. */
u_int sc_cmdset;
- u_int sc_erase_timeout;
- u_int sc_write_timeout;
+ sbintime_t sc_typical_timeouts[3];
+ sbintime_t sc_max_timeouts[3];
+ u_int sc_tto_counts[3];
+ u_int sc_mto_counts[3];
+
+ u_int sc_maxbuf;
struct cdev *sc_nod;
struct proc *sc_opened; /* Process that has us opened. */
u_char *sc_wrbuf;
+ u_char *sc_wrbufcpy;
u_int sc_wrbufsz;
u_int sc_wrofs;
u_int sc_writing;
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index 9e1dc80..17f4adb 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -1007,7 +1007,7 @@ calc_opt2p(struct adapter *sc, struct port_info *pi, int rxqid,
opt2 |= F_TSTAMPS_EN;
if (tcpopt->sack)
opt2 |= F_SACK_EN;
- if (tcpopt->wsf > 0)
+ if (tcpopt->wsf <= 14)
opt2 |= F_WND_SCALE_EN;
}
diff --git a/sys/dev/filemon/filemon.c b/sys/dev/filemon/filemon.c
index ce84e3d..e3fda18 100644
--- a/sys/dev/filemon/filemon.c
+++ b/sys/dev/filemon/filemon.c
@@ -138,12 +138,6 @@ filemon_dtr(void *data)
}
}
-#if __FreeBSD_version < 900041
-#define FGET_WRITE(a1, a2, a3) fget_write((a1), (a2), (a3))
-#else
-#define FGET_WRITE(a1, a2, a3) fget_write((a1), (a2), CAP_WRITE | CAP_SEEK, (a3))
-#endif
-
static int
filemon_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag __unused,
struct thread *td)
@@ -151,13 +145,21 @@ filemon_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag __unused,
int error = 0;
struct filemon *filemon;
struct proc *p;
+#if __FreeBSD_version >= 900041
+ cap_rights_t rights;
+#endif
devfs_get_cdevpriv((void **) &filemon);
switch (cmd) {
/* Set the output file descriptor. */
case FILEMON_SET_FD:
- if ((error = FGET_WRITE(td, *(int *)data, &filemon->fp)) == 0)
+ error = fget_write(td, *(int *)data,
+#if __FreeBSD_version >= 900041
+ cap_rights_init(&rights, CAP_PWRITE),
+#endif
+ &filemon->fp);
+ if (error == 0)
/* Write the file header. */
filemon_comment(filemon);
break;
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index d61f7aa..6abb10c 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -131,7 +131,7 @@ gpiobus_parse_pins(struct gpiobus_softc *sc, device_t child, int mask)
}
if (npins == 0) {
- device_printf(child, "empty pin mask");
+ device_printf(child, "empty pin mask\n");
return (EINVAL);
}
diff --git a/sys/dev/gxemul/cons/gxemul_cons.c b/sys/dev/gxemul/cons/gxemul_cons.c
index b83aa94..cb3b000 100644
--- a/sys/dev/gxemul/cons/gxemul_cons.c
+++ b/sys/dev/gxemul/cons/gxemul_cons.c
@@ -99,18 +99,16 @@ static void gxemul_cons_timeout(void *);
* XXXRW: Should be using FreeBSD's bus routines here, but they are not
* available until later in the boot.
*/
-typedef uint64_t paddr_t;
-typedef uint64_t vaddr_t;
-static inline vaddr_t
-mips_phys_to_uncached(paddr_t phys)
+static inline vm_offset_t
+mips_phys_to_uncached(vm_paddr_t phys)
{
return (MIPS_PHYS_TO_DIRECT_UNCACHED(phys));
}
static inline uint8_t
-mips_ioread_uint8(vaddr_t vaddr)
+mips_ioread_uint8(vm_offset_t vaddr)
{
uint8_t v;
@@ -119,7 +117,7 @@ mips_ioread_uint8(vaddr_t vaddr)
}
static inline void
-mips_iowrite_uint8(vaddr_t vaddr, uint8_t v)
+mips_iowrite_uint8(vm_offset_t vaddr, uint8_t v)
{
__asm__ __volatile__ ("sb %0, 0(%1)" : : "r" (v), "r" (vaddr));
diff --git a/sys/dev/gxemul/disk/gxemul_disk.c b/sys/dev/gxemul/disk/gxemul_disk.c
index 8cf52e4..3b7e649 100644
--- a/sys/dev/gxemul/disk/gxemul_disk.c
+++ b/sys/dev/gxemul/disk/gxemul_disk.c
@@ -214,7 +214,14 @@ gxemul_disk_read(unsigned diskid, void *buf, off_t off)
if (off < 0 || off % GXEMUL_DISK_DEV_BLOCKSIZE != 0)
return (EINVAL);
+#ifdef _LP64
GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_OFFSET, (uint64_t)off);
+#else
+ GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_OFFSET_LO,
+ (uint32_t)(off & 0xffffffff));
+ GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_OFFSET_HI,
+ (uint32_t)((off >> 32) & 0xffffffff));
+#endif
GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_DISKID, diskid);
GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_START, GXEMUL_DISK_DEV_START_READ);
switch (GXEMUL_DISK_DEV_READ(GXEMUL_DISK_DEV_STATUS)) {
@@ -280,7 +287,15 @@ gxemul_disk_write(unsigned diskid, const void *buf, off_t off)
if (off < 0 || off % GXEMUL_DISK_DEV_BLOCKSIZE != 0)
return (EINVAL);
+#ifdef _LP64
GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_OFFSET, (uint64_t)off);
+#else
+ GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_OFFSET_LO,
+ (uint32_t)(off & 0xffffffff));
+ GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_OFFSET_HI,
+ (uint32_t)((off >> 32) & 0xffffffff));
+#endif
+
GXEMUL_DISK_DEV_WRITE(GXEMUL_DISK_DEV_DISKID, diskid);
dst = GXEMUL_DISK_DEV_FUNCTION(GXEMUL_DISK_DEV_BLOCK);
diff --git a/sys/dev/gxemul/disk/gxemul_diskreg.h b/sys/dev/gxemul/disk/gxemul_diskreg.h
index c3460e5..f837944 100644
--- a/sys/dev/gxemul/disk/gxemul_diskreg.h
+++ b/sys/dev/gxemul/disk/gxemul_diskreg.h
@@ -36,16 +36,28 @@
#define GXEMUL_DISK_DEV_ID_START (0x0000)
#define GXEMUL_DISK_DEV_ID_END (0x0100)
-#define GXEMUL_DISK_DEV_OFFSET (0x0000)
+#ifdef _LP64
+#define GXEMUL_DISK_DEV_OFFSET (0x0000)
+#else
+#define GXEMUL_DISK_DEV_OFFSET_LO (0x0000)
+#define GXEMUL_DISK_DEV_OFFSET_HI (0x0008)
+#endif
#define GXEMUL_DISK_DEV_DISKID (0x0010)
#define GXEMUL_DISK_DEV_START (0x0020)
#define GXEMUL_DISK_DEV_STATUS (0x0030)
#define GXEMUL_DISK_DEV_BLOCK (0x4000)
+#ifdef _LP64
#define GXEMUL_DISK_DEV_FUNCTION(f) \
(volatile uint64_t *)MIPS_PHYS_TO_DIRECT_UNCACHED(GXEMUL_DISK_DEV_BASE + (f))
#define GXEMUL_DISK_DEV_READ(f) \
(volatile uint64_t)*GXEMUL_DISK_DEV_FUNCTION(f)
+#else
+#define GXEMUL_DISK_DEV_FUNCTION(f) \
+ (volatile uint32_t *)MIPS_PHYS_TO_DIRECT_UNCACHED(GXEMUL_DISK_DEV_BASE + (f))
+#define GXEMUL_DISK_DEV_READ(f) \
+ (volatile uint32_t)*GXEMUL_DISK_DEV_FUNCTION(f)
+#endif
#define GXEMUL_DISK_DEV_WRITE(f, v) \
*GXEMUL_DISK_DEV_FUNCTION(f) = (v)
diff --git a/sys/dev/gxemul/ether/gxreg.h b/sys/dev/gxemul/ether/gxreg.h
index e67f43d..a528250 100644
--- a/sys/dev/gxemul/ether/gxreg.h
+++ b/sys/dev/gxemul/ether/gxreg.h
@@ -40,10 +40,17 @@
#define GXEMUL_ETHER_DEV_COMMAND (0x4020)
#define GXEMUL_ETHER_DEV_MAC (0x4040)
+#ifdef _LP64
#define GXEMUL_ETHER_DEV_FUNCTION(f) \
(volatile uint64_t *)MIPS_PHYS_TO_DIRECT_UNCACHED(GXEMUL_ETHER_DEV_BASE + (f))
#define GXEMUL_ETHER_DEV_READ(f) \
(volatile uint64_t)*GXEMUL_ETHER_DEV_FUNCTION(f)
+#else
+#define GXEMUL_ETHER_DEV_FUNCTION(f) \
+ (volatile uint32_t *)MIPS_PHYS_TO_DIRECT_UNCACHED(GXEMUL_ETHER_DEV_BASE + (f))
+#define GXEMUL_ETHER_DEV_READ(f) \
+ (volatile uint32_t)*GXEMUL_ETHER_DEV_FUNCTION(f)
+#endif
#define GXEMUL_ETHER_DEV_WRITE(f, v) \
*GXEMUL_ETHER_DEV_FUNCTION(f) = (v)
diff --git a/sys/dev/hwpmc/hwpmc_logging.c b/sys/dev/hwpmc/hwpmc_logging.c
index 880bcaa..a60e096 100644
--- a/sys/dev/hwpmc/hwpmc_logging.c
+++ b/sys/dev/hwpmc/hwpmc_logging.c
@@ -570,6 +570,7 @@ pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
{
int error;
struct proc *p;
+ cap_rights_t rights;
/*
* As long as it is possible to get a LOR between pmc_sx lock and
@@ -593,7 +594,8 @@ pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
po->po_file));
/* get a reference to the file state */
- error = fget_write(curthread, logfd, CAP_WRITE, &po->po_file);
+ error = fget_write(curthread, logfd,
+ cap_rights_init(&rights, CAP_WRITE), &po->po_file);
if (error)
goto error;
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 86242d9..8e5eac8 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -2026,11 +2026,7 @@ pmc_allocate_owner_descriptor(struct proc *p)
/* allocate space for N pointers and one descriptor struct */
po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO);
- po->po_sscount = po->po_error = po->po_flags = po->po_logprocmaps = 0;
- po->po_file = NULL;
po->po_owner = p;
- po->po_kthread = NULL;
- LIST_INIT(&po->po_pmcs);
LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
TAILQ_INIT(&po->po_logbuffers);
@@ -2156,8 +2152,6 @@ pmc_allocate_pmc_descriptor(void)
struct pmc *pmc;
pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
- pmc->pm_owner = NULL;
- LIST_INIT(&pmc->pm_targets);
PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
diff --git a/sys/dev/hwpmc/hwpmc_mpc7xxx.c b/sys/dev/hwpmc/hwpmc_mpc7xxx.c
new file mode 100644
index 0000000..93b5c74
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_mpc7xxx.c
@@ -0,0 +1,748 @@
+/*-
+ * Copyright (c) 2011 Justin Hibbits
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/pmc_mdep.h>
+#include <machine/spr.h>
+#include <machine/cpu.h>
+
+#include "hwpmc_powerpc.h"
+
+#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
+
+#define PPC_SET_PMC1SEL(r, x) ((r & ~(SPR_MMCR0_PMC1SEL(0x3f))) | SPR_MMCR0_PMC1SEL(x))
+#define PPC_SET_PMC2SEL(r, x) ((r & ~(SPR_MMCR0_PMC2SEL(0x3f))) | SPR_MMCR0_PMC2SEL(x))
+#define PPC_SET_PMC3SEL(r, x) ((r & ~(SPR_MMCR1_PMC3SEL(0x1f))) | SPR_MMCR1_PMC3SEL(x))
+#define PPC_SET_PMC4SEL(r, x) ((r & ~(SPR_MMCR1_PMC4SEL(0x1f))) | SPR_MMCR1_PMC4SEL(x))
+#define PPC_SET_PMC5SEL(r, x) ((r & ~(SPR_MMCR1_PMC5SEL(0x1f))) | SPR_MMCR1_PMC5SEL(x))
+#define PPC_SET_PMC6SEL(r, x) ((r & ~(SPR_MMCR1_PMC6SEL(0x3f))) | SPR_MMCR1_PMC6SEL(x))
+
+/* Change this when we support more than just the 7450. */
+#define MPC7XXX_MAX_PMCS 6
+
+#define MPC7XXX_PMC_HAS_OVERFLOWED(x) (mpc7xxx_pmcn_read(x) & (0x1 << 31))
+
+/*
+ * Things to improve on this:
+ * - It stops (clears to 0) the PMC and resets it at every context switch
+ * currently.
+ */
+
+/*
+ * This should work for every 32-bit PowerPC implementation I know of (G3 and G4
+ * specifically).
+ */
+
+struct powerpc_event_code_map {
+ enum pmc_event pe_ev; /* enum value */
+ uint8_t pe_counter_mask; /* Which counter this can be counted in. */
+ uint8_t pe_code; /* numeric code */
+};
+
+#define PPC_PMC_MASK1 0
+#define PPC_PMC_MASK2 1
+#define PPC_PMC_MASK3 2
+#define PPC_PMC_MASK4 3
+#define PPC_PMC_MASK5 4
+#define PPC_PMC_MASK6 5
+#define PPC_PMC_MASK_ALL 0x3f
+#define PMC_POWERPC_EVENT(id, mask, number) \
+ { .pe_ev = PMC_EV_PPC7450_##id, .pe_counter_mask = mask, .pe_code = number }
+
+static struct powerpc_event_code_map powerpc_event_codes[] = {
+ PMC_POWERPC_EVENT(CYCLE,PPC_PMC_MASK_ALL, 1),
+ PMC_POWERPC_EVENT(INSTR_COMPLETED, 0x0f, 2),
+ PMC_POWERPC_EVENT(TLB_BIT_TRANSITIONS, 0x0f, 3),
+ PMC_POWERPC_EVENT(INSTR_DISPATCHED, 0x0f, 4),
+ PMC_POWERPC_EVENT(PMON_EXCEPT, 0x0f, 5),
+ PMC_POWERPC_EVENT(PMON_SIG, 0x0f, 7),
+ PMC_POWERPC_EVENT(VPU_INSTR_COMPLETED, 0x03, 8),
+ PMC_POWERPC_EVENT(VFPU_INSTR_COMPLETED, 0x03, 9),
+ PMC_POWERPC_EVENT(VIU1_INSTR_COMPLETED, 0x03, 10),
+ PMC_POWERPC_EVENT(VIU2_INSTR_COMPLETED, 0x03, 11),
+ PMC_POWERPC_EVENT(MTVSCR_INSTR_COMPLETED, 0x03, 12),
+ PMC_POWERPC_EVENT(MTVRSAVE_INSTR_COMPLETED, 0x03, 13),
+ PMC_POWERPC_EVENT(VPU_INSTR_WAIT_CYCLES, 0x03, 14),
+ PMC_POWERPC_EVENT(VFPU_INSTR_WAIT_CYCLES, 0x03, 15),
+ PMC_POWERPC_EVENT(VIU1_INSTR_WAIT_CYCLES, 0x03, 16),
+ PMC_POWERPC_EVENT(VIU2_INSTR_WAIT_CYCLES, 0x03, 17),
+ PMC_POWERPC_EVENT(MFVSCR_SYNC_CYCLES, 0x03, 18),
+ PMC_POWERPC_EVENT(VSCR_SAT_SET, 0x03, 19),
+ PMC_POWERPC_EVENT(STORE_INSTR_COMPLETED, 0x03, 20),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_MISSES, 0x03, 21),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOPS, 0x03, 22),
+ PMC_POWERPC_EVENT(UNRESOLVED_BRANCHES, 0x01, 23),
+ PMC_POWERPC_EVENT(SPEC_BUFFER_CYCLES, 0x01, 24),
+ PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_CYCLES, 0x01, 25),
+ PMC_POWERPC_EVENT(TRUE_BRANCH_TARGET_HITS, 0x01, 26),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STAC_PREDICTED, 0x01, 27),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_DISPATCHES, 0x01, 28),
+ PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_DISPATCHED, 0x01, 29),
+ PMC_POWERPC_EVENT(THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 30),
+ PMC_POWERPC_EVENT(THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 31),
+ PMC_POWERPC_EVENT(CYCLES_NO_COMPLETED_INSTRS, 0x01, 32),
+ PMC_POWERPC_EVENT(IU2_INSTR_COMPLETED, 0x01, 33),
+ PMC_POWERPC_EVENT(BRANCHES_COMPLETED, 0x01, 34),
+ PMC_POWERPC_EVENT(EIEIO_INSTR_COMPLETED, 0x01, 35),
+ PMC_POWERPC_EVENT(MTSPR_INSTR_COMPLETED, 0x01, 36),
+ PMC_POWERPC_EVENT(SC_INSTR_COMPLETED, 0x01, 37),
+ PMC_POWERPC_EVENT(LS_LM_COMPLETED, 0x01, 38),
+ PMC_POWERPC_EVENT(ITLB_HW_TABLE_SEARCH_CYCLES, 0x01, 39),
+ PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x01, 40),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_ACCESSES, 0x01, 41),
+ PMC_POWERPC_EVENT(INSTR_BKPT_MATCHES, 0x01, 42),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD, 0x01, 43),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_ON_MODIFIED, 0x01, 44),
+ PMC_POWERPC_EVENT(LOAD_MISS_ALIAS, 0x01, 45),
+ PMC_POWERPC_EVENT(LOAD_MISS_ALIAS_ON_TOUCH, 0x01, 46),
+ PMC_POWERPC_EVENT(TOUCH_ALIAS, 0x01, 47),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT_QUEUE, 0x01, 48),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT, 0x01, 49),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HITS, 0x01, 50),
+ PMC_POWERPC_EVENT(WRITE_THROUGH_STORES, 0x01, 51),
+ PMC_POWERPC_EVENT(CACHE_INHIBITED_STORES, 0x01, 52),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_HIT, 0x01, 53),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_HIT, 0x01, 54),
+ PMC_POWERPC_EVENT(L1_DATA_STORE_HIT, 0x01, 55),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_HITS, 0x01, 56),
+ PMC_POWERPC_EVENT(DST_INSTR_DISPATCHED, 0x01, 57),
+ PMC_POWERPC_EVENT(REFRESHED_DSTS, 0x01, 58),
+ PMC_POWERPC_EVENT(SUCCESSFUL_DST_TABLE_SEARCHES, 0x01, 59),
+ PMC_POWERPC_EVENT(DSS_INSTR_COMPLETED, 0x01, 60),
+ PMC_POWERPC_EVENT(DST_STREAM_0_CACHE_LINE_FETCHES, 0x01, 61),
+ PMC_POWERPC_EVENT(VTQ_SUSPENDS_DUE_TO_CTX_CHANGE, 0x01, 62),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH_HIT, 0x01, 63),
+ PMC_POWERPC_EVENT(VEC_LOAD_INSTR_COMPLETED, 0x01, 64),
+ PMC_POWERPC_EVENT(FP_STORE_INSTR_COMPLETED_IN_LSU, 0x01, 65),
+ PMC_POWERPC_EVENT(FPU_RENORMALIZATION, 0x01, 66),
+ PMC_POWERPC_EVENT(FPU_DENORMALIZATION, 0x01, 67),
+ PMC_POWERPC_EVENT(FP_STORE_CAUSES_STALL_IN_LSU, 0x01, 68),
+ PMC_POWERPC_EVENT(LD_ST_TRUE_ALIAS_STALL, 0x01, 70),
+ PMC_POWERPC_EVENT(LSU_INDEXED_ALIAS_STALL, 0x01, 71),
+ PMC_POWERPC_EVENT(LSU_ALIAS_VS_FSQ_WB0_WB1, 0x01, 72),
+ PMC_POWERPC_EVENT(LSU_ALIAS_VS_CSQ, 0x01, 73),
+ PMC_POWERPC_EVENT(LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0, 0x01, 74),
+ PMC_POWERPC_EVENT(LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0, 0x01, 75),
+ PMC_POWERPC_EVENT(LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1, 0x01, 76),
+ PMC_POWERPC_EVENT(LSU_TOUCH_ALIAS_VS_CSQ, 0x01, 77),
+ PMC_POWERPC_EVENT(LSU_LMQ_FULL_STALL, 0x01, 78),
+ PMC_POWERPC_EVENT(FP_LOAD_INSTR_COMPLETED_IN_LSU, 0x01, 79),
+ PMC_POWERPC_EVENT(FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU, 0x01, 80),
+ PMC_POWERPC_EVENT(FP_LOAD_DOUBLE_COMPLETED_IN_LSU, 0x01, 81),
+ PMC_POWERPC_EVENT(LSU_RA_LATCH_STALL, 0x01, 82),
+ PMC_POWERPC_EVENT(LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL, 0x01, 83),
+ PMC_POWERPC_EVENT(LSU_LMQ_INDEX_ALIAS, 0x01, 84),
+ PMC_POWERPC_EVENT(LSU_STORE_QUEUE_INDEX_ALIAS, 0x01, 85),
+ PMC_POWERPC_EVENT(LSU_CSQ_FORWARDING, 0x01, 86),
+ PMC_POWERPC_EVENT(LSU_MISALIGNED_LOAD_FINISH, 0x01, 87),
+ PMC_POWERPC_EVENT(LSU_MISALIGN_STORE_COMPLETED, 0x01, 88),
+ PMC_POWERPC_EVENT(LSU_MISALIGN_STALL, 0x01, 89),
+ PMC_POWERPC_EVENT(FP_ONE_QUARTER_FPSCR_RENAMES_BUSY, 0x01, 90),
+ PMC_POWERPC_EVENT(FP_ONE_HALF_FPSCR_RENAMES_BUSY, 0x01, 91),
+ PMC_POWERPC_EVENT(FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY, 0x01, 92),
+ PMC_POWERPC_EVENT(FP_ALL_FPSCR_RENAMES_BUSY, 0x01, 93),
+ PMC_POWERPC_EVENT(FP_DENORMALIZED_RESULT, 0x01, 94),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISSES, 0x02, 23),
+ PMC_POWERPC_EVENT(DISPATCHES_TO_FPR_ISSUE_QUEUE, 0x02, 24),
+ PMC_POWERPC_EVENT(LSU_INSTR_COMPLETED, 0x02, 25),
+ PMC_POWERPC_EVENT(LOAD_INSTR_COMPLETED, 0x02, 26),
+ PMC_POWERPC_EVENT(SS_SM_INSTR_COMPLETED, 0x02, 27),
+ PMC_POWERPC_EVENT(TLBIE_INSTR_COMPLETED, 0x02, 28),
+ PMC_POWERPC_EVENT(LWARX_INSTR_COMPLETED, 0x02, 29),
+ PMC_POWERPC_EVENT(MFSPR_INSTR_COMPLETED, 0x02, 30),
+ PMC_POWERPC_EVENT(REFETCH_SERIALIZATION, 0x02, 31),
+ PMC_POWERPC_EVENT(COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD, 0x02, 32),
+ PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x02, 33),
+ PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x02, 34),
+ PMC_POWERPC_EVENT(ITLB_NON_SPECULATIVE_MISSES, 0x02, 35),
+ PMC_POWERPC_EVENT(CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS, 0x02, 36),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_ACCESS_MISS, 0x02, 37),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS, 0x02, 38),
+ PMC_POWERPC_EVENT(L1_DATA_STORE_MISS, 0x02, 39),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS_CYCLES, 0x02, 40),
+ PMC_POWERPC_EVENT(L1_DATA_CYCLES_USED, 0x02, 41),
+ PMC_POWERPC_EVENT(DST_STREAM_1_CACHE_LINE_FETCHES, 0x02, 42),
+ PMC_POWERPC_EVENT(VTQ_STREAM_CANCELED_PREMATURELY, 0x02, 43),
+ PMC_POWERPC_EVENT(VTQ_RESUMES_DUE_TO_CTX_CHANGE, 0x02, 44),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH_MISS, 0x02, 45),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH, 0x02, 46),
+ PMC_POWERPC_EVENT(TLBIE_SNOOPS, 0x02, 47),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_RELOADS, 0x02, 48),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_RELOADS, 0x02, 49),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_CASTOUTS_TO_L2, 0x02, 50),
+ PMC_POWERPC_EVENT(STORE_MERGE_GATHER, 0x02, 51),
+ PMC_POWERPC_EVENT(CACHEABLE_STORE_MERGE_TO_32_BYTES, 0x02, 52),
+ PMC_POWERPC_EVENT(DATA_BKPT_MATCHES, 0x02, 53),
+ PMC_POWERPC_EVENT(FALL_THROUGH_BRANCHES_PROCESSED, 0x02, 54),
+ PMC_POWERPC_EVENT(FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x02, 55),
+ PMC_POWERPC_EVENT(SECOND_SPECULATION_BUFFER_ACTIVE, 0x02, 56),
+ PMC_POWERPC_EVENT(BPU_STALL_ON_LR_DEPENDENCY, 0x02, 57),
+ PMC_POWERPC_EVENT(BTIC_MISS, 0x02, 58),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STACK_CORRECTLY_RESOLVED, 0x02, 59),
+ PMC_POWERPC_EVENT(FPR_ISSUE_STALLED, 0x02, 60),
+ PMC_POWERPC_EVENT(SWITCHES_BETWEEN_PRIV_USER, 0x02, 61),
+ PMC_POWERPC_EVENT(LSU_COMPLETES_FP_STORE_SINGLE, 0x02, 62),
+ PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x04, 8),
+ PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x04, 9),
+ PMC_POWERPC_EVENT(VR_ISSUE_QUEUE_DISPATCHES, 0x04, 10),
+ PMC_POWERPC_EVENT(VR_STALLS, 0x04, 11),
+ PMC_POWERPC_EVENT(GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD, 0x04, 12),
+ PMC_POWERPC_EVENT(FPR_ISSUE_QUEUE_ENTRIES, 0x04, 13),
+ PMC_POWERPC_EVENT(FPU_INSTR_COMPLETED, 0x04, 14),
+ PMC_POWERPC_EVENT(STWCX_INSTR_COMPLETED, 0x04, 15),
+ PMC_POWERPC_EVENT(LS_LM_INSTR_PIECES, 0x04, 16),
+ PMC_POWERPC_EVENT(ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x04, 17),
+ PMC_POWERPC_EVENT(DTLB_MISSES, 0x04, 18),
+ PMC_POWERPC_EVENT(CANCELLED_L1_INSTR_CACHE_MISSES, 0x04, 19),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_OP_HIT, 0x04, 20),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_MISS_CYCLES, 0x04, 21),
+ PMC_POWERPC_EVENT(L1_DATA_PUSHES, 0x04, 22),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISS, 0x04, 23),
+ PMC_POWERPC_EVENT(VT2_FETCHES, 0x04, 24),
+ PMC_POWERPC_EVENT(TAKEN_BRANCHES_PROCESSED, 0x04, 25),
+ PMC_POWERPC_EVENT(BRANCH_FLUSHES, 0x04, 26),
+ PMC_POWERPC_EVENT(SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x04, 27),
+ PMC_POWERPC_EVENT(THIRD_SPECULATION_BUFFER_ACTIVE, 0x04, 28),
+ PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY, 0x04, 29),
+ PMC_POWERPC_EVENT(FAST_BTIC_HIT, 0x04, 30),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STACK_MISPREDICTED, 0x04, 31),
+ PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_COMPLETED, 0x08, 14),
+ PMC_POWERPC_EVENT(CYCLES_NO_INSTR_DISPATCHED, 0x08, 15),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD, 0x08, 16),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_STALLED, 0x08, 17),
+ PMC_POWERPC_EVENT(IU1_INSTR_COMPLETED, 0x08, 18),
+ PMC_POWERPC_EVENT(DSSALL_INSTR_COMPLETED, 0x08, 19),
+ PMC_POWERPC_EVENT(TLBSYNC_INSTR_COMPLETED, 0x08, 20),
+ PMC_POWERPC_EVENT(SYNC_INSTR_COMPLETED, 0x08, 21),
+ PMC_POWERPC_EVENT(SS_SM_INSTR_PIECES, 0x08, 22),
+ PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES, 0x08, 23),
+ PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x08, 24),
+ PMC_POWERPC_EVENT(SUCCESSFUL_STWCX, 0x08, 25),
+ PMC_POWERPC_EVENT(DST_STREAM_3_CACHE_LINE_FETCHES, 0x08, 26),
+ PMC_POWERPC_EVENT(THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x08, 27),
+ PMC_POWERPC_EVENT(MISPREDICTED_BRANCHES, 0x08, 28),
+ PMC_POWERPC_EVENT(FOLDED_BRANCHES, 0x08, 29),
+ PMC_POWERPC_EVENT(FP_STORE_DOUBLE_COMPLETES_IN_LSU, 0x08, 30),
+ PMC_POWERPC_EVENT(L2_CACHE_HITS, 0x30, 2),
+ PMC_POWERPC_EVENT(L3_CACHE_HITS, 0x30, 3),
+ PMC_POWERPC_EVENT(L2_INSTR_CACHE_MISSES, 0x30, 4),
+ PMC_POWERPC_EVENT(L3_INSTR_CACHE_MISSES, 0x30, 5),
+ PMC_POWERPC_EVENT(L2_DATA_CACHE_MISSES, 0x30, 6),
+ PMC_POWERPC_EVENT(L3_DATA_CACHE_MISSES, 0x30, 7),
+ PMC_POWERPC_EVENT(L2_LOAD_HITS, 0x10, 8),
+ PMC_POWERPC_EVENT(L2_STORE_HITS, 0x10, 9),
+ PMC_POWERPC_EVENT(L3_LOAD_HITS, 0x10, 10),
+ PMC_POWERPC_EVENT(L3_STORE_HITS, 0x10, 11),
+ PMC_POWERPC_EVENT(L2_TOUCH_HITS, 0x30, 13),
+ PMC_POWERPC_EVENT(L3_TOUCH_HITS, 0x30, 14),
+ PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x30, 15),
+ PMC_POWERPC_EVENT(SNOOP_MODIFIED, 0x10, 16),
+ PMC_POWERPC_EVENT(SNOOP_VALID, 0x10, 17),
+ PMC_POWERPC_EVENT(INTERVENTION, 0x30, 18),
+ PMC_POWERPC_EVENT(L2_CACHE_MISSES, 0x10, 19),
+ PMC_POWERPC_EVENT(L3_CACHE_MISSES, 0x10, 20),
+ PMC_POWERPC_EVENT(L2_CACHE_CASTOUTS, 0x20, 8),
+ PMC_POWERPC_EVENT(L3_CACHE_CASTOUTS, 0x20, 9),
+ PMC_POWERPC_EVENT(L2SQ_FULL_CYCLES, 0x20, 10),
+ PMC_POWERPC_EVENT(L3SQ_FULL_CYCLES, 0x20, 11),
+ PMC_POWERPC_EVENT(RAQ_FULL_CYCLES, 0x20, 16),
+ PMC_POWERPC_EVENT(WAQ_FULL_CYCLES, 0x20, 17),
+ PMC_POWERPC_EVENT(L1_EXTERNAL_INTERVENTIONS, 0x20, 19),
+ PMC_POWERPC_EVENT(L2_EXTERNAL_INTERVENTIONS, 0x20, 20),
+ PMC_POWERPC_EVENT(L3_EXTERNAL_INTERVENTIONS, 0x20, 21),
+ PMC_POWERPC_EVENT(EXTERNAL_INTERVENTIONS, 0x20, 22),
+ PMC_POWERPC_EVENT(EXTERNAL_PUSHES, 0x20, 23),
+ PMC_POWERPC_EVENT(EXTERNAL_SNOOP_RETRY, 0x20, 24),
+ PMC_POWERPC_EVENT(DTQ_FULL_CYCLES, 0x20, 25),
+ PMC_POWERPC_EVENT(BUS_RETRY, 0x20, 26),
+ PMC_POWERPC_EVENT(L2_VALID_REQUEST, 0x20, 27),
+ PMC_POWERPC_EVENT(BORDQ_FULL, 0x20, 28),
+ PMC_POWERPC_EVENT(BUS_TAS_FOR_READS, 0x20, 42),
+ PMC_POWERPC_EVENT(BUS_TAS_FOR_WRITES, 0x20, 43),
+ PMC_POWERPC_EVENT(BUS_READS_NOT_RETRIED, 0x20, 44),
+ PMC_POWERPC_EVENT(BUS_WRITES_NOT_RETRIED, 0x20, 45),
+ PMC_POWERPC_EVENT(BUS_READS_WRITES_NOT_RETRIED, 0x20, 46),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_L1_RETRY, 0x20, 47),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT, 0x20, 48),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_COLLISION, 0x20, 49),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_INTERVENTION_ORDERING, 0x20, 50),
+ PMC_POWERPC_EVENT(SNOOP_REQUESTS, 0x20, 51),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_REQUEST, 0x20, 52),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD, 0x20, 53),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_STORE, 0x20, 54),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH, 0x20, 55),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH, 0x20, 56),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_FULL, 0x20, 57)
+};
+
+const size_t powerpc_event_codes_size =
+ sizeof(powerpc_event_codes) / sizeof(powerpc_event_codes[0]);
+
+static pmc_value_t
+mpc7xxx_pmcn_read(unsigned int pmc)
+{
+ switch (pmc) {
+ case 0:
+ return mfspr(SPR_PMC1);
+ break;
+ case 1:
+ return mfspr(SPR_PMC2);
+ break;
+ case 2:
+ return mfspr(SPR_PMC3);
+ break;
+ case 3:
+ return mfspr(SPR_PMC4);
+ break;
+ case 4:
+ return mfspr(SPR_PMC5);
+ break;
+ case 5:
+ return mfspr(SPR_PMC6);
+ default:
+ panic("Invalid PMC number: %d\n", pmc);
+ }
+}
+
+static void
+mpc7xxx_pmcn_write(unsigned int pmc, uint32_t val)
+{
+ switch (pmc) {
+ case 0:
+ mtspr(SPR_PMC1, val);
+ break;
+ case 1:
+ mtspr(SPR_PMC2, val);
+ break;
+ case 2:
+ mtspr(SPR_PMC3, val);
+ break;
+ case 3:
+ mtspr(SPR_PMC4, val);
+ break;
+ case 4:
+ mtspr(SPR_PMC5, val);
+ break;
+ case 5:
+ mtspr(SPR_PMC6, val);
+ break;
+ default:
+ panic("Invalid PMC number: %d\n", pmc);
+ }
+}
+
+static int
+mpc7xxx_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
+ ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+ pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+ KASSERT(pm,
+ ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
+ ri));
+
+ tmp = mpc7xxx_pmcn_read(ri);
+ PMCDBG(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ else
+ *v = tmp;
+
+ return 0;
+}
+
+static int
+mpc7xxx_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ PMCDBG(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ mpc7xxx_pmcn_write(ri, v);
+
+ return 0;
+}
+
+static int
+mpc7xxx_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return 0;
+}
+
+static int
+mpc7xxx_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ register_t pmc_mmcr;
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ pm = phw->phw_pmc;
+ config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE;
+
+ /* Enable the PMC. */
+ switch (ri) {
+ case 0:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 1:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 2:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 3:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 4:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 5:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ default:
+ break;
+ }
+
+ /* The mask is inverted (enable is 1) compared to the flags in MMCR0, which
+ * are Freeze flags.
+ */
+ config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE;
+
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr &= ~SPR_MMCR0_FC;
+ pmc_mmcr |= config;
+ mtspr(SPR_MMCR0, pmc_mmcr);
+
+ return 0;
+}
+
+static int
+mpc7xxx_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ register_t pmc_mmcr;
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ pm = phw->phw_pmc;
+
+ /*
+ * Disable the PMCs.
+ */
+ switch (ri) {
+ case 0:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 1:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 2:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 3:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 4:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 5:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
+mpc7xxx_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, i;
+ struct pmc_cpu *pc;
+ struct powerpc_cpu *pac;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
+ PMCDBG(MDP,INI,1,"powerpc-init cpu=%d", cpu);
+
+ powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+ pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * MPC7XXX_MAX_PMCS,
+ M_PMC, M_WAITOK|M_ZERO);
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450].pcd_ri;
+ KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
+
+ for (i = 0, phw = pac->pc_ppcpmcs; i < MPC7XXX_MAX_PMCS; i++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[i + first_ri] = phw;
+ }
+
+ /* Clear the MMCRs, and set FC, to disable all PMCs. */
+ mtspr(SPR_MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE);
+ mtspr(SPR_MMCR1, 0);
+
+ return 0;
+}
+
+static int
+mpc7xxx_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ uint32_t mmcr0 = mfspr(SPR_MMCR0);
+
+ mmcr0 |= SPR_MMCR0_FC;
+ mtspr(SPR_MMCR0, mmcr0);
+ free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
+ free(powerpc_pcpu[cpu], M_PMC);
+ return 0;
+}
+
+static int
+mpc7xxx_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event pe;
+ uint32_t caps, config, counter;
+ int i;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
+ ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+ caps = a->pm_caps;
+
+ pe = a->pm_ev;
+ for (i = 0; i < powerpc_event_codes_size; i++) {
+ if (powerpc_event_codes[i].pe_ev == pe) {
+ config = powerpc_event_codes[i].pe_code;
+ counter = powerpc_event_codes[i].pe_counter_mask;
+ break;
+ }
+ }
+ if (i == powerpc_event_codes_size)
+ return (EINVAL);
+
+ if ((counter & (1 << ri)) == 0)
+ return (EINVAL);
+
+ if (caps & PMC_CAP_SYSTEM)
+ config |= POWERPC_PMC_KERNEL_ENABLE;
+ if (caps & PMC_CAP_USER)
+ config |= POWERPC_PMC_USER_ENABLE;
+ if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+ config |= POWERPC_PMC_ENABLE;
+
+ pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
+
+ PMCDBG(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
+
+static int
+mpc7xxx_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ KASSERT(phw->phw_pmc == NULL,
+ ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ return 0;
+}
+
+static int
+mpc7xxx_intr(int cpu, struct trapframe *tf)
+{
+ int i, error, retval;
+ uint32_t config;
+ struct pmc *pm;
+ struct powerpc_cpu *pac;
+ pmc_value_t v;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
+ TRAPF_USERMODE(tf));
+
+ retval = 0;
+
+ pac = powerpc_pcpu[cpu];
+
+ config = mfspr(SPR_MMCR0);
+ mtspr(SPR_MMCR0, config | SPR_MMCR0_FC);
+
+ /*
+ * look for all PMCs that have interrupted:
+ * - look for a running, sampling PMC which has overflowed
+ * and which has a valid 'struct pmc' association
+ *
+ * If found, we call a helper to process the interrupt.
+ */
+
+ for (i = 0; i < MPC7XXX_MAX_PMCS; i++) {
+ if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ continue;
+ }
+
+ if (!MPC7XXX_PMC_HAS_OVERFLOWED(i))
+ continue;
+
+ retval = 1; /* Found an interrupting PMC. */
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ /* Stop the PMC, reload count. */
+ v = pm->pm_sc.pm_reloadcount;
+ mpc7xxx_pmcn_write(i, v);
+
+ /* Restart the counter if logging succeeded. */
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error != 0)
+ mpc7xxx_stop_pmc(cpu, i);
+ atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ }
+
+ /* Re-enable PERF exceptions. */
+ mtspr(SPR_MMCR0, config | SPR_MMCR0_PMXE);
+
+ return (retval);
+}
+
+int
+pmc_mpc7xxx_initialize(struct pmc_mdep *pmc_mdep)
+{
+ struct pmc_classdep *pcd;
+
+ pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450];
+ pcd->pcd_caps = POWERPC_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_PPC7450;
+ pcd->pcd_num = MPC7XXX_MAX_PMCS;
+ pcd->pcd_ri = pmc_mdep->pmd_npmc;
+ pcd->pcd_width = 32; /* All PMCs, even in ppc970, are 32-bit */
+
+ pcd->pcd_allocate_pmc = mpc7xxx_allocate_pmc;
+ pcd->pcd_config_pmc = mpc7xxx_config_pmc;
+ pcd->pcd_pcpu_fini = mpc7xxx_pcpu_fini;
+ pcd->pcd_pcpu_init = mpc7xxx_pcpu_init;
+ pcd->pcd_read_pmc = mpc7xxx_read_pmc;
+ pcd->pcd_release_pmc = mpc7xxx_release_pmc;
+ pcd->pcd_start_pmc = mpc7xxx_start_pmc;
+ pcd->pcd_stop_pmc = mpc7xxx_stop_pmc;
+ pcd->pcd_write_pmc = mpc7xxx_write_pmc;
+
+ pmc_mdep->pmd_npmc += MPC7XXX_MAX_PMCS;
+ pmc_mdep->pmd_intr = mpc7xxx_intr;
+
+ return 0;
+}
diff --git a/sys/dev/hwpmc/hwpmc_powerpc.c b/sys/dev/hwpmc/hwpmc_powerpc.c
index a54ee62..25a32fa 100644
--- a/sys/dev/hwpmc/hwpmc_powerpc.c
+++ b/sys/dev/hwpmc/hwpmc_powerpc.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2011 Justin Hibbits
+ * Copyright (c) 2011,2013 Justin Hibbits
* Copyright (c) 2005, Joseph Koshy
* All rights reserved.
*
@@ -36,677 +36,50 @@ __FBSDID("$FreeBSD$");
#include <machine/pmc_mdep.h>
#include <machine/spr.h>
+#include <machine/pte.h>
+#include <machine/sr.h>
#include <machine/cpu.h>
+#include <machine/vmparam.h> /* For VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS */
-#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
- PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
- PMC_CAP_THRESHOLD | PMC_CAP_READ | \
- PMC_CAP_WRITE | PMC_CAP_INVERT | \
- PMC_CAP_QUALIFIER)
+#include "hwpmc_powerpc.h"
-#define PPC_SET_PMC1SEL(r, x) ((r & ~(SPR_MMCR0_PMC1SEL(0x3f))) | SPR_MMCR0_PMC1SEL(x))
-#define PPC_SET_PMC2SEL(r, x) ((r & ~(SPR_MMCR0_PMC2SEL(0x3f))) | SPR_MMCR0_PMC2SEL(x))
-#define PPC_SET_PMC3SEL(r, x) ((r & ~(SPR_MMCR1_PMC3SEL(0x1f))) | SPR_MMCR1_PMC3SEL(x))
-#define PPC_SET_PMC4SEL(r, x) ((r & ~(SPR_MMCR1_PMC4SEL(0x1f))) | SPR_MMCR1_PMC4SEL(x))
-#define PPC_SET_PMC5SEL(r, x) ((r & ~(SPR_MMCR1_PMC5SEL(0x1f))) | SPR_MMCR1_PMC5SEL(x))
-#define PPC_SET_PMC6SEL(r, x) ((r & ~(SPR_MMCR1_PMC6SEL(0x3f))) | SPR_MMCR1_PMC6SEL(x))
+#define INKERNEL(x) (((vm_offset_t)(x)) <= VM_MAX_KERNEL_ADDRESS && \
+ ((vm_offset_t)(x)) >= VM_MIN_KERNEL_ADDRESS)
-/* Change this when we support more than just the 7450. */
-#define PPC_MAX_PMCS 6
-
-#define POWERPC_PMC_KERNEL_ENABLE (0x1 << 30)
-#define POWERPC_PMC_USER_ENABLE (0x1 << 31)
-
-#define POWERPC_PMC_ENABLE (POWERPC_PMC_KERNEL_ENABLE | POWERPC_PMC_USER_ENABLE)
-#define POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (0x80000000-(V))
-#define POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(P) ((P)-0x80000000)
-#define POWERPC_PMC_HAS_OVERFLOWED(x) (powerpc_pmcn_read(x) & (0x1 << 31))
-
-
-/*
- * This should work for every 32-bit PowerPC implementation I know of (G3 and G4
- * specifically). PoewrPC 970 will take more work.
- */
-
-/*
- * Per-processor information.
- */
-struct powerpc_cpu {
- struct pmc_hw *pc_ppcpmcs;
-};
-
-static struct powerpc_cpu **powerpc_pcpu;
-
-struct powerpc_event_code_map {
- enum pmc_event pe_ev; /* enum value */
- uint8_t pe_counter_mask; /* Which counter this can be counted in. */
- uint8_t pe_code; /* numeric code */
-};
-
-#define PPC_PMC_MASK1 0
-#define PPC_PMC_MASK2 1
-#define PPC_PMC_MASK3 2
-#define PPC_PMC_MASK4 3
-#define PPC_PMC_MASK5 4
-#define PPC_PMC_MASK6 5
-#define PPC_PMC_MASK_ALL 0x3f
-
-#define PMC_POWERPC_EVENT(id, mask, number) \
- { .pe_ev = PMC_EV_PPC7450_##id, .pe_counter_mask = mask, .pe_code = number }
-
-static struct powerpc_event_code_map powerpc_event_codes[] = {
- PMC_POWERPC_EVENT(CYCLE,PPC_PMC_MASK_ALL, 1),
- PMC_POWERPC_EVENT(INSTR_COMPLETED, 0x0f, 2),
- PMC_POWERPC_EVENT(TLB_BIT_TRANSITIONS, 0x0f, 3),
- PMC_POWERPC_EVENT(INSTR_DISPATCHED, 0x0f, 4),
- PMC_POWERPC_EVENT(PMON_EXCEPT, 0x0f, 5),
- PMC_POWERPC_EVENT(PMON_SIG, 0x0f, 7),
- PMC_POWERPC_EVENT(VPU_INSTR_COMPLETED, 0x03, 8),
- PMC_POWERPC_EVENT(VFPU_INSTR_COMPLETED, 0x03, 9),
- PMC_POWERPC_EVENT(VIU1_INSTR_COMPLETED, 0x03, 10),
- PMC_POWERPC_EVENT(VIU2_INSTR_COMPLETED, 0x03, 11),
- PMC_POWERPC_EVENT(MTVSCR_INSTR_COMPLETED, 0x03, 12),
- PMC_POWERPC_EVENT(MTVRSAVE_INSTR_COMPLETED, 0x03, 13),
- PMC_POWERPC_EVENT(VPU_INSTR_WAIT_CYCLES, 0x03, 14),
- PMC_POWERPC_EVENT(VFPU_INSTR_WAIT_CYCLES, 0x03, 15),
- PMC_POWERPC_EVENT(VIU1_INSTR_WAIT_CYCLES, 0x03, 16),
- PMC_POWERPC_EVENT(VIU2_INSTR_WAIT_CYCLES, 0x03, 17),
- PMC_POWERPC_EVENT(MFVSCR_SYNC_CYCLES, 0x03, 18),
- PMC_POWERPC_EVENT(VSCR_SAT_SET, 0x03, 19),
- PMC_POWERPC_EVENT(STORE_INSTR_COMPLETED, 0x03, 20),
- PMC_POWERPC_EVENT(L1_INSTR_CACHE_MISSES, 0x03, 21),
- PMC_POWERPC_EVENT(L1_DATA_SNOOPS, 0x03, 22),
- PMC_POWERPC_EVENT(UNRESOLVED_BRANCHES, 0x01, 23),
- PMC_POWERPC_EVENT(SPEC_BUFFER_CYCLES, 0x01, 24),
- PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_CYCLES, 0x01, 25),
- PMC_POWERPC_EVENT(TRUE_BRANCH_TARGET_HITS, 0x01, 26),
- PMC_POWERPC_EVENT(BRANCH_LINK_STAC_PREDICTED, 0x01, 27),
- PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_DISPATCHES, 0x01, 28),
- PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_DISPATCHED, 0x01, 29),
- PMC_POWERPC_EVENT(THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 30),
- PMC_POWERPC_EVENT(THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 31),
- PMC_POWERPC_EVENT(CYCLES_NO_COMPLETED_INSTRS, 0x01, 32),
- PMC_POWERPC_EVENT(IU2_INSTR_COMPLETED, 0x01, 33),
- PMC_POWERPC_EVENT(BRANCHES_COMPLETED, 0x01, 34),
- PMC_POWERPC_EVENT(EIEIO_INSTR_COMPLETED, 0x01, 35),
- PMC_POWERPC_EVENT(MTSPR_INSTR_COMPLETED, 0x01, 36),
- PMC_POWERPC_EVENT(SC_INSTR_COMPLETED, 0x01, 37),
- PMC_POWERPC_EVENT(LS_LM_COMPLETED, 0x01, 38),
- PMC_POWERPC_EVENT(ITLB_HW_TABLE_SEARCH_CYCLES, 0x01, 39),
- PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x01, 40),
- PMC_POWERPC_EVENT(L1_INSTR_CACHE_ACCESSES, 0x01, 41),
- PMC_POWERPC_EVENT(INSTR_BKPT_MATCHES, 0x01, 42),
- PMC_POWERPC_EVENT(L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD, 0x01, 43),
- PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_ON_MODIFIED, 0x01, 44),
- PMC_POWERPC_EVENT(LOAD_MISS_ALIAS, 0x01, 45),
- PMC_POWERPC_EVENT(LOAD_MISS_ALIAS_ON_TOUCH, 0x01, 46),
- PMC_POWERPC_EVENT(TOUCH_ALIAS, 0x01, 47),
- PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT_QUEUE, 0x01, 48),
- PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT, 0x01, 49),
- PMC_POWERPC_EVENT(L1_DATA_SNOOP_HITS, 0x01, 50),
- PMC_POWERPC_EVENT(WRITE_THROUGH_STORES, 0x01, 51),
- PMC_POWERPC_EVENT(CACHE_INHIBITED_STORES, 0x01, 52),
- PMC_POWERPC_EVENT(L1_DATA_LOAD_HIT, 0x01, 53),
- PMC_POWERPC_EVENT(L1_DATA_TOUCH_HIT, 0x01, 54),
- PMC_POWERPC_EVENT(L1_DATA_STORE_HIT, 0x01, 55),
- PMC_POWERPC_EVENT(L1_DATA_TOTAL_HITS, 0x01, 56),
- PMC_POWERPC_EVENT(DST_INSTR_DISPATCHED, 0x01, 57),
- PMC_POWERPC_EVENT(REFRESHED_DSTS, 0x01, 58),
- PMC_POWERPC_EVENT(SUCCESSFUL_DST_TABLE_SEARCHES, 0x01, 59),
- PMC_POWERPC_EVENT(DSS_INSTR_COMPLETED, 0x01, 60),
- PMC_POWERPC_EVENT(DST_STREAM_0_CACHE_LINE_FETCHES, 0x01, 61),
- PMC_POWERPC_EVENT(VTQ_SUSPENDS_DUE_TO_CTX_CHANGE, 0x01, 62),
- PMC_POWERPC_EVENT(VTQ_LINE_FETCH_HIT, 0x01, 63),
- PMC_POWERPC_EVENT(VEC_LOAD_INSTR_COMPLETED, 0x01, 64),
- PMC_POWERPC_EVENT(FP_STORE_INSTR_COMPLETED_IN_LSU, 0x01, 65),
- PMC_POWERPC_EVENT(FPU_RENORMALIZATION, 0x01, 66),
- PMC_POWERPC_EVENT(FPU_DENORMALIZATION, 0x01, 67),
- PMC_POWERPC_EVENT(FP_STORE_CAUSES_STALL_IN_LSU, 0x01, 68),
- PMC_POWERPC_EVENT(LD_ST_TRUE_ALIAS_STALL, 0x01, 70),
- PMC_POWERPC_EVENT(LSU_INDEXED_ALIAS_STALL, 0x01, 71),
- PMC_POWERPC_EVENT(LSU_ALIAS_VS_FSQ_WB0_WB1, 0x01, 72),
- PMC_POWERPC_EVENT(LSU_ALIAS_VS_CSQ, 0x01, 73),
- PMC_POWERPC_EVENT(LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0, 0x01, 74),
- PMC_POWERPC_EVENT(LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0, 0x01, 75),
- PMC_POWERPC_EVENT(LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1, 0x01, 76),
- PMC_POWERPC_EVENT(LSU_TOUCH_ALIAS_VS_CSQ, 0x01, 77),
- PMC_POWERPC_EVENT(LSU_LMQ_FULL_STALL, 0x01, 78),
- PMC_POWERPC_EVENT(FP_LOAD_INSTR_COMPLETED_IN_LSU, 0x01, 79),
- PMC_POWERPC_EVENT(FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU, 0x01, 80),
- PMC_POWERPC_EVENT(FP_LOAD_DOUBLE_COMPLETED_IN_LSU, 0x01, 81),
- PMC_POWERPC_EVENT(LSU_RA_LATCH_STALL, 0x01, 82),
- PMC_POWERPC_EVENT(LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL, 0x01, 83),
- PMC_POWERPC_EVENT(LSU_LMQ_INDEX_ALIAS, 0x01, 84),
- PMC_POWERPC_EVENT(LSU_STORE_QUEUE_INDEX_ALIAS, 0x01, 85),
- PMC_POWERPC_EVENT(LSU_CSQ_FORWARDING, 0x01, 86),
- PMC_POWERPC_EVENT(LSU_MISALIGNED_LOAD_FINISH, 0x01, 87),
- PMC_POWERPC_EVENT(LSU_MISALIGN_STORE_COMPLETED, 0x01, 88),
- PMC_POWERPC_EVENT(LSU_MISALIGN_STALL, 0x01, 89),
- PMC_POWERPC_EVENT(FP_ONE_QUARTER_FPSCR_RENAMES_BUSY, 0x01, 90),
- PMC_POWERPC_EVENT(FP_ONE_HALF_FPSCR_RENAMES_BUSY, 0x01, 91),
- PMC_POWERPC_EVENT(FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY, 0x01, 92),
- PMC_POWERPC_EVENT(FP_ALL_FPSCR_RENAMES_BUSY, 0x01, 93),
- PMC_POWERPC_EVENT(FP_DENORMALIZED_RESULT, 0x01, 94),
- PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISSES, 0x02, 23),
- PMC_POWERPC_EVENT(DISPATCHES_TO_FPR_ISSUE_QUEUE, 0x02, 24),
- PMC_POWERPC_EVENT(LSU_INSTR_COMPLETED, 0x02, 25),
- PMC_POWERPC_EVENT(LOAD_INSTR_COMPLETED, 0x02, 26),
- PMC_POWERPC_EVENT(SS_SM_INSTR_COMPLETED, 0x02, 27),
- PMC_POWERPC_EVENT(TLBIE_INSTR_COMPLETED, 0x02, 28),
- PMC_POWERPC_EVENT(LWARX_INSTR_COMPLETED, 0x02, 29),
- PMC_POWERPC_EVENT(MFSPR_INSTR_COMPLETED, 0x02, 30),
- PMC_POWERPC_EVENT(REFETCH_SERIALIZATION, 0x02, 31),
- PMC_POWERPC_EVENT(COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD, 0x02, 32),
- PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x02, 33),
- PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x02, 34),
- PMC_POWERPC_EVENT(ITLB_NON_SPECULATIVE_MISSES, 0x02, 35),
- PMC_POWERPC_EVENT(CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS, 0x02, 36),
- PMC_POWERPC_EVENT(L1_DATA_LOAD_ACCESS_MISS, 0x02, 37),
- PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS, 0x02, 38),
- PMC_POWERPC_EVENT(L1_DATA_STORE_MISS, 0x02, 39),
- PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS_CYCLES, 0x02, 40),
- PMC_POWERPC_EVENT(L1_DATA_CYCLES_USED, 0x02, 41),
- PMC_POWERPC_EVENT(DST_STREAM_1_CACHE_LINE_FETCHES, 0x02, 42),
- PMC_POWERPC_EVENT(VTQ_STREAM_CANCELED_PREMATURELY, 0x02, 43),
- PMC_POWERPC_EVENT(VTQ_RESUMES_DUE_TO_CTX_CHANGE, 0x02, 44),
- PMC_POWERPC_EVENT(VTQ_LINE_FETCH_MISS, 0x02, 45),
- PMC_POWERPC_EVENT(VTQ_LINE_FETCH, 0x02, 46),
- PMC_POWERPC_EVENT(TLBIE_SNOOPS, 0x02, 47),
- PMC_POWERPC_EVENT(L1_INSTR_CACHE_RELOADS, 0x02, 48),
- PMC_POWERPC_EVENT(L1_DATA_CACHE_RELOADS, 0x02, 49),
- PMC_POWERPC_EVENT(L1_DATA_CACHE_CASTOUTS_TO_L2, 0x02, 50),
- PMC_POWERPC_EVENT(STORE_MERGE_GATHER, 0x02, 51),
- PMC_POWERPC_EVENT(CACHEABLE_STORE_MERGE_TO_32_BYTES, 0x02, 52),
- PMC_POWERPC_EVENT(DATA_BKPT_MATCHES, 0x02, 53),
- PMC_POWERPC_EVENT(FALL_THROUGH_BRANCHES_PROCESSED, 0x02, 54),
- PMC_POWERPC_EVENT(FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x02, 55),
- PMC_POWERPC_EVENT(SECOND_SPECULATION_BUFFER_ACTIVE, 0x02, 56),
- PMC_POWERPC_EVENT(BPU_STALL_ON_LR_DEPENDENCY, 0x02, 57),
- PMC_POWERPC_EVENT(BTIC_MISS, 0x02, 58),
- PMC_POWERPC_EVENT(BRANCH_LINK_STACK_CORRECTLY_RESOLVED, 0x02, 59),
- PMC_POWERPC_EVENT(FPR_ISSUE_STALLED, 0x02, 60),
- PMC_POWERPC_EVENT(SWITCHES_BETWEEN_PRIV_USER, 0x02, 61),
- PMC_POWERPC_EVENT(LSU_COMPLETES_FP_STORE_SINGLE, 0x02, 62),
- PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x04, 8),
- PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x04, 9),
- PMC_POWERPC_EVENT(VR_ISSUE_QUEUE_DISPATCHES, 0x04, 10),
- PMC_POWERPC_EVENT(VR_STALLS, 0x04, 11),
- PMC_POWERPC_EVENT(GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD, 0x04, 12),
- PMC_POWERPC_EVENT(FPR_ISSUE_QUEUE_ENTRIES, 0x04, 13),
- PMC_POWERPC_EVENT(FPU_INSTR_COMPLETED, 0x04, 14),
- PMC_POWERPC_EVENT(STWCX_INSTR_COMPLETED, 0x04, 15),
- PMC_POWERPC_EVENT(LS_LM_INSTR_PIECES, 0x04, 16),
- PMC_POWERPC_EVENT(ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x04, 17),
- PMC_POWERPC_EVENT(DTLB_MISSES, 0x04, 18),
- PMC_POWERPC_EVENT(CANCELLED_L1_INSTR_CACHE_MISSES, 0x04, 19),
- PMC_POWERPC_EVENT(L1_DATA_CACHE_OP_HIT, 0x04, 20),
- PMC_POWERPC_EVENT(L1_DATA_LOAD_MISS_CYCLES, 0x04, 21),
- PMC_POWERPC_EVENT(L1_DATA_PUSHES, 0x04, 22),
- PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISS, 0x04, 23),
- PMC_POWERPC_EVENT(VT2_FETCHES, 0x04, 24),
- PMC_POWERPC_EVENT(TAKEN_BRANCHES_PROCESSED, 0x04, 25),
- PMC_POWERPC_EVENT(BRANCH_FLUSHES, 0x04, 26),
- PMC_POWERPC_EVENT(SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x04, 27),
- PMC_POWERPC_EVENT(THIRD_SPECULATION_BUFFER_ACTIVE, 0x04, 28),
- PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY, 0x04, 29),
- PMC_POWERPC_EVENT(FAST_BTIC_HIT, 0x04, 30),
- PMC_POWERPC_EVENT(BRANCH_LINK_STACK_MISPREDICTED, 0x04, 31),
- PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_COMPLETED, 0x08, 14),
- PMC_POWERPC_EVENT(CYCLES_NO_INSTR_DISPATCHED, 0x08, 15),
- PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD, 0x08, 16),
- PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_STALLED, 0x08, 17),
- PMC_POWERPC_EVENT(IU1_INSTR_COMPLETED, 0x08, 18),
- PMC_POWERPC_EVENT(DSSALL_INSTR_COMPLETED, 0x08, 19),
- PMC_POWERPC_EVENT(TLBSYNC_INSTR_COMPLETED, 0x08, 20),
- PMC_POWERPC_EVENT(SYNC_INSTR_COMPLETED, 0x08, 21),
- PMC_POWERPC_EVENT(SS_SM_INSTR_PIECES, 0x08, 22),
- PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES, 0x08, 23),
- PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x08, 24),
- PMC_POWERPC_EVENT(SUCCESSFUL_STWCX, 0x08, 25),
- PMC_POWERPC_EVENT(DST_STREAM_3_CACHE_LINE_FETCHES, 0x08, 26),
- PMC_POWERPC_EVENT(THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x08, 27),
- PMC_POWERPC_EVENT(MISPREDICTED_BRANCHES, 0x08, 28),
- PMC_POWERPC_EVENT(FOLDED_BRANCHES, 0x08, 29),
- PMC_POWERPC_EVENT(FP_STORE_DOUBLE_COMPLETES_IN_LSU, 0x08, 30),
- PMC_POWERPC_EVENT(L2_CACHE_HITS, 0x30, 2),
- PMC_POWERPC_EVENT(L3_CACHE_HITS, 0x30, 3),
- PMC_POWERPC_EVENT(L2_INSTR_CACHE_MISSES, 0x30, 4),
- PMC_POWERPC_EVENT(L3_INSTR_CACHE_MISSES, 0x30, 5),
- PMC_POWERPC_EVENT(L2_DATA_CACHE_MISSES, 0x30, 6),
- PMC_POWERPC_EVENT(L3_DATA_CACHE_MISSES, 0x30, 7),
- PMC_POWERPC_EVENT(L2_LOAD_HITS, 0x10, 8),
- PMC_POWERPC_EVENT(L2_STORE_HITS, 0x10, 9),
- PMC_POWERPC_EVENT(L3_LOAD_HITS, 0x10, 10),
- PMC_POWERPC_EVENT(L3_STORE_HITS, 0x10, 11),
- PMC_POWERPC_EVENT(L2_TOUCH_HITS, 0x30, 13),
- PMC_POWERPC_EVENT(L3_TOUCH_HITS, 0x30, 14),
- PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x30, 15),
- PMC_POWERPC_EVENT(SNOOP_MODIFIED, 0x10, 16),
- PMC_POWERPC_EVENT(SNOOP_VALID, 0x10, 17),
- PMC_POWERPC_EVENT(INTERVENTION, 0x30, 18),
- PMC_POWERPC_EVENT(L2_CACHE_MISSES, 0x10, 19),
- PMC_POWERPC_EVENT(L3_CACHE_MISSES, 0x10, 20),
- PMC_POWERPC_EVENT(L2_CACHE_CASTOUTS, 0x20, 8),
- PMC_POWERPC_EVENT(L3_CACHE_CASTOUTS, 0x20, 9),
- PMC_POWERPC_EVENT(L2SQ_FULL_CYCLES, 0x20, 10),
- PMC_POWERPC_EVENT(L3SQ_FULL_CYCLES, 0x20, 11),
- PMC_POWERPC_EVENT(RAQ_FULL_CYCLES, 0x20, 16),
- PMC_POWERPC_EVENT(WAQ_FULL_CYCLES, 0x20, 17),
- PMC_POWERPC_EVENT(L1_EXTERNAL_INTERVENTIONS, 0x20, 19),
- PMC_POWERPC_EVENT(L2_EXTERNAL_INTERVENTIONS, 0x20, 20),
- PMC_POWERPC_EVENT(L3_EXTERNAL_INTERVENTIONS, 0x20, 21),
- PMC_POWERPC_EVENT(EXTERNAL_INTERVENTIONS, 0x20, 22),
- PMC_POWERPC_EVENT(EXTERNAL_PUSHES, 0x20, 23),
- PMC_POWERPC_EVENT(EXTERNAL_SNOOP_RETRY, 0x20, 24),
- PMC_POWERPC_EVENT(DTQ_FULL_CYCLES, 0x20, 25),
- PMC_POWERPC_EVENT(BUS_RETRY, 0x20, 26),
- PMC_POWERPC_EVENT(L2_VALID_REQUEST, 0x20, 27),
- PMC_POWERPC_EVENT(BORDQ_FULL, 0x20, 28),
- PMC_POWERPC_EVENT(BUS_TAS_FOR_READS, 0x20, 42),
- PMC_POWERPC_EVENT(BUS_TAS_FOR_WRITES, 0x20, 43),
- PMC_POWERPC_EVENT(BUS_READS_NOT_RETRIED, 0x20, 44),
- PMC_POWERPC_EVENT(BUS_WRITES_NOT_RETRIED, 0x20, 45),
- PMC_POWERPC_EVENT(BUS_READS_WRITES_NOT_RETRIED, 0x20, 46),
- PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_L1_RETRY, 0x20, 47),
- PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT, 0x20, 48),
- PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_COLLISION, 0x20, 49),
- PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_INTERVENTION_ORDERING, 0x20, 50),
- PMC_POWERPC_EVENT(SNOOP_REQUESTS, 0x20, 51),
- PMC_POWERPC_EVENT(PREFETCH_ENGINE_REQUEST, 0x20, 52),
- PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD, 0x20, 53),
- PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_STORE, 0x20, 54),
- PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH, 0x20, 55),
- PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH, 0x20, 56),
- PMC_POWERPC_EVENT(PREFETCH_ENGINE_FULL, 0x20, 57)
-};
-
-const size_t powerpc_event_codes_size =
- sizeof(powerpc_event_codes) / sizeof(powerpc_event_codes[0]);
+struct powerpc_cpu **powerpc_pcpu;
int
pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
struct trapframe *tf)
{
- (void) cc;
- (void) maxsamples;
- (void) tf;
- return (0);
-}
+ int frames = 0;
+ uintptr_t *sp;
-static pmc_value_t
-powerpc_pmcn_read(unsigned int pmc)
-{
- switch (pmc) {
- case 0:
- return mfspr(SPR_PMC1);
- break;
- case 1:
- return mfspr(SPR_PMC2);
- break;
- case 2:
- return mfspr(SPR_PMC3);
- break;
- case 3:
- return mfspr(SPR_PMC4);
- break;
- case 4:
- return mfspr(SPR_PMC5);
- break;
- case 5:
- return mfspr(SPR_PMC6);
- default:
- panic("Invalid PMC number: %d\n", pmc);
- }
-}
-
-static void
-powerpc_pmcn_write(unsigned int pmc, uint32_t val)
-{
- switch (pmc) {
- case 0:
- mtspr(SPR_PMC1, val);
- break;
- case 1:
- mtspr(SPR_PMC2, val);
- break;
- case 2:
- mtspr(SPR_PMC3, val);
- break;
- case 3:
- mtspr(SPR_PMC4, val);
- break;
- case 4:
- mtspr(SPR_PMC5, val);
- break;
- case 5:
- mtspr(SPR_PMC6, val);
- break;
- default:
- panic("Invalid PMC number: %d\n", pmc);
- }
-}
-
-static int
-powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm,
- const struct pmc_op_pmcallocate *a)
-{
- enum pmc_event pe;
- uint32_t caps, config, counter;
- int i;
+ cc[frames++] = tf->srr0;
+ sp = (uintptr_t *)tf->fixreg[1];
- KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
- ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
- KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
- ("[powerpc,%d] illegal row index %d", __LINE__, ri));
-
- caps = a->pm_caps;
-
- /*
- * TODO: Check actual class for different generations.
- */
- if (a->pm_class != PMC_CLASS_PPC7450)
- return (EINVAL);
- pe = a->pm_ev;
- for (i = 0; i < powerpc_event_codes_size; i++) {
- if (powerpc_event_codes[i].pe_ev == pe) {
- config = powerpc_event_codes[i].pe_code;
- counter = powerpc_event_codes[i].pe_counter_mask;
+ for (frames = 1; frames < maxsamples; frames++) {
+ if (!INKERNEL(sp))
break;
- }
- }
- if (i == powerpc_event_codes_size)
- return (EINVAL);
-
- if ((counter & (1 << ri)) == 0)
- return (EINVAL);
-
- if (caps & PMC_CAP_SYSTEM)
- config |= POWERPC_PMC_KERNEL_ENABLE;
- if (caps & PMC_CAP_USER)
- config |= POWERPC_PMC_USER_ENABLE;
- if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
- config |= POWERPC_PMC_ENABLE;
-
- pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
-
- PMCDBG(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);
-
- return 0;
-}
-
-static int
-powerpc_read_pmc(int cpu, int ri, pmc_value_t *v)
-{
- struct pmc *pm;
- pmc_value_t tmp;
-
- KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
- ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
- KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
- ("[powerpc,%d] illegal row index %d", __LINE__, ri));
-
- pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
- tmp = powerpc_pmcn_read(ri);
- PMCDBG(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
- if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
- *v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
- else
- *v = tmp;
-
- return 0;
-}
-
-static int
-powerpc_write_pmc(int cpu, int ri, pmc_value_t v)
-{
- struct pmc *pm;
-
- KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
- ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
- KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
- ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
-
- pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
-
- if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
- v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
-
- PMCDBG(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);
-
- powerpc_pmcn_write(ri, v);
-
- return 0;
-}
-
-static int
-powerpc_config_pmc(int cpu, int ri, struct pmc *pm)
-{
- struct pmc_hw *phw;
-
- PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
-
- KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
- ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
- KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
- ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
-
- phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
-
- KASSERT(pm == NULL || phw->phw_pmc == NULL,
- ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
- __LINE__, pm, phw->phw_pmc));
-
- phw->phw_pmc = pm;
-
- return 0;
-}
-
-static int
-powerpc_start_pmc(int cpu, int ri)
-{
- uint32_t config;
- struct pmc *pm;
- struct pmc_hw *phw;
- register_t pmc_mmcr;
-
- phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
- pm = phw->phw_pmc;
- config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE;
-
- /* Enable the PMC. */
- switch (ri) {
- case 0:
- pmc_mmcr = mfspr(SPR_MMCR0);
- pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, config);
- mtspr(SPR_MMCR0, pmc_mmcr);
- break;
- case 1:
- pmc_mmcr = mfspr(SPR_MMCR0);
- pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, config);
- mtspr(SPR_MMCR0, pmc_mmcr);
- break;
- case 2:
- pmc_mmcr = mfspr(SPR_MMCR1);
- pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, config);
- mtspr(SPR_MMCR1, pmc_mmcr);
- break;
- case 3:
- pmc_mmcr = mfspr(SPR_MMCR0);
- pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, config);
- mtspr(SPR_MMCR0, pmc_mmcr);
- break;
- case 4:
- pmc_mmcr = mfspr(SPR_MMCR1);
- pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, config);
- mtspr(SPR_MMCR1, pmc_mmcr);
- break;
- case 5:
- pmc_mmcr = mfspr(SPR_MMCR1);
- pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, config);
- mtspr(SPR_MMCR1, pmc_mmcr);
- break;
- default:
- break;
+		cc[frames] = *(sp + 1);
+		sp = (uintptr_t *)*sp;
}
-
- /* The mask is inverted (enable is 1) compared to the flags in MMCR0, which
- * are Freeze flags.
- */
- config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE;
-
- pmc_mmcr = mfspr(SPR_MMCR0);
- pmc_mmcr &= ~SPR_MMCR0_FC;
- pmc_mmcr |= config;
- mtspr(SPR_MMCR0, pmc_mmcr);
-
- return 0;
-}
-
-static int
-powerpc_stop_pmc(int cpu, int ri)
-{
- struct pmc *pm;
- struct pmc_hw *phw;
- register_t pmc_mmcr;
-
- phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
- pm = phw->phw_pmc;
-
- /*
- * Disable the PMCs.
- */
- switch (ri) {
- case 0:
- pmc_mmcr = mfspr(SPR_MMCR0);
- pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, 0);
- mtspr(SPR_MMCR0, pmc_mmcr);
- break;
- case 1:
- pmc_mmcr = mfspr(SPR_MMCR0);
- pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, 0);
- mtspr(SPR_MMCR0, pmc_mmcr);
- break;
- case 2:
- pmc_mmcr = mfspr(SPR_MMCR1);
- pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, 0);
- mtspr(SPR_MMCR1, pmc_mmcr);
- break;
- case 3:
- pmc_mmcr = mfspr(SPR_MMCR0);
- pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, 0);
- mtspr(SPR_MMCR0, pmc_mmcr);
- break;
- case 4:
- pmc_mmcr = mfspr(SPR_MMCR1);
- pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, 0);
- mtspr(SPR_MMCR1, pmc_mmcr);
- break;
- case 5:
- pmc_mmcr = mfspr(SPR_MMCR1);
- pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, 0);
- mtspr(SPR_MMCR1, pmc_mmcr);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int
-powerpc_release_pmc(int cpu, int ri, struct pmc *pmc)
-{
- struct pmc_hw *phw;
-
- KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
- ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
- KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
- ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
-
- phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
- KASSERT(phw->phw_pmc == NULL,
- ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
-
- return 0;
+ return (frames);
}
static int
powerpc_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
- return 0;
+ return (0);
}
static int
powerpc_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
- return 0;
-}
-
-static int
-powerpc_intr(int cpu, struct trapframe *tf)
-{
- int i, error, retval;
- uint32_t config;
- struct pmc *pm;
- struct powerpc_cpu *pac;
- pmc_value_t v;
-
- KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
- ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
-
- PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
- TRAPF_USERMODE(tf));
-
- retval = 0;
-
- pac = powerpc_pcpu[cpu];
-
- /*
- * look for all PMCs that have interrupted:
- * - look for a running, sampling PMC which has overflowed
- * and which has a valid 'struct pmc' association
- *
- * If found, we call a helper to process the interrupt.
- */
-
- for (i = 0; i < PPC_MAX_PMCS; i++) {
- if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
- !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
- continue;
- }
-
- if (!POWERPC_PMC_HAS_OVERFLOWED(i))
- continue;
-
- retval = 1; /* Found an interrupting PMC. */
-
- if (pm->pm_state != PMC_STATE_RUNNING)
- continue;
-
- /* Stop the PMC, reload count. */
- v = pm->pm_sc.pm_reloadcount;
- config = mfspr(SPR_MMCR0);
-
- mtspr(SPR_MMCR0, config | SPR_MMCR0_FC);
- powerpc_pmcn_write(i, v);
-
- /* Restart the counter if logging succeeded. */
- error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
- TRAPF_USERMODE(tf));
- mtspr(SPR_MMCR0, config);
- if (error != 0)
- powerpc_stop_pmc(cpu, i);
- atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
- &pmc_stats.pm_intr_ignored, 1);
-
- }
-
- /* Re-enable PERF exceptions. */
- mtspr(SPR_MMCR0, mfspr(SPR_MMCR0) | SPR_MMCR0_PMXE);
-
- return (retval);
+ return (0);
}
-static int
+int
powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
int error;
@@ -715,8 +88,6 @@ powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d], illegal CPU %d", __LINE__, cpu));
- KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
- ("[powerpc,%d] row-index %d out of range", __LINE__, ri));
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
snprintf(powerpc_name, sizeof(powerpc_name), "POWERPC-%d", ri);
@@ -735,65 +106,20 @@ powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
return (0);
}
-static int
+int
powerpc_get_config(int cpu, int ri, struct pmc **ppm)
{
*ppm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
- return 0;
-}
-
-static int
-powerpc_pcpu_init(struct pmc_mdep *md, int cpu)
-{
- int first_ri, i;
- struct pmc_cpu *pc;
- struct powerpc_cpu *pac;
- struct pmc_hw *phw;
-
- KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
- ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
- PMCDBG(MDP,INI,1,"powerpc-init cpu=%d", cpu);
-
- powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
- M_WAITOK|M_ZERO);
- pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * PPC_MAX_PMCS,
- M_PMC, M_WAITOK|M_ZERO);
- pc = pmc_pcpu[cpu];
- first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450].pcd_ri;
- KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
-
- for (i = 0, phw = pac->pc_ppcpmcs; i < PPC_MAX_PMCS; i++, phw++) {
- phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
- PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
- phw->phw_pmc = NULL;
- pc->pc_hwpmcs[i + first_ri] = phw;
- }
-
- /* Clear the MMCRs, and set FC, to disable all PMCs. */
- mtspr(SPR_MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE);
- mtspr(SPR_MMCR1, 0);
-
- return 0;
-}
-
-static int
-powerpc_pcpu_fini(struct pmc_mdep *md, int cpu)
-{
- uint32_t mmcr0 = mfspr(SPR_MMCR0);
-
- mmcr0 |= SPR_MMCR0_FC;
- mtspr(SPR_MMCR0, mmcr0);
- free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
- free(powerpc_pcpu[cpu], M_PMC);
- return 0;
+ return (0);
}
struct pmc_mdep *
pmc_md_initialize()
{
struct pmc_mdep *pmc_mdep;
- struct pmc_classdep *pcd;
+ int error;
+ uint16_t vers;
/*
* Allocate space for pointers to PMC HW descriptors and for
@@ -807,30 +133,31 @@ pmc_md_initialize()
pmc_mdep->pmd_cputype = PMC_CPU_PPC_7450;
- pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450];
- pcd->pcd_caps = POWERPC_PMC_CAPS;
- pcd->pcd_class = PMC_CLASS_PPC7450;
- pcd->pcd_num = PPC_MAX_PMCS;
- pcd->pcd_ri = pmc_mdep->pmd_npmc;
- pcd->pcd_width = 32; /* All PMCs, even in ppc970, are 32-bit */
-
- pcd->pcd_allocate_pmc = powerpc_allocate_pmc;
- pcd->pcd_config_pmc = powerpc_config_pmc;
- pcd->pcd_pcpu_fini = powerpc_pcpu_fini;
- pcd->pcd_pcpu_init = powerpc_pcpu_init;
- pcd->pcd_describe = powerpc_describe;
- pcd->pcd_get_config = powerpc_get_config;
- pcd->pcd_read_pmc = powerpc_read_pmc;
- pcd->pcd_release_pmc = powerpc_release_pmc;
- pcd->pcd_start_pmc = powerpc_start_pmc;
- pcd->pcd_stop_pmc = powerpc_stop_pmc;
- pcd->pcd_write_pmc = powerpc_write_pmc;
+ vers = mfpvr() >> 16;
- pmc_mdep->pmd_intr = powerpc_intr;
pmc_mdep->pmd_switch_in = powerpc_switch_in;
pmc_mdep->pmd_switch_out = powerpc_switch_out;
- pmc_mdep->pmd_npmc += PPC_MAX_PMCS;
+	switch (vers) {
+	case MPC7447A:
+	case MPC7448:
+	case MPC7450:
+	case MPC7455:
+	case MPC7457:
+		error = pmc_mpc7xxx_initialize(pmc_mdep);
+		break;
+	case IBM970:
+	case IBM970FX:
+	case IBM970MP:
+	default:
+		error = -1;
+		break;
+	}
+
+ if (error != 0) {
+ pmc_mdep_free(pmc_mdep);
+ pmc_mdep = NULL;
+ return NULL;
+ }
return (pmc_mdep);
}
diff --git a/sys/dev/hwpmc/hwpmc_powerpc.h b/sys/dev/hwpmc/hwpmc_powerpc.h
new file mode 100644
index 0000000..a9b54f4
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_powerpc.h
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (c) 2013 Justin Hibbits
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_POWERPC_H_
+#define _DEV_HWPMC_POWERPC_H_ 1
+
+#ifdef _KERNEL
+
+#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
+
+#define POWERPC_PMC_KERNEL_ENABLE (0x1 << 30)
+#define POWERPC_PMC_USER_ENABLE (0x1 << 31)
+
+#define POWERPC_PMC_ENABLE (POWERPC_PMC_KERNEL_ENABLE | POWERPC_PMC_USER_ENABLE)
+#define POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (0x80000000-(V))
+#define POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (0x80000000-(P))
+
+struct powerpc_cpu {
+ struct pmc_hw *pc_ppcpmcs;
+};
+
+extern struct powerpc_cpu **powerpc_pcpu;
+
+extern int pmc_mpc7xxx_initialize(struct pmc_mdep *pmc_mdep);
+
+extern int powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc);
+extern int powerpc_get_config(int cpu, int ri, struct pmc **ppm);
+#endif /* _KERNEL */
+
+#endif /* _DEV_HWPMC_POWERPC_H_ */
diff --git a/sys/dev/ipmi/ipmi_linux.c b/sys/dev/ipmi/ipmi_linux.c
index 430bd08..b6b38f2 100644
--- a/sys/dev/ipmi/ipmi_linux.c
+++ b/sys/dev/ipmi/ipmi_linux.c
@@ -89,11 +89,13 @@ MODULE_DEPEND(ipmi_linux, linux, 1, 1, 1);
static int
ipmi_linux_ioctl(struct thread *td, struct linux_ioctl_args *args)
{
+ cap_rights_t rights;
struct file *fp;
u_long cmd;
int error;
- if ((error = fget(td, args->fd, CAP_IOCTL, &fp)) != 0)
+ error = fget(td, args->fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
+ if (error != 0)
return (error);
cmd = args->cmd;
diff --git a/sys/dev/iscsi_initiator/iscsi.c b/sys/dev/iscsi_initiator/iscsi.c
index 4dbf163..4a1cb96 100644
--- a/sys/dev/iscsi_initiator/iscsi.c
+++ b/sys/dev/iscsi_initiator/iscsi.c
@@ -382,16 +382,19 @@ i_ping(struct cdev *dev)
static int
i_setsoc(isc_session_t *sp, int fd, struct thread *td)
{
+ cap_rights_t rights;
int error = 0;
if(sp->soc != NULL)
isc_stop_receiver(sp);
- error = fget(td, fd, CAP_SOCK_CLIENT, &sp->fp);
+ error = fget(td, fd, cap_rights_init(&rights, CAP_SOCK_CLIENT), &sp->fp);
if(error)
return error;
- if((error = fgetsock(td, fd, CAP_SOCK_CLIENT, &sp->soc, 0)) == 0) {
+ error = fgetsock(td, fd, cap_rights_init(&rights, CAP_SOCK_CLIENT),
+ &sp->soc, 0);
+ if(error == 0) {
sp->td = td;
isc_start_receiver(sp);
}
diff --git a/sys/dev/mfi/mfi_linux.c b/sys/dev/mfi/mfi_linux.c
index 3328a66..429d496 100644
--- a/sys/dev/mfi/mfi_linux.c
+++ b/sys/dev/mfi/mfi_linux.c
@@ -84,6 +84,7 @@ MODULE_DEPEND(mfi, linux, 1, 1, 1);
static int
mfi_linux_ioctl(struct thread *p, struct linux_ioctl_args *args)
{
+ cap_rights_t rights;
struct file *fp;
int error;
u_long cmd = args->cmd;
@@ -97,7 +98,8 @@ mfi_linux_ioctl(struct thread *p, struct linux_ioctl_args *args)
break;
}
- if ((error = fget(p, args->fd, CAP_IOCTL, &fp)) != 0)
+ error = fget(p, args->fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
+ if (error != 0)
return (error);
error = fo_ioctl(fp, cmd, (caddr_t)args->arg, p->td_ucred, p);
fdrop(fp, p);
diff --git a/sys/dev/ntb/if_ntb/if_ntb.c b/sys/dev/ntb/if_ntb/if_ntb.c
index 55b19c5..e86ed53 100644
--- a/sys/dev/ntb/if_ntb/if_ntb.c
+++ b/sys/dev/ntb/if_ntb/if_ntb.c
@@ -104,7 +104,7 @@ struct ntb_transport_qp {
bool client_ready;
bool qp_link;
- uint8_t qp_num; /* Only 64 QP's are allowed. 0-63 */
+ uint8_t qp_num; /* Only 64 QPs are allowed. 0-63 */
struct ntb_rx_info *rx_info;
struct ntb_rx_info *remote_rx_info;
@@ -279,14 +279,14 @@ ntb_handle_module_events(struct module *m, int what, void *arg)
return (err);
}
-static moduledata_t ntb_transport_mod = {
- "ntb_transport",
+static moduledata_t if_ntb_mod = {
+ "if_ntb",
ntb_handle_module_events,
NULL
};
-DECLARE_MODULE(ntb_transport, ntb_transport_mod, SI_SUB_KLD, SI_ORDER_ANY);
-MODULE_DEPEND(ntb_transport, ntb_hw, 1, 1, 1);
+DECLARE_MODULE(if_ntb, if_ntb_mod, SI_SUB_KLD, SI_ORDER_ANY);
+MODULE_DEPEND(if_ntb, ntb_hw, 1, 1, 1);
static int
ntb_setup_interface()
@@ -297,7 +297,7 @@ ntb_setup_interface()
net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0);
if (net_softc.ntb == NULL) {
- printf("ntb: Can't find devclass\n");
+ printf("ntb: Cannot find devclass\n");
return (ENXIO);
}
@@ -334,14 +334,19 @@ ntb_setup_interface()
static int
ntb_teardown_interface()
{
- struct ifnet *ifp = net_softc.ifp;
- ntb_transport_link_down(net_softc.qp);
+ if (net_softc.qp != NULL)
+ ntb_transport_link_down(net_softc.qp);
- ether_ifdetach(ifp);
- if_free(ifp);
- ntb_transport_free_queue(net_softc.qp);
- ntb_transport_free(&net_softc);
+ if (net_softc.ifp != NULL) {
+ ether_ifdetach(net_softc.ifp);
+ if_free(net_softc.ifp);
+ }
+
+ if (net_softc.qp != NULL) {
+ ntb_transport_free_queue(net_softc.qp);
+ ntb_transport_free(&net_softc);
+ }
return (0);
}
@@ -405,7 +410,7 @@ ntb_start(struct ifnet *ifp)
m_length(m_head, NULL));
if (rc != 0) {
CTR1(KTR_NTB,
- "TX: couldn't tx mbuf %p. Returning to snd q",
+ "TX: could not tx mbuf %p. Returning to snd q",
m_head);
if (rc == EAGAIN) {
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
@@ -475,8 +480,11 @@ ntb_transport_init(struct ntb_softc *ntb)
if (rc != 0)
goto err;
- if (ntb_query_link_status(ntb))
+ if (ntb_query_link_status(ntb)) {
+ if (bootverbose)
+ device_printf(ntb_get_device(ntb), "link up\n");
callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
+ }
return (0);
@@ -497,7 +505,7 @@ ntb_transport_free(void *transport)
callout_drain(&nt->link_work);
- /* verify that all the qp's are freed */
+ /* verify that all the qps are freed */
for (i = 0; i < nt->max_qps; i++)
if (!test_bit(i, &nt->qp_bitmap))
ntb_transport_free_queue(&nt->qps[i]);
@@ -673,6 +681,8 @@ ntb_transport_link_up(struct ntb_transport_qp *qp)
return;
qp->client_ready = NTB_LINK_UP;
+ if (bootverbose)
+ device_printf(ntb_get_device(qp->ntb), "qp client ready\n");
if (qp->transport->transport_link == NTB_LINK_UP)
callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
@@ -709,7 +719,7 @@ ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
if (entry == NULL) {
- CTR0(KTR_NTB, "TX: couldn't get entry from tx_free_q");
+ CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
return (ENOMEM);
}
CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);
@@ -988,9 +998,13 @@ ntb_transport_event_callback(void *data, enum ntb_hw_event event)
switch (event) {
case NTB_EVENT_HW_LINK_UP:
+ if (bootverbose)
+ device_printf(ntb_get_device(nt->ntb), "HW link up\n");
callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
break;
case NTB_EVENT_HW_LINK_DOWN:
+ if (bootverbose)
+ device_printf(ntb_get_device(nt->ntb), "HW link down\n");
ntb_transport_link_cleanup(nt);
break;
default:
@@ -1071,6 +1085,8 @@ ntb_transport_link_work(void *arg)
return;
nt->transport_link = NTB_LINK_UP;
+ if (bootverbose)
+ device_printf(ntb_get_device(ntb), "transport link up\n");
for (i = 0; i < nt->max_qps; i++) {
qp = &nt->qps[i];
@@ -1176,6 +1192,8 @@ ntb_qp_link_work(void *arg)
qp->qp_link = NTB_LINK_UP;
if (qp->event_handler != NULL)
qp->event_handler(qp->cb_data, NTB_LINK_UP);
+ if (bootverbose)
+ device_printf(ntb_get_device(ntb), "qp link up\n");
} else if (nt->transport_link == NTB_LINK_UP) {
callout_reset(&qp->link_work,
NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
diff --git a/sys/dev/ntb/ntb_hw/ntb_hw.c b/sys/dev/ntb/ntb_hw/ntb_hw.c
index 72314dd..019f2a7 100644
--- a/sys/dev/ntb/ntb_hw/ntb_hw.c
+++ b/sys/dev/ntb/ntb_hw/ntb_hw.c
@@ -76,10 +76,18 @@ enum ntb_device_type {
NTB_SOC
};
+/* Device features and workarounds */
+#define HAS_FEATURE(feature) \
+ ((ntb->features & (feature)) != 0)
+
+#define NTB_BAR_SIZE_4K (1 << 0)
+#define NTB_REGS_THRU_MW (1 << 1)
+
struct ntb_hw_info {
uint32_t device_id;
- enum ntb_device_type type;
const char *desc;
+ enum ntb_device_type type;
+ uint64_t features;
};
struct ntb_pci_bar_info {
@@ -108,6 +116,7 @@ struct ntb_db_cb {
struct ntb_softc {
device_t device;
enum ntb_device_type type;
+ uint64_t features;
struct ntb_pci_bar_info bar_info[NTB_MAX_BARS];
struct ntb_int_info int_info[MAX_MSIX_INTERRUPTS];
@@ -145,26 +154,31 @@ struct ntb_softc {
uint8_t link_speed;
};
-#define ntb_reg_read(SIZE, offset) \
- bus_space_read_ ## SIZE (ntb->bar_info[NTB_CONFIG_BAR].pci_bus_tag, \
- ntb->bar_info[NTB_CONFIG_BAR].pci_bus_handle, (offset))
+#define ntb_bar_read(SIZE, bar, offset) \
+ bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
+ ntb->bar_info[(bar)].pci_bus_handle, (offset))
+#define ntb_bar_write(SIZE, bar, offset, val) \
+ bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
+ ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
+#define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
#define ntb_reg_write(SIZE, offset, val) \
- bus_space_write_ ## SIZE (ntb->bar_info[NTB_CONFIG_BAR].pci_bus_tag, \
- ntb->bar_info[NTB_CONFIG_BAR].pci_bus_handle, (offset), (val))
-
-#define ntb_read_1(offset) ntb_reg_read(1, (offset))
-#define ntb_read_2(offset) ntb_reg_read(2, (offset))
-#define ntb_read_4(offset) ntb_reg_read(4, (offset))
-#define ntb_read_8(offset) ntb_reg_read(8, (offset))
-#define ntb_write_1(offset, val) ntb_reg_write(1, (offset), (val))
-#define ntb_write_2(offset, val) ntb_reg_write(2, (offset), (val))
-#define ntb_write_4(offset, val) ntb_reg_write(4, (offset), (val))
-#define ntb_write_8(offset, val) ntb_reg_write(8, (offset), (val))
+ ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
+#define ntb_mw_read(SIZE, offset) ntb_bar_read(SIZE, NTB_B2B_BAR_2, offset)
+#define ntb_mw_write(SIZE, offset, val) \
+ ntb_bar_write(SIZE, NTB_B2B_BAR_2, offset, val)
+
+typedef int (*bar_map_strategy)(struct ntb_softc *ntb,
+ struct ntb_pci_bar_info *bar);
static int ntb_probe(device_t device);
static int ntb_attach(device_t device);
static int ntb_detach(device_t device);
-static int ntb_map_pci_bar(struct ntb_softc *ntb);
+static int ntb_map_pci_bars(struct ntb_softc *ntb);
+static int map_pci_bar(struct ntb_softc *ntb, bar_map_strategy strategy,
+ struct ntb_pci_bar_info *bar);
+static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
+static int map_memory_window_bar(struct ntb_softc *ntb,
+ struct ntb_pci_bar_info *bar);
static void ntb_unmap_pci_bar(struct ntb_softc *ntb);
static int ntb_setup_interrupts(struct ntb_softc *ntb);
static void ntb_teardown_interrupts(struct ntb_softc *ntb);
@@ -178,17 +192,21 @@ static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id);
static int ntb_initialize_hw(struct ntb_softc *ntb);
static int ntb_setup_xeon(struct ntb_softc *ntb);
static int ntb_setup_soc(struct ntb_softc *ntb);
+static void configure_soc_secondary_side_bars(struct ntb_softc *ntb);
+static void configure_xeon_secondary_side_bars(struct ntb_softc *ntb);
static void ntb_handle_heartbeat(void *arg);
static void ntb_handle_link_event(struct ntb_softc *ntb, int link_state);
static void recover_soc_link(void *arg);
static int ntb_check_link_status(struct ntb_softc *ntb);
-static bool is_bar_for_data_transfer(int bar_num);
+static void save_bar_parameters(struct ntb_pci_bar_info *bar);
static struct ntb_hw_info pci_ids[] = {
- { 0x3C0D8086, NTB_XEON, "Xeon E5/Core i7 Non-Transparent Bridge B2B" },
- { 0x0C4E8086, NTB_SOC, "Atom Processor S1200 NTB Primary B2B" },
- { 0x0E0D8086, NTB_XEON, "Xeon E5 V2 Non-Transparent Bridge B2B" },
- { 0x00000000, NTB_SOC, NULL }
+ { 0x3C0D8086, "Xeon E5/Core i7 Non-Transparent Bridge B2B", NTB_XEON,
+ NTB_REGS_THRU_MW },
+ { 0x0C4E8086, "Atom Processor S1200 NTB Primary B2B", NTB_SOC, 0 },
+ { 0x0E0D8086, "Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON,
+ NTB_REGS_THRU_MW | NTB_BAR_SIZE_4K },
+ { 0x00000000, NULL, NTB_SOC, 0 }
};
/*
@@ -245,12 +263,13 @@ ntb_attach(device_t device)
ntb->device = device;
ntb->type = p->type;
+ ntb->features = p->features;
/* Heartbeat timer for NTB_SOC since there is no link interrupt */
callout_init(&ntb->heartbeat_timer, CALLOUT_MPSAFE);
callout_init(&ntb->lr_timer, CALLOUT_MPSAFE);
- DETACH_ON_ERROR(ntb_map_pci_bar(ntb));
+ DETACH_ON_ERROR(ntb_map_pci_bars(ntb));
DETACH_ON_ERROR(ntb_initialize_hw(ntb));
DETACH_ON_ERROR(ntb_setup_interrupts(ntb));
@@ -273,59 +292,122 @@ ntb_detach(device_t device)
}
static int
-ntb_map_pci_bar(struct ntb_softc *ntb)
+ntb_map_pci_bars(struct ntb_softc *ntb)
{
- struct ntb_pci_bar_info *current_bar;
- int rc, i;
+ int rc;
ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
+ rc = map_pci_bar(ntb, map_mmr_bar, &ntb->bar_info[NTB_CONFIG_BAR]);
+ if (rc != 0)
+ return rc;
+
ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2);
+ rc = map_pci_bar(ntb, map_memory_window_bar,
+ &ntb->bar_info[NTB_B2B_BAR_1]);
+ if (rc != 0)
+ return rc;
+
ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4);
+ if (HAS_FEATURE(NTB_REGS_THRU_MW))
+ rc = map_pci_bar(ntb, map_mmr_bar,
+ &ntb->bar_info[NTB_B2B_BAR_2]);
+ else
+ rc = map_pci_bar(ntb, map_memory_window_bar,
+ &ntb->bar_info[NTB_B2B_BAR_2]);
+ if (rc != 0)
+ return rc;
- for (i = 0; i< NTB_MAX_BARS; i++) {
- current_bar = &ntb->bar_info[i];
- current_bar->pci_resource =
- bus_alloc_resource(ntb->device,
- SYS_RES_MEMORY,
- &current_bar->pci_resource_id, 0, ~0, 1,
- RF_ACTIVE);
+ return (0);
+}
- if (current_bar->pci_resource == NULL) {
- device_printf(ntb->device,
- "unable to allocate pci resource\n");
- return (ENXIO);
+static int
+map_pci_bar(struct ntb_softc *ntb, bar_map_strategy strategy,
+ struct ntb_pci_bar_info *bar)
+{
+ int rc;
+
+ rc = strategy(ntb, bar);
+ if (rc != 0) {
+ device_printf(ntb->device,
+ "unable to allocate pci resource\n");
+ } else {
+ device_printf(ntb->device,
+ "Bar size = %lx, v %p, p %p\n",
+ bar->size, bar->vbase,
+ (void *)(bar->pbase));
+ }
+ return (rc);
+}
+
+static int
+map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
+{
+
+ bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
+ &bar->pci_resource_id, RF_ACTIVE);
+
+ if (bar->pci_resource == NULL)
+ return (ENXIO);
+ else {
+ save_bar_parameters(bar);
+ return (0);
+ }
+}
+
+static int
+map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
+{
+ int rc;
+ uint8_t bar_size_bits = 0;
+
+ bar->pci_resource = bus_alloc_resource_any(ntb->device,
+ SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE);
+
+ if (bar->pci_resource == NULL)
+ return (ENXIO);
+ else {
+ save_bar_parameters(bar);
+ /*
+ * Ivytown NTB BAR sizes are misreported by the hardware due to
+ * a hardware issue. To work around this, query the size it
+ * should be configured to by the device and modify the resource
+ * to correspond to this new size. The BIOS on systems with this
+ * problem is required to provide enough address space to allow
+ * the driver to make this change safely.
+ *
+ * Ideally I could have just specified the size when I allocated
+ * the resource like:
+ * bus_alloc_resource(ntb->device,
+ * SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
+ * 1ul << bar_size_bits, RF_ACTIVE);
+ * but the PCI driver does not honor the size in this call, so
+ * we have to modify it after the fact.
+ */
+ if (HAS_FEATURE(NTB_BAR_SIZE_4K)) {
+ if (bar->pci_resource_id == PCIR_BAR(2))
+ bar_size_bits = pci_read_config(ntb->device,
+ XEON_PBAR23SZ_OFFSET, 1);
+ else
+ bar_size_bits = pci_read_config(ntb->device,
+ XEON_PBAR45SZ_OFFSET, 1);
+ rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
+ bar->pci_resource, bar->pbase,
+ bar->pbase + (1ul << bar_size_bits) - 1);
+ if (rc != 0 ) {
+ device_printf(ntb->device,
+ "unable to resize bar\n");
+ return (rc);
+ } else
+ save_bar_parameters(bar);
}
- else {
- current_bar->pci_bus_tag =
- rman_get_bustag(current_bar->pci_resource);
- current_bar->pci_bus_handle =
- rman_get_bushandle(current_bar->pci_resource);
- current_bar->pbase =
- rman_get_start(current_bar->pci_resource);
- current_bar->size =
- rman_get_size(current_bar->pci_resource);
- current_bar->vbase =
- rman_get_virtual(current_bar->pci_resource);
- if (is_bar_for_data_transfer(i)) {
- /*
- * Mark bar region as write combining to improve
- * performance.
- */
- rc = pmap_change_attr(
- (vm_offset_t)current_bar->vbase,
- current_bar->size,
- VM_MEMATTR_WRITE_COMBINING);
- if (rc != 0) {
- device_printf(ntb->device,
- "Couldn't mark bar as"
- " WRITE_COMBINING\n");
- return (rc);
- }
- }
- device_printf(ntb->device,
- "Bar size = %lx, v %p, p %p\n",
- current_bar->size, current_bar->vbase,
- (void *)(current_bar->pbase));
+
+ /* Mark bar region as write combining to improve performance. */
+ rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size,
+ VM_MEMATTR_WRITE_COMBINING);
+ if (rc != 0) {
+ device_printf(ntb->device, "unable to mark bar as"
+ " WRITE_COMBINING\n");
+ return (rc);
}
}
return (0);
@@ -361,9 +443,9 @@ ntb_setup_interrupts(struct ntb_softc *ntb)
* Interrupt. The rest will be unmasked as callbacks are registered.
*/
if (ntb->type == NTB_SOC)
- ntb_write_8(ntb->reg_ofs.pdb_mask, ~0);
+ ntb_reg_write(8, ntb->reg_ofs.pdb_mask, ~0);
else
- ntb_write_2(ntb->reg_ofs.pdb_mask,
+ ntb_reg_write(2, ntb->reg_ofs.pdb_mask,
~(1 << ntb->limits.max_db_bits));
num_vectors = MIN(pci_msix_count(ntb->device),
@@ -393,7 +475,8 @@ ntb_setup_interrupts(struct ntb_softc *ntb)
int_arg = &ntb->db_cb[i];
} else {
if (i == num_vectors - 1) {
- interrupt_handler = handle_xeon_event_irq;
+ interrupt_handler =
+ handle_xeon_event_irq;
int_arg = ntb;
} else {
interrupt_handler =
@@ -413,8 +496,8 @@ ntb_setup_interrupts(struct ntb_softc *ntb)
}
else {
ntb->int_info[0].rid = 0;
- ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ,
- &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
+ ntb->int_info[0].res = bus_alloc_resource_any(ntb->device,
+ SYS_RES_IRQ, &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
interrupt_handler = ntb_handle_legacy_interrupt;
if (ntb->int_info[0].res == NULL) {
device_printf(ntb->device,
@@ -463,7 +546,7 @@ handle_soc_irq(void *arg)
struct ntb_db_cb *db_cb = arg;
struct ntb_softc *ntb = db_cb->ntb;
- ntb_write_8(ntb->reg_ofs.pdb, (uint64_t) 1 << db_cb->db_num);
+ ntb_reg_write(8, ntb->reg_ofs.pdb, (uint64_t) 1 << db_cb->db_num);
if (db_cb->callback != NULL)
db_cb->callback(db_cb->data, db_cb->db_num);
@@ -481,7 +564,7 @@ handle_xeon_irq(void *arg)
* vectors, with the 4th having a single bit for link
* interrupts.
*/
- ntb_write_2(ntb->reg_ofs.pdb,
+ ntb_reg_write(2, ntb->reg_ofs.pdb,
((1 << ntb->bits_per_vector) - 1) <<
(db_cb->db_num * ntb->bits_per_vector));
@@ -501,7 +584,7 @@ handle_xeon_event_irq(void *arg)
device_printf(ntb->device, "Error determining link status\n");
/* bit 15 is always the link bit */
- ntb_write_2(ntb->reg_ofs.pdb, 1 << ntb->limits.max_db_bits);
+ ntb_reg_write(2, ntb->reg_ofs.pdb, 1 << ntb->limits.max_db_bits);
}
static void
@@ -513,7 +596,7 @@ ntb_handle_legacy_interrupt(void *arg)
uint16_t pdb16;
if (ntb->type == NTB_SOC) {
- pdb64 = ntb_read_8(ntb->reg_ofs.pdb);
+ pdb64 = ntb_reg_read(8, ntb->reg_ofs.pdb);
while (pdb64) {
i = ffs(pdb64);
@@ -521,7 +604,7 @@ ntb_handle_legacy_interrupt(void *arg)
handle_soc_irq(&ntb->db_cb[i]);
}
} else {
- pdb16 = ntb_read_2(ntb->reg_ofs.pdb);
+ pdb16 = ntb_reg_read(2, ntb->reg_ofs.pdb);
if ((pdb16 & XEON_DB_HW_LINK) != 0) {
handle_xeon_event_irq(ntb);
@@ -634,10 +717,15 @@ ntb_setup_xeon(struct ntb_softc *ntb)
ntb->limits.msix_cnt = XEON_MSIX_CNT;
ntb->bits_per_vector = XEON_DB_BITS_PER_VEC;
+ configure_xeon_secondary_side_bars(ntb);
/* Enable Bus Master and Memory Space on the secondary side */
- ntb_write_2(ntb->reg_ofs.spci_cmd,
+ ntb_reg_write(2, ntb->reg_ofs.spci_cmd,
PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
+ /* Enable link training */
+ ntb_reg_write(4, ntb->reg_ofs.lnk_cntl,
+ NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP);
+
return (0);
}
@@ -698,49 +786,63 @@ ntb_setup_soc(struct ntb_softc *ntb)
*/
pci_write_config(ntb->device, 0xFC, 0x4, 4);
- /*
- * Some BIOSes aren't filling out the XLAT offsets.
- * Check and correct the issue.
- */
- if (ntb->dev_type == NTB_DEV_USD) {
- if (ntb_read_8(SOC_PBAR2XLAT_OFFSET) == 0)
- ntb_write_8(SOC_PBAR2XLAT_OFFSET,
- SOC_PBAR2XLAT_USD_ADDR);
-
- if (ntb_read_8(SOC_PBAR4XLAT_OFFSET) == 0)
- ntb_write_8(SOC_PBAR4XLAT_OFFSET,
- SOC_PBAR4XLAT_USD_ADDR);
+ configure_soc_secondary_side_bars(ntb);
- if (ntb_read_8(SOC_MBAR23_OFFSET) == 0xC)
- ntb_write_8(SOC_MBAR23_OFFSET, SOC_MBAR23_USD_ADDR);
-
- if (ntb_read_8(SOC_MBAR45_OFFSET) == 0xC)
- ntb_write_8(SOC_MBAR45_OFFSET, SOC_MBAR45_USD_ADDR);
- } else {
- if (ntb_read_8(SOC_PBAR2XLAT_OFFSET) == 0)
- ntb_write_8(SOC_PBAR2XLAT_OFFSET,
- SOC_PBAR2XLAT_DSD_ADDR);
+ /* Enable Bus Master and Memory Space on the secondary side */
+ ntb_reg_write(2, ntb->reg_ofs.spci_cmd,
+ PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
+ callout_reset(&ntb->heartbeat_timer, 0, ntb_handle_heartbeat, ntb);
- if (ntb_read_8(SOC_PBAR4XLAT_OFFSET) == 0)
- ntb_write_8(SOC_PBAR4XLAT_OFFSET,
- SOC_PBAR4XLAT_DSD_ADDR);
+ return (0);
+}
- if (ntb_read_8(SOC_MBAR23_OFFSET) == 0xC)
- ntb_write_8(SOC_MBAR23_OFFSET, SOC_MBAR23_DSD_ADDR);
+static void
+configure_soc_secondary_side_bars(struct ntb_softc *ntb)
+{
- if (ntb_read_8(SOC_MBAR45_OFFSET) == 0xC)
- ntb_write_8(SOC_MBAR45_OFFSET, SOC_MBAR45_DSD_ADDR);
+ if (ntb->dev_type == NTB_DEV_USD) {
+ ntb_reg_write(8, SOC_PBAR2XLAT_OFFSET, PBAR2XLAT_USD_ADDR);
+ ntb_reg_write(8, SOC_PBAR4XLAT_OFFSET, PBAR4XLAT_USD_ADDR);
+ ntb_reg_write(8, SOC_MBAR23_OFFSET, MBAR23_USD_ADDR);
+ ntb_reg_write(8, SOC_MBAR45_OFFSET, MBAR45_USD_ADDR);
+ } else {
+ ntb_reg_write(8, SOC_PBAR2XLAT_OFFSET, PBAR2XLAT_DSD_ADDR);
+ ntb_reg_write(8, SOC_PBAR4XLAT_OFFSET, PBAR4XLAT_DSD_ADDR);
+ ntb_reg_write(8, SOC_MBAR23_OFFSET, MBAR23_DSD_ADDR);
+ ntb_reg_write(8, SOC_MBAR45_OFFSET, MBAR45_DSD_ADDR);
}
+}
- /* Enable Bus Master and Memory Space on the secondary side */
- ntb_write_2(ntb->reg_ofs.spci_cmd,
- PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
- callout_reset(&ntb->heartbeat_timer, 0, ntb_handle_heartbeat, ntb);
+static void
+configure_xeon_secondary_side_bars(struct ntb_softc *ntb)
+{
- return (0);
+ if (ntb->dev_type == NTB_DEV_USD) {
+ ntb_reg_write(8, XEON_PBAR2XLAT_OFFSET, PBAR2XLAT_USD_ADDR);
+ if (HAS_FEATURE(NTB_REGS_THRU_MW))
+ ntb_reg_write(8, XEON_PBAR4XLAT_OFFSET,
+ MBAR01_DSD_ADDR);
+ else
+ ntb_reg_write(8, XEON_PBAR4XLAT_OFFSET,
+ PBAR4XLAT_USD_ADDR);
+ ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, MBAR01_USD_ADDR);
+ ntb_reg_write(8, XEON_SBAR2BASE_OFFSET, MBAR23_USD_ADDR);
+ ntb_reg_write(8, XEON_SBAR4BASE_OFFSET, MBAR45_USD_ADDR);
+ } else {
+ ntb_reg_write(8, XEON_PBAR2XLAT_OFFSET, PBAR2XLAT_DSD_ADDR);
+ if (HAS_FEATURE(NTB_REGS_THRU_MW))
+ ntb_reg_write(8, XEON_PBAR4XLAT_OFFSET,
+ MBAR01_USD_ADDR);
+ else
+ ntb_reg_write(8, XEON_PBAR4XLAT_OFFSET,
+ PBAR4XLAT_DSD_ADDR);
+ ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, MBAR01_DSD_ADDR);
+ ntb_reg_write(8, XEON_SBAR2BASE_OFFSET, MBAR23_DSD_ADDR);
+ ntb_reg_write(8, XEON_SBAR4BASE_OFFSET, MBAR45_DSD_ADDR);
+ }
}
-/* SOC doesn't have link status interrupt, poll on that platform */
+/* SOC does not have link status interrupt, poll on that platform */
static void
ntb_handle_heartbeat(void *arg)
{
@@ -753,7 +855,7 @@ ntb_handle_heartbeat(void *arg)
"Error determining link status\n");
/* Check to see if a link error is the cause of the link down */
if (ntb->link_status == NTB_LINK_DOWN) {
- status32 = ntb_read_4(SOC_LTSSMSTATEJMP_OFFSET);
+ status32 = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET);
if ((status32 & SOC_LTSSMSTATEJMP_FORCEDETECT) != 0) {
callout_reset(&ntb->lr_timer, 0, recover_soc_link,
ntb);
@@ -771,37 +873,37 @@ soc_perform_link_restart(struct ntb_softc *ntb)
uint32_t status;
/* Driver resets the NTB ModPhy lanes - magic! */
- ntb_write_1(SOC_MODPHY_PCSREG6, 0xe0);
- ntb_write_1(SOC_MODPHY_PCSREG4, 0x40);
- ntb_write_1(SOC_MODPHY_PCSREG4, 0x60);
- ntb_write_1(SOC_MODPHY_PCSREG6, 0x60);
+ ntb_reg_write(1, SOC_MODPHY_PCSREG6, 0xe0);
+ ntb_reg_write(1, SOC_MODPHY_PCSREG4, 0x40);
+ ntb_reg_write(1, SOC_MODPHY_PCSREG4, 0x60);
+ ntb_reg_write(1, SOC_MODPHY_PCSREG6, 0x60);
/* Driver waits 100ms to allow the NTB ModPhy to settle */
pause("ModPhy", hz / 10);
/* Clear AER Errors, write to clear */
- status = ntb_read_4(SOC_ERRCORSTS_OFFSET);
+ status = ntb_reg_read(4, SOC_ERRCORSTS_OFFSET);
status &= PCIM_AER_COR_REPLAY_ROLLOVER;
- ntb_write_4(SOC_ERRCORSTS_OFFSET, status);
+ ntb_reg_write(4, SOC_ERRCORSTS_OFFSET, status);
/* Clear unexpected electrical idle event in LTSSM, write to clear */
- status = ntb_read_4(SOC_LTSSMERRSTS0_OFFSET);
+ status = ntb_reg_read(4, SOC_LTSSMERRSTS0_OFFSET);
status |= SOC_LTSSMERRSTS0_UNEXPECTEDEI;
- ntb_write_4(SOC_LTSSMERRSTS0_OFFSET, status);
+ ntb_reg_write(4, SOC_LTSSMERRSTS0_OFFSET, status);
/* Clear DeSkew Buffer error, write to clear */
- status = ntb_read_4(SOC_DESKEWSTS_OFFSET);
+ status = ntb_reg_read(4, SOC_DESKEWSTS_OFFSET);
status |= SOC_DESKEWSTS_DBERR;
- ntb_write_4(SOC_DESKEWSTS_OFFSET, status);
+ ntb_reg_write(4, SOC_DESKEWSTS_OFFSET, status);
- status = ntb_read_4(SOC_IBSTERRRCRVSTS0_OFFSET);
+ status = ntb_reg_read(4, SOC_IBSTERRRCRVSTS0_OFFSET);
status &= SOC_IBIST_ERR_OFLOW;
- ntb_write_4(SOC_IBSTERRRCRVSTS0_OFFSET, status);
+ ntb_reg_write(4, SOC_IBSTERRRCRVSTS0_OFFSET, status);
/* Releases the NTB state machine to allow the link to retrain */
- status = ntb_read_4(SOC_LTSSMSTATEJMP_OFFSET);
+ status = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET);
status &= ~SOC_LTSSMSTATEJMP_FORCEDETECT;
- ntb_write_4(SOC_LTSSMSTATEJMP_OFFSET, status);
+ ntb_reg_write(4, SOC_LTSSMSTATEJMP_OFFSET, status);
}
static void
@@ -819,7 +921,7 @@ ntb_handle_link_event(struct ntb_softc *ntb, int link_state)
event = NTB_EVENT_HW_LINK_UP;
if (ntb->type == NTB_SOC)
- status = ntb_read_2(ntb->reg_ofs.lnk_stat);
+ status = ntb_reg_read(2, ntb->reg_ofs.lnk_stat);
else
status = pci_read_config(ntb->device,
XEON_LINK_STATUS_OFFSET, 2);
@@ -833,7 +935,7 @@ ntb_handle_link_event(struct ntb_softc *ntb, int link_state)
device_printf(ntb->device, "Link Down\n");
ntb->link_status = NTB_LINK_DOWN;
event = NTB_EVENT_HW_LINK_DOWN;
- /* Don't modify link width/speed, we need it in link recovery */
+ /* Do not modify link width/speed, we need it in link recovery */
}
/* notify the upper layer if we have an event change */
@@ -852,15 +954,15 @@ recover_soc_link(void *arg)
soc_perform_link_restart(ntb);
pause("Link", SOC_LINK_RECOVERY_TIME * hz / 1000);
- status32 = ntb_read_4(SOC_LTSSMSTATEJMP_OFFSET);
+ status32 = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET);
if ((status32 & SOC_LTSSMSTATEJMP_FORCEDETECT) != 0)
goto retry;
- status32 = ntb_read_4(SOC_IBSTERRRCRVSTS0_OFFSET);
+ status32 = ntb_reg_read(4, SOC_IBSTERRRCRVSTS0_OFFSET);
if ((status32 & SOC_IBIST_ERR_OFLOW) != 0)
goto retry;
- status16 = ntb_read_2(ntb->reg_ofs.lnk_stat);
+ status16 = ntb_reg_read(2, ntb->reg_ofs.lnk_stat);
width = (status16 & NTB_LINK_WIDTH_MASK) >> 4;
speed = (status16 & NTB_LINK_SPEED_MASK);
if (ntb->link_width != width || ntb->link_speed != speed)
@@ -883,7 +985,7 @@ ntb_check_link_status(struct ntb_softc *ntb)
uint16_t status;
if (ntb->type == NTB_SOC) {
- ntb_cntl = ntb_read_4(ntb->reg_ofs.lnk_cntl);
+ ntb_cntl = ntb_reg_read(4, ntb->reg_ofs.lnk_cntl);
if ((ntb_cntl & SOC_CNTL_LINK_DOWN) != 0)
link_state = NTB_LINK_DOWN;
else
@@ -965,9 +1067,9 @@ ntb_register_db_callback(struct ntb_softc *ntb, unsigned int idx, void *data,
ntb->db_cb[idx].data = data;
/* unmask interrupt */
- mask = ntb_read_2(ntb->reg_ofs.pdb_mask);
+ mask = ntb_reg_read(2, ntb->reg_ofs.pdb_mask);
mask &= ~(1 << (idx * ntb->bits_per_vector));
- ntb_write_2(ntb->reg_ofs.pdb_mask, mask);
+ ntb_reg_write(2, ntb->reg_ofs.pdb_mask, mask);
return (0);
}
@@ -988,9 +1090,9 @@ ntb_unregister_db_callback(struct ntb_softc *ntb, unsigned int idx)
if (idx >= ntb->allocated_interrupts || !ntb->db_cb[idx].callback)
return;
- mask = ntb_read_2(ntb->reg_ofs.pdb_mask);
+ mask = ntb_reg_read(2, ntb->reg_ofs.pdb_mask);
mask |= 1 << (idx * ntb->bits_per_vector);
- ntb_write_2(ntb->reg_ofs.pdb_mask, mask);
+ ntb_reg_write(2, ntb->reg_ofs.pdb_mask, mask);
ntb->db_cb[idx].callback = NULL;
}
@@ -1091,7 +1193,7 @@ ntb_write_local_spad(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
if (idx >= ntb->limits.max_spads)
return (EINVAL);
- ntb_write_4(ntb->reg_ofs.spad_local + idx * 4, val);
+ ntb_reg_write(4, ntb->reg_ofs.spad_local + idx * 4, val);
return (0);
}
@@ -1114,7 +1216,7 @@ ntb_read_local_spad(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
if (idx >= ntb->limits.max_spads)
return (EINVAL);
- *val = ntb_read_4(ntb->reg_ofs.spad_local + idx * 4);
+ *val = ntb_reg_read(4, ntb->reg_ofs.spad_local + idx * 4);
return (0);
}
@@ -1137,7 +1239,10 @@ ntb_write_remote_spad(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
if (idx >= ntb->limits.max_spads)
return (EINVAL);
- ntb_write_4(ntb->reg_ofs.spad_remote + idx * 4, val);
+ if (HAS_FEATURE(NTB_REGS_THRU_MW))
+ ntb_mw_write(4, XEON_SHADOW_SPAD_OFFSET + idx * 4, val);
+ else
+ ntb_reg_write(4, ntb->reg_ofs.spad_remote + idx * 4, val);
return (0);
}
@@ -1160,7 +1265,10 @@ ntb_read_remote_spad(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
if (idx >= ntb->limits.max_spads)
return (EINVAL);
- *val = ntb_read_4(ntb->reg_ofs.spad_remote + idx * 4);
+ if (HAS_FEATURE(NTB_REGS_THRU_MW))
+ *val = ntb_mw_read(4, XEON_SHADOW_SPAD_OFFSET + idx * 4);
+ else
+ *val = ntb_reg_read(4, ntb->reg_ofs.spad_remote + idx * 4);
return (0);
}
@@ -1233,10 +1341,10 @@ ntb_set_mw_addr(struct ntb_softc *ntb, unsigned int mw, uint64_t addr)
switch (NTB_MW_TO_BAR(mw)) {
case NTB_B2B_BAR_1:
- ntb_write_8(ntb->reg_ofs.sbar2_xlat, addr);
+ ntb_reg_write(8, ntb->reg_ofs.sbar2_xlat, addr);
break;
case NTB_B2B_BAR_2:
- ntb_write_8(ntb->reg_ofs.sbar4_xlat, addr);
+ ntb_reg_write(8, ntb->reg_ofs.sbar4_xlat, addr);
break;
}
}
@@ -1256,11 +1364,16 @@ ntb_ring_sdb(struct ntb_softc *ntb, unsigned int db)
{
if (ntb->type == NTB_SOC)
- ntb_write_8(ntb->reg_ofs.sdb, (uint64_t) 1 << db);
+ ntb_reg_write(8, ntb->reg_ofs.sdb, (uint64_t) 1 << db);
else
- ntb_write_2(ntb->reg_ofs.sdb,
- ((1 << ntb->bits_per_vector) - 1) <<
- (db * ntb->bits_per_vector));
+ if (HAS_FEATURE(NTB_REGS_THRU_MW))
+ ntb_mw_write(2, XEON_SHADOW_PDOORBELL_OFFSET,
+ ((1 << ntb->bits_per_vector) - 1) <<
+ (db * ntb->bits_per_vector));
+ else
+ ntb_reg_write(2, ntb->reg_ofs.sdb,
+ ((1 << ntb->bits_per_vector) - 1) <<
+ (db * ntb->bits_per_vector));
}
/**
@@ -1278,11 +1391,24 @@ ntb_query_link_status(struct ntb_softc *ntb)
return (ntb->link_status == NTB_LINK_UP);
}
-static bool
-is_bar_for_data_transfer(int bar_num)
+static void
+save_bar_parameters(struct ntb_pci_bar_info *bar)
{
- if ((bar_num > NTB_CONFIG_BAR) && (bar_num < NTB_MAX_BARS))
- return true;
- else
- return false;
+ bar->pci_bus_tag =
+ rman_get_bustag(bar->pci_resource);
+ bar->pci_bus_handle =
+ rman_get_bushandle(bar->pci_resource);
+ bar->pbase =
+ rman_get_start(bar->pci_resource);
+ bar->size =
+ rman_get_size(bar->pci_resource);
+ bar->vbase =
+ rman_get_virtual(bar->pci_resource);
+
+}
+
+device_t ntb_get_device(struct ntb_softc *ntb)
+{
+
+ return (ntb->device);
}
diff --git a/sys/dev/ntb/ntb_hw/ntb_hw.h b/sys/dev/ntb/ntb_hw/ntb_hw.h
index 4f44031..c6c1274 100644
--- a/sys/dev/ntb/ntb_hw/ntb_hw.h
+++ b/sys/dev/ntb/ntb_hw/ntb_hw.h
@@ -69,5 +69,6 @@ u_long ntb_get_mw_size(struct ntb_softc *ntb, unsigned int mw);
void ntb_set_mw_addr(struct ntb_softc *ntb, unsigned int mw, uint64_t addr);
void ntb_ring_sdb(struct ntb_softc *ntb, unsigned int db);
bool ntb_query_link_status(struct ntb_softc *ntb);
+device_t ntb_get_device(struct ntb_softc *ntb);
#endif /* _NTB_HW_H_ */
diff --git a/sys/dev/ntb/ntb_hw/ntb_regs.h b/sys/dev/ntb/ntb_hw/ntb_regs.h
index 34ad779..bd55a59 100644
--- a/sys/dev/ntb/ntb_hw/ntb_regs.h
+++ b/sys/dev/ntb/ntb_hw/ntb_regs.h
@@ -39,14 +39,14 @@
#define XEON_MAX_SPADS 16
#define XEON_MAX_COMPAT_SPADS 8
/* Reserve the uppermost bit for link interrupt */
-#define XEON_MAX_DB_BITS 15
+#define XEON_MAX_DB_BITS 15
#define XEON_DB_BITS_PER_VEC 5
#define XEON_DB_HW_LINK 0x8000
#define XEON_PCICMD_OFFSET 0x0504
#define XEON_DEVCTRL_OFFSET 0x0598
-#define XEON_LINK_STATUS_OFFSET 0x01A2
+#define XEON_LINK_STATUS_OFFSET 0x01a2
#define XEON_PBAR2LMT_OFFSET 0x0000
#define XEON_PBAR4LMT_OFFSET 0x0008
@@ -60,13 +60,13 @@
#define XEON_SBAR2BASE_OFFSET 0x0048
#define XEON_SBAR4BASE_OFFSET 0x0050
#define XEON_NTBCNTL_OFFSET 0x0058
-#define XEON_SBDF_OFFSET 0x005C
+#define XEON_SBDF_OFFSET 0x005c
#define XEON_PDOORBELL_OFFSET 0x0060
#define XEON_PDBMSK_OFFSET 0x0062
#define XEON_SDOORBELL_OFFSET 0x0064
#define XEON_SDBMSK_OFFSET 0x0066
#define XEON_USMEMMISS 0x0070
-#define XEON_SPAD_OFFSET 0x0080
+#define XEON_SPAD_OFFSET 0x0080
#define XEON_SPADSEMA4_OFFSET 0x00c0
#define XEON_WCCNTRL_OFFSET 0x00e0
#define XEON_B2B_SPAD_OFFSET 0x0100
@@ -105,7 +105,7 @@
#define SOC_MODPHY_PCSREG4 0x1c004
#define SOC_MODPHY_PCSREG6 0x1c006
-#define SOC_IP_BASE 0xC000
+#define SOC_IP_BASE 0xc000
#define SOC_DESKEWSTS_OFFSET (SOC_IP_BASE + 0x3024)
#define SOC_LTSSMERRSTS0_OFFSET (SOC_IP_BASE + 0x3180)
#define SOC_LTSSMSTATEJMP_OFFSET (SOC_IP_BASE + 0x3040)
@@ -114,13 +114,15 @@
#define SOC_DESKEWSTS_DBERR (1 << 15)
#define SOC_LTSSMERRSTS0_UNEXPECTEDEI (1 << 20)
#define SOC_LTSSMSTATEJMP_FORCEDETECT (1 << 2)
-#define SOC_IBIST_ERR_OFLOW 0x7FFF7FFF
+#define SOC_IBIST_ERR_OFLOW 0x7fff7fff
#define NTB_CNTL_BAR23_SNOOP (1 << 2)
#define NTB_CNTL_BAR45_SNOOP (1 << 6)
#define SOC_CNTL_LINK_DOWN (1 << 16)
-#define NTB_PPD_OFFSET 0x00D4
+#define XEON_PBAR23SZ_OFFSET 0x00d0
+#define XEON_PBAR45SZ_OFFSET 0x00d1
+#define NTB_PPD_OFFSET 0x00d4
#define XEON_PPD_CONN_TYPE 0x0003
#define XEON_PPD_DEV_TYPE 0x0010
#define SOC_PPD_INIT_LINK 0x0008
@@ -134,13 +136,19 @@
#define NTB_DEV_DSD 1
#define NTB_DEV_USD 0
-#define SOC_PBAR2XLAT_USD_ADDR 0x0000004000000000
-#define SOC_PBAR4XLAT_USD_ADDR 0x0000008000000000
-#define SOC_MBAR23_USD_ADDR 0x000000410000000C
-#define SOC_MBAR45_USD_ADDR 0x000000810000000C
-#define SOC_PBAR2XLAT_DSD_ADDR 0x0000004100000000
-#define SOC_PBAR4XLAT_DSD_ADDR 0x0000008100000000
-#define SOC_MBAR23_DSD_ADDR 0x000000400000000C
-#define SOC_MBAR45_DSD_ADDR 0x000000800000000C
+#define PBAR2XLAT_USD_ADDR 0x0000004000000000
+#define PBAR4XLAT_USD_ADDR 0x0000008000000000
+#define MBAR01_USD_ADDR 0x000000210000000c
+#define MBAR23_USD_ADDR 0x000000410000000c
+#define MBAR45_USD_ADDR 0x000000810000000c
+#define PBAR2XLAT_DSD_ADDR 0x0000004100000000
+#define PBAR4XLAT_DSD_ADDR 0x0000008100000000
+#define MBAR01_DSD_ADDR 0x000000200000000c
+#define MBAR23_DSD_ADDR 0x000000400000000c
+#define MBAR45_DSD_ADDR 0x000000800000000c
+
+/* XEON Shadowed MMIO Space */
+#define XEON_SHADOW_PDOORBELL_OFFSET 0x60
+#define XEON_SHADOW_SPAD_OFFSET 0x80
#endif /* _NTB_REGS_H_ */
diff --git a/sys/dev/tdfx/tdfx_linux.c b/sys/dev/tdfx/tdfx_linux.c
index 0b769f0..fa39ab1 100644
--- a/sys/dev/tdfx/tdfx_linux.c
+++ b/sys/dev/tdfx/tdfx_linux.c
@@ -45,6 +45,7 @@ LINUX_IOCTL_SET(tdfx, LINUX_IOCTL_TDFX_MIN, LINUX_IOCTL_TDFX_MAX);
static int
linux_ioctl_tdfx(struct thread *td, struct linux_ioctl_args* args)
{
+ cap_rights_t rights;
int error = 0;
u_long cmd = args->cmd & 0xffff;
@@ -54,7 +55,8 @@ linux_ioctl_tdfx(struct thread *td, struct linux_ioctl_args* args)
struct file *fp;
- if ((error = fget(td, args->fd, CAP_IOCTL, &fp)) != 0)
+ error = fget(td, args->fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
+ if (error != 0)
return (error);
/* We simply copy the data and send it right to ioctl */
copyin((caddr_t)args->arg, &d_pio, sizeof(d_pio));
diff --git a/sys/dev/usb/serial/uftdi.c b/sys/dev/usb/serial/uftdi.c
index bc971ec..439cd8f 100644
--- a/sys/dev/usb/serial/uftdi.c
+++ b/sys/dev/usb/serial/uftdi.c
@@ -243,6 +243,7 @@ static const STRUCT_USB_HOST_ID uftdi_devs[] = {
UFTDI_DEV(FALCOM, TWIST, UFTDI_TYPE_8U232AM),
UFTDI_DEV(FIC, NEO1973_DEBUG, UFTDI_TYPE_AUTO | UFTDI_FLAG_JTAG),
UFTDI_DEV(FIC, NEO1973_DEBUG, UFTDI_TYPE_AUTO | UFTDI_FLAG_JTAG),
+ UFTDI_DEV(FTDI, 232EX, UFTDI_TYPE_AUTO),
UFTDI_DEV(FTDI, 232H, UFTDI_TYPE_AUTO),
UFTDI_DEV(FTDI, 232RL, UFTDI_TYPE_AUTO),
UFTDI_DEV(FTDI, 4N_GALAXY_DE_1, UFTDI_TYPE_AUTO),
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 498d424..e6a32ae 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -1547,6 +1547,7 @@ product DLINK DWLG122 0x3c00 DWL-G122 b1 Wireless Adapter
product DLINK DUBE100B1 0x3c05 DUB-E100 rev B1
product DLINK RT2870 0x3c09 RT2870
product DLINK RT3072 0x3c0a RT3072
+product DLINK DWA127 0x3c1b DWA-127 Wireless Adapter
product DLINK DSB650C 0x4000 10Mbps Ethernet
product DLINK DSB650TX1 0x4001 10/100 Ethernet
product DLINK DSB650TX 0x4002 10/100 Ethernet
@@ -1776,6 +1777,7 @@ product FTDI SERIAL_8U232AM4 0x6004 8U232AM Serial
product FTDI SERIAL_232RL 0x6006 FT232RL Serial
product FTDI SERIAL_2232C 0x6010 FT2232C Dual port Serial
product FTDI 232H 0x6014 FTDI compatible adapter
+product FTDI 232EX 0x6015 FTDI compatible adapter
product FTDI SERIAL_2232D 0x9e90 FT2232D Dual port Serial
product FTDI SERIAL_4232H 0x6011 FT4232H Quad port Serial
product FTDI BEAGLEBONE 0xa6d0 BeagleBone
diff --git a/sys/dev/usb/wlan/if_run.c b/sys/dev/usb/wlan/if_run.c
index aed07a2..5b4587f 100644
--- a/sys/dev/usb/wlan/if_run.c
+++ b/sys/dev/usb/wlan/if_run.c
@@ -171,6 +171,7 @@ static const STRUCT_USB_HOST_ID run_devs[] = {
RUN_DEV(CYBERTAN, RT2870),
RUN_DEV(DLINK, RT2870),
RUN_DEV(DLINK, RT3072),
+ RUN_DEV(DLINK, DWA127),
RUN_DEV(DLINK2, DWA130),
RUN_DEV(DLINK2, RT2870_1),
RUN_DEV(DLINK2, RT2870_2),
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index f757394..93c6007 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -592,6 +592,9 @@ vtnet_setup_features(struct vtnet_softc *sc)
vtnet_negotiate_features(sc);
+ if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
+ sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;
+
if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
/* This feature should always be negotiated. */
sc->vtnet_flags |= VTNET_FLAG_MAC;
@@ -1531,7 +1534,7 @@ vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
*/
#if 0
if_printf(sc->vtnet_ifp, "cksum offload of unsupported "
- "protocol eth_type=%#x proto=%d csum_start=%d
+ "protocol eth_type=%#x proto=%d csum_start=%d "
"csum_offset=%d\n", __func__, eth_type, proto,
hdr->csum_start, hdr->csum_offset);
#endif
@@ -2155,6 +2158,8 @@ vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
sc->vtnet_link_active == 0)
return;
+ vtnet_txq_eof(txq);
+
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
if (virtqueue_full(vq))
break;
@@ -2226,6 +2231,8 @@ vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
return (error);
}
+ vtnet_txq_eof(txq);
+
while ((m = drbr_peek(ifp, br)) != NULL) {
error = vtnet_txq_encap(txq, &m);
if (error) {
@@ -2471,6 +2478,8 @@ vtnet_watchdog(struct vtnet_txq *txq)
sc = txq->vtntx_sc;
VTNET_TXQ_LOCK(txq);
+ if (sc->vtnet_flags & VTNET_FLAG_EVENT_IDX)
+ vtnet_txq_eof(txq);
if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
VTNET_TXQ_UNLOCK(txq);
return (0);
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index 5921103..7f04a93 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -138,6 +138,7 @@ struct vtnet_softc {
#define VTNET_FLAG_MRG_RXBUFS 0x0080
#define VTNET_FLAG_LRO_NOMRG 0x0100
#define VTNET_FLAG_MULTIQ 0x0200
+#define VTNET_FLAG_EVENT_IDX 0x0400
int vtnet_link_active;
int vtnet_hdr_size;
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index beff14c..5eda6cd 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -449,10 +449,10 @@ virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
switch (hint) {
case VQ_POSTPONE_SHORT:
- ndesc /= 4;
+ ndesc = ndesc / 4;
break;
case VQ_POSTPONE_LONG:
- ndesc *= 3 / 4;
+ ndesc = (ndesc * 3) / 4;
break;
case VQ_POSTPONE_EMPTIED:
break;
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index 33f6faf..21fbb41 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -230,7 +230,7 @@ struct xbb_xen_reqlist {
int num_children;
/**
- * Number of I/O requests dispatched to the backend.
+ * Number of I/O requests still pending on the backend.
*/
int pendcnt;
@@ -327,13 +327,6 @@ struct xbb_xen_req {
int nr_512b_sectors;
/**
- * The number of struct bio requests still outstanding for this
- * request on the backend device. This field is only used for
- * device (rather than file) backed I/O.
- */
- int pendcnt;
-
- /**
* BLKIF_OP code for this request.
*/
int operation;
@@ -1239,6 +1232,8 @@ xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
nreq->reqlist = *reqlist;
nreq->req_ring_idx = ring_idx;
+ nreq->id = ring_req->id;
+ nreq->operation = ring_req->operation;
if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req));
@@ -1608,7 +1603,6 @@ xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
req_ring_idx = nreq->req_ring_idx;
nr_sects = 0;
nseg = ring_req->nr_segments;
- nreq->id = ring_req->id;
nreq->nr_pages = nseg;
nreq->nr_512b_sectors = 0;
req_seg_idx = 0;
@@ -2062,7 +2056,6 @@ xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
{
struct xbb_dev_data *dev_data;
struct bio *bios[XBB_MAX_SEGMENTS_PER_REQLIST];
- struct xbb_xen_req *nreq;
off_t bio_offset;
struct bio *bio;
struct xbb_sg *xbb_sg;
@@ -2080,7 +2073,6 @@ xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
bio_idx = 0;
if (operation == BIO_FLUSH) {
- nreq = STAILQ_FIRST(&reqlist->contig_req_list);
bio = g_new_bio();
if (__predict_false(bio == NULL)) {
DPRINTF("Unable to allocate bio for BIO_FLUSH\n");
@@ -2094,10 +2086,10 @@ xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
bio->bio_offset = 0;
bio->bio_data = 0;
bio->bio_done = xbb_bio_done;
- bio->bio_caller1 = nreq;
+ bio->bio_caller1 = reqlist;
bio->bio_pblkno = 0;
- nreq->pendcnt = 1;
+ reqlist->pendcnt = 1;
SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush,
device_get_unit(xbb->dev));
OpenPOWER on IntegriCloud