author     ian <ian@FreeBSD.org>    2015-10-18 18:26:19 +0000
committer  ian <ian@FreeBSD.org>    2015-10-18 18:26:19 +0000
commit     8a5f64069af62a6675dc9ba61325d2a112ad869c (patch)
tree       9d37d8568c7526eb1b62ec1eeae4588221837a89
parent     93afe7bdc606349a681a0e5518afa7640d4ee4d6 (diff)
Import ARM_INTRNG, the "next generation" interrupt architecture for arm
and armv6 architectures.

The primary enhancement over the old design is support for hierarchical
interrupt controllers (such as a gpio driver which can receive interrupts
from a root PIC and act as a PIC itself for clients interested in handling
a change of gpio pin state as an interrupt). The new code also provides an
infrastructure for mapping interrupts described in metadata in the form of
a "controller reference plus interrupt number" tuple into the simple "0-n"
flat numeric space understood by rman and the bus resource mechanisms.

Use of the new code is enabled by setting the ARM_INTRNG option and by
making a few simple changes to the platform's support code. In addition,
each existing PIC driver needs changes to be ready for INTRNG; this commit
contains the changes for the arm/gic driver, which most armv6 SoCs use,
but it does not enable the new code yet on any platform.

This project has been many years in the making, starting as a GSoC project
by Jakub Klama (jceel@) in 2012. That didn't get committed right away, and
the source base evolved out from under it to some degree. In 2014 I rebased
the diffs to then-current and did some enhancements in the area of mapping
interrupt numbers and storing associated fdt data; then the project went
cold again for a while. Eventually Svata Kraus took that work in progress
and did another big round of work on it, removing most of the remaining
rough edges. Finally I took that and made one more pass through it, mostly
disabling the "INTR_SOLO" feature for now, pending further design
discussions on how to most efficiently dispatch a pending interrupt through
more than one layer of PIC. The current code with the INTR_SOLO feature
disabled uses approximately 100 extra cpu cycles for each cascaded PIC the
interrupt has to be passed to, so what's left to do is about efficiency,
not correct operation.

Differential Revision:	https://reviews.freebsd.org/D2047
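To make the mapping idea concrete, here is a minimal, hypothetical
consumer-side sketch (not part of this commit). It assumes the
arm_fdt_map_irq() entry point and NIRQ table size introduced by this
diff; the helper name example_map_fdt_intr and its wiring are invented
for illustration only.

/*
 * Sketch: map a "controller node plus interrupt cells" tuple from the
 * FDT into the flat 0..NIRQ-1 handle space that rman and the bus
 * resource code understand. On failure, arm_fdt_map_irq() returns the
 * table-size sentinel (IRQ_INVALID, which equals NIRQ in intrng.c).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/intr.h>
#include <dev/ofw/openfirm.h>

static int
example_map_fdt_intr(phandle_t iparent, pcell_t *cells, u_int ncells,
    u_int *irqp)
{
	u_int irq;

	irq = arm_fdt_map_irq(iparent, cells, ncells);
	if (irq >= NIRQ)	/* IRQ_INVALID: no space left */
		return (ENXIO);

	/*
	 * The returned flat handle can now go through the standard
	 * resource machinery, e.g. bus_set_resource(dev, SYS_RES_IRQ,
	 * 0, irq, 1), then bus_alloc_resource_any() + bus_setup_intr().
	 */
	*irqp = irq;
	return (0);
}

The point of the flat handle is that nothing downstream of the mapping
needs to know which PIC in the hierarchy ultimately owns the interrupt.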
-rw-r--r--  sys/arm/arm/gic.c          620
-rw-r--r--  sys/arm/arm/intrng.c      1458
-rw-r--r--  sys/arm/arm/mp_machdep.c   125
-rw-r--r--  sys/arm/arm/nexus.c         47
-rw-r--r--  sys/arm/arm/pic_if.m       124
-rw-r--r--  sys/arm/include/fdt.h        4
-rw-r--r--  sys/arm/include/intr.h     103
-rw-r--r--  sys/arm/include/smp.h       16
-rw-r--r--  sys/conf/files.arm           4
-rw-r--r--  sys/conf/options.arm         1
10 files changed, 2475 insertions, 27 deletions
diff --git a/sys/arm/arm/gic.c b/sys/arm/arm/gic.c
index a9f171e..9ceac48 100644
--- a/sys/arm/arm/gic.c
+++ b/sys/arm/arm/gic.c
@@ -34,18 +34,25 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_platform.h"
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
+#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
+#ifdef ARM_INTRNG
+#include <sys/sched.h>
+#endif
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/smp.h>
@@ -55,6 +62,10 @@ __FBSDID("$FreeBSD$");
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
+#ifdef ARM_INTRNG
+#include "pic_if.h"
+#endif
+
/* We are using GICv2 register naming */
/* Distributor Registers */
@@ -83,8 +94,8 @@ __FBSDID("$FreeBSD$");
#define GICC_ABPR 0x001C /* v1 ICCABPR */
#define GICC_IIDR 0x00FC /* v1 ICCIIDR*/
-#define GIC_FIRST_IPI 0 /* Irqs 0-15 are SGIs/IPIs. */
-#define GIC_LAST_IPI 15
+#define GIC_FIRST_SGI 0 /* Irqs 0-15 are SGIs/IPIs. */
+#define GIC_LAST_SGI 15
#define GIC_FIRST_PPI 16 /* Irqs 16-31 are private (per */
#define GIC_LAST_PPI 31 /* core) peripheral interrupts. */
#define GIC_FIRST_SPI 32 /* Irqs 32+ are shared peripherals. */
@@ -102,8 +113,18 @@ __FBSDID("$FreeBSD$");
#define GIC_DEFAULT_ICFGR_INIT 0x00000000
#endif
+#ifdef ARM_INTRNG
+static u_int gic_irq_cpu;
+static int arm_gic_intr(void *);
+static int arm_gic_bind(device_t dev, struct arm_irqsrc *isrc);
+#endif
+
struct arm_gic_softc {
device_t gic_dev;
+#ifdef ARM_INTRNG
+ void * gic_intrhand;
+ struct arm_irqsrc ** gic_irqs;
+#endif
struct resource * gic_res[3];
bus_space_tag_t gic_c_bst;
bus_space_tag_t gic_d_bst;
@@ -117,10 +138,13 @@ struct arm_gic_softc {
static struct resource_spec arm_gic_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */
{ SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */
+#ifdef ARM_INTRNG
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
+#endif
{ -1, 0 }
};
-static struct arm_gic_softc *arm_gic_sc = NULL;
+static struct arm_gic_softc *gic_sc = NULL;
#define gic_c_read_4(_sc, _reg) \
bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
@@ -128,12 +152,16 @@ static struct arm_gic_softc *arm_gic_sc = NULL;
bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
#define gic_d_read_4(_sc, _reg) \
bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
+#define gic_d_write_1(_sc, _reg, _val) \
+ bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
#define gic_d_write_4(_sc, _reg, _val) \
bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
+#ifndef ARM_INTRNG
static int gic_config_irq(int irq, enum intr_trigger trig,
enum intr_polarity pol);
static void gic_post_filter(void *);
+#endif
static struct ofw_compat_data compat_data[] = {
{"arm,gic", true}, /* Non-standard, used in FreeBSD dts. */
@@ -159,6 +187,72 @@ arm_gic_probe(device_t dev)
return (BUS_PROBE_DEFAULT);
}
+#ifdef ARM_INTRNG
+static inline void
+gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
+{
+
+ gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F)));
+}
+
+static inline void
+gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
+{
+
+ gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F)));
+}
+#endif
+
+#ifdef SMP
+#ifdef ARM_INTRNG
+static void
+arm_gic_init_secondary(device_t dev)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ struct arm_irqsrc *isrc;
+ u_int irq;
+
+ for (irq = 0; irq < sc->nirqs; irq += 4)
+ gic_d_write_4(sc, GICD_IPRIORITYR(irq >> 2), 0);
+
+ /* Set all the interrupts to be in Group 0 (secure) */
+ for (irq = 0; irq < sc->nirqs; irq += 32) {
+ gic_d_write_4(sc, GICD_IGROUPR(irq >> 5), 0);
+ }
+
+ /* Enable CPU interface */
+ gic_c_write_4(sc, GICC_CTLR, 1);
+
+ /* Set priority mask register. */
+ gic_c_write_4(sc, GICC_PMR, 0xff);
+
+ /* Enable interrupt distribution */
+ gic_d_write_4(sc, GICD_CTLR, 0x01);
+
+ /* Unmask attached SGI interrupts. */
+ for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
+ isrc = sc->gic_irqs[irq];
+ if (isrc != NULL && isrc->isrc_handlers != 0) {
+ CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
+ gic_irq_unmask(sc, irq);
+ }
+ }
+
+ /* Unmask attached PPI interrupts. */
+ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
+ isrc = sc->gic_irqs[irq];
+ if (isrc == NULL || isrc->isrc_handlers == 0)
+ continue;
+ if (isrc->isrc_flags & ARM_ISRCF_BOUND) {
+ if (CPU_ISSET(PCPU_GET(cpuid), &isrc->isrc_cpu))
+ gic_irq_unmask(sc, irq);
+ } else {
+ CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
+ gic_irq_unmask(sc, irq);
+ }
+ }
+}
+#else
static void
arm_gic_init_secondary(device_t dev)
{
@@ -189,7 +283,10 @@ arm_gic_init_secondary(device_t dev)
gic_d_write_4(sc, GICD_ISENABLER(29 >> 5), (1UL << (29 & 0x1F)));
gic_d_write_4(sc, GICD_ISENABLER(30 >> 5), (1UL << (30 & 0x1F)));
}
-
+#endif /* ARM_INTRNG */
+#endif /* SMP */
+
+#ifndef ARM_INTRNG
int
gic_decode_fdt(uint32_t iparent, uint32_t *intr, int *interrupt,
int *trig, int *pol)
@@ -234,6 +331,19 @@ gic_decode_fdt(uint32_t iparent, uint32_t *intr, int *interrupt,
}
return (0);
}
+#endif
+
+#ifdef ARM_INTRNG
+static inline intptr_t
+gic_xref(device_t dev)
+{
+#ifdef FDT
+ return (OF_xref_from_node(ofw_bus_get_node(dev)));
+#else
+ return (0);
+#endif
+}
+#endif
static int
arm_gic_attach(device_t dev)
@@ -241,8 +351,11 @@ arm_gic_attach(device_t dev)
struct arm_gic_softc *sc;
int i;
uint32_t icciidr;
+#ifdef ARM_INTRNG
+ intptr_t xref = gic_xref(dev);
+#endif
- if (arm_gic_sc)
+ if (gic_sc)
return (ENXIO);
sc = device_get_softc(dev);
@@ -253,7 +366,7 @@ arm_gic_attach(device_t dev)
}
sc->gic_dev = dev;
- arm_gic_sc = sc;
+ gic_sc = sc;
/* Initialize mutex */
mtx_init(&sc->mutex, "GIC lock", "", MTX_SPIN);
@@ -273,9 +386,14 @@ arm_gic_attach(device_t dev)
sc->nirqs = gic_d_read_4(sc, GICD_TYPER);
sc->nirqs = 32 * ((sc->nirqs & 0x1f) + 1);
+#ifdef ARM_INTRNG
+ sc->gic_irqs = malloc(sc->nirqs * sizeof (*sc->gic_irqs), M_DEVBUF,
+ M_WAITOK | M_ZERO);
+#else
/* Set up function pointers */
arm_post_filter = gic_post_filter;
arm_config_irq = gic_config_irq;
+#endif
icciidr = gic_c_read_4(sc, GICC_IIDR);
device_printf(dev,"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
@@ -311,10 +429,455 @@ arm_gic_attach(device_t dev)
/* Enable interrupt distribution */
gic_d_write_4(sc, GICD_CTLR, 0x01);
+#ifndef ARM_INTRNG
+ return (0);
+#else
+ /*
+	 * Now, when everything is initialized, it's the right time to
+	 * register the interrupt controller with the interrupt framework.
+ */
+ if (arm_pic_register(dev, xref) != 0) {
+ device_printf(dev, "could not register PIC\n");
+ goto cleanup;
+ }
+
+ if (sc->gic_res[2] == NULL) {
+ if (arm_pic_claim_root(dev, xref, arm_gic_intr, sc,
+ GIC_LAST_SGI - GIC_FIRST_SGI + 1) != 0) {
+ device_printf(dev, "could not set PIC as a root\n");
+ arm_pic_unregister(dev, xref);
+ goto cleanup;
+ }
+ } else {
+ if (bus_setup_intr(dev, sc->gic_res[2], INTR_TYPE_CLK,
+ arm_gic_intr, NULL, sc, &sc->gic_intrhand)) {
+ device_printf(dev, "could not setup irq handler\n");
+ arm_pic_unregister(dev, xref);
+ goto cleanup;
+ }
+ }
return (0);
+
+cleanup:
+ /*
+	 * XXX - arm_gic_detach() is not implemented; it should be called here!
+ */
+ if (sc->gic_irqs != NULL)
+ free(sc->gic_irqs, M_DEVBUF);
+ bus_release_resources(dev, arm_gic_spec, sc->gic_res);
+ return(ENXIO);
+#endif
}
+#ifdef ARM_INTRNG
+static int
+arm_gic_intr(void *arg)
+{
+ struct arm_gic_softc *sc = arg;
+ struct arm_irqsrc *isrc;
+ uint32_t irq_active_reg, irq;
+ struct trapframe *tf;
+
+ irq_active_reg = gic_c_read_4(sc, GICC_IAR);
+ irq = irq_active_reg & 0x3FF;
+
+ /*
+ * 1. We do EOI here because recent read value from active interrupt
+ * register must be used for it. Another approach is to save this
+ * value into associated interrupt source.
+ * 2. EOI must be done on same CPU where interrupt has fired. Thus
+ * we must ensure that interrupted thread does not migrate to
+ * another CPU.
+ * 3. EOI cannot be delayed by any preemption which could happen on
+ * critical_exit() used in MI intr code, when interrupt thread is
+ * scheduled. See next point.
+ * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
+ * an action and any use of critical_exit() could break this
+ * assumption. See comments within smp_rendezvous_action().
+ * 5. We always return FILTER_HANDLED as this is an interrupt
+ * controller dispatch function. Otherwise, in cascaded interrupt
+ * case, the whole interrupt subtree would be masked.
+ */
+
+ if (irq >= sc->nirqs) {
+ device_printf(sc->gic_dev, "Spurious interrupt detected\n");
+ gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
+ return (FILTER_HANDLED);
+ }
+
+ tf = curthread->td_intr_frame;
+dispatch_irq:
+ isrc = sc->gic_irqs[irq];
+ if (isrc == NULL) {
+ device_printf(sc->gic_dev, "Stray interrupt %u detected\n", irq);
+ gic_irq_mask(sc, irq);
+ gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
+ goto next_irq;
+ }
+
+ /*
+	 * Note that GIC_FIRST_SGI is zero and is not used in the 'if' statement,
+	 * as the compiler complains that comparing u_int >= 0 is always true.
+ */
+ if (irq <= GIC_LAST_SGI) {
+#ifdef SMP
+ /* Call EOI for all IPI before dispatch. */
+ gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
+ arm_ipi_dispatch(isrc, tf);
+ goto next_irq;
+#else
+ printf("SGI %u on UP system detected\n", irq - GIC_FIRST_SGI);
+ gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
+ goto next_irq;
+#endif
+ }
+
+ if (isrc->isrc_trig == INTR_TRIGGER_EDGE)
+ gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
+
+ arm_irq_dispatch(isrc, tf);
+
+next_irq:
+// arm_irq_memory_barrier(irq); /* XXX */
+// irq_active_reg = gic_c_read_4(sc, GICC_IAR);
+// irq = irq_active_reg & 0x3FF;
+ if (0 && irq < sc->nirqs)
+ goto dispatch_irq;
+
+ return (FILTER_HANDLED);
+}
+
+static int
+gic_attach_isrc(struct arm_gic_softc *sc, struct arm_irqsrc *isrc, u_int irq)
+{
+ const char *name;
+
+ /*
+ * 1. The link between ISRC and controller must be set atomically.
+	 * 2. Do things only once in the rare case when consumers
+	 *    of a shared interrupt arrive here at the same moment.
+ */
+ mtx_lock_spin(&sc->mutex);
+ if (sc->gic_irqs[irq] != NULL) {
+ mtx_unlock_spin(&sc->mutex);
+ return (sc->gic_irqs[irq] == isrc ? 0 : EEXIST);
+ }
+ sc->gic_irqs[irq] = isrc;
+ isrc->isrc_data = irq;
+ mtx_unlock_spin(&sc->mutex);
+
+ name = device_get_nameunit(sc->gic_dev);
+ if (irq <= GIC_LAST_SGI)
+ arm_irq_set_name(isrc, "%s,i%u", name, irq - GIC_FIRST_SGI);
+ else if (irq <= GIC_LAST_PPI)
+ arm_irq_set_name(isrc, "%s,p%u", name, irq - GIC_FIRST_PPI);
+ else
+ arm_irq_set_name(isrc, "%s,s%u", name, irq - GIC_FIRST_SPI);
+ return (0);
+}
+
+static int
+gic_detach_isrc(struct arm_gic_softc *sc, struct arm_irqsrc *isrc, u_int irq)
+{
+
+ mtx_lock_spin(&sc->mutex);
+ if (sc->gic_irqs[irq] != isrc) {
+ mtx_unlock_spin(&sc->mutex);
+ return (sc->gic_irqs[irq] == NULL ? 0 : EINVAL);
+ }
+ sc->gic_irqs[irq] = NULL;
+ isrc->isrc_data = 0;
+ mtx_unlock_spin(&sc->mutex);
+
+ arm_irq_set_name(isrc, "");
+ return (0);
+}
+
+static void
+gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
+ enum intr_polarity pol)
+{
+ uint32_t reg;
+ uint32_t mask;
+
+ if (irq < GIC_FIRST_SPI)
+ return;
+
+ mtx_lock_spin(&sc->mutex);
+
+ reg = gic_d_read_4(sc, GICD_ICFGR(irq >> 4));
+ mask = (reg >> 2*(irq % 16)) & 0x3;
+
+ if (pol == INTR_POLARITY_LOW) {
+ mask &= ~GICD_ICFGR_POL_MASK;
+ mask |= GICD_ICFGR_POL_LOW;
+ } else if (pol == INTR_POLARITY_HIGH) {
+ mask &= ~GICD_ICFGR_POL_MASK;
+ mask |= GICD_ICFGR_POL_HIGH;
+ }
+
+ if (trig == INTR_TRIGGER_LEVEL) {
+ mask &= ~GICD_ICFGR_TRIG_MASK;
+ mask |= GICD_ICFGR_TRIG_LVL;
+ } else if (trig == INTR_TRIGGER_EDGE) {
+ mask &= ~GICD_ICFGR_TRIG_MASK;
+ mask |= GICD_ICFGR_TRIG_EDGE;
+ }
+
+ /* Set mask */
+ reg = reg & ~(0x3 << 2*(irq % 16));
+ reg = reg | (mask << 2*(irq % 16));
+ gic_d_write_4(sc, GICD_ICFGR(irq >> 4), reg);
+
+ mtx_unlock_spin(&sc->mutex);
+}
+
+static int
+gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
+{
+ uint32_t cpu, end, mask;
+
+ end = min(mp_ncpus, 8);
+ for (cpu = end; cpu < MAXCPU; cpu++)
+ if (CPU_ISSET(cpu, cpus))
+ return (EINVAL);
+
+ for (mask = 0, cpu = 0; cpu < end; cpu++)
+ if (CPU_ISSET(cpu, cpus))
+ mask |= 1 << cpu;
+
+ gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
+ return (0);
+}
+
+static int
+gic_irq_from_nspc(struct arm_gic_softc *sc, u_int type, u_int num, u_int *irqp)
+{
+
+ switch (type) {
+ case ARM_IRQ_NSPC_PLAIN:
+ *irqp = num;
+ return (*irqp < sc->nirqs ? 0 : EINVAL);
+
+ case ARM_IRQ_NSPC_IRQ:
+ *irqp = num + GIC_FIRST_PPI;
+ return (*irqp < sc->nirqs ? 0 : EINVAL);
+
+ case ARM_IRQ_NSPC_IPI:
+ *irqp = num + GIC_FIRST_SGI;
+ return (*irqp < GIC_LAST_SGI ? 0 : EINVAL);
+
+ default:
+ return (EINVAL);
+ }
+}
+
+static int
+gic_map_nspc(struct arm_gic_softc *sc, struct arm_irqsrc *isrc, u_int *irqp)
+{
+ int error;
+
+ error = gic_irq_from_nspc(sc, isrc->isrc_nspc_type, isrc->isrc_nspc_num,
+ irqp);
+ if (error != 0)
+ return (error);
+ return (gic_attach_isrc(sc, isrc, *irqp));
+}
+
+#ifdef FDT
+static int
+gic_map_fdt(struct arm_gic_softc *sc, struct arm_irqsrc *isrc, u_int *irqp)
+{
+ u_int irq, tripol;
+ enum intr_trigger trig;
+ enum intr_polarity pol;
+ int error;
+
+ if (isrc->isrc_ncells == 1) {
+ irq = isrc->isrc_cells[0];
+ pol = INTR_POLARITY_CONFORM;
+ trig = INTR_TRIGGER_CONFORM;
+ } else if (isrc->isrc_ncells == 3) {
+ if (isrc->isrc_cells[0] == 0)
+ irq = isrc->isrc_cells[1] + GIC_FIRST_SPI;
+ else
+ irq = isrc->isrc_cells[1] + GIC_FIRST_PPI;
+
+ /*
+ * In intr[2], bits[3:0] are trigger type and level flags.
+ * 1 = low-to-high edge triggered
+ * 2 = high-to-low edge triggered
+ * 4 = active high level-sensitive
+ * 8 = active low level-sensitive
+ * The hardware only supports active-high-level or rising-edge.
+ */
+ tripol = isrc->isrc_cells[2];
+ if (tripol & 0x0a) {
+ printf("unsupported trigger/polarity configuration "
+ "0x%2x\n", tripol & 0x0f);
+ return (ENOTSUP);
+ }
+ pol = INTR_POLARITY_CONFORM;
+ if (tripol & 0x01)
+ trig = INTR_TRIGGER_EDGE;
+ else
+ trig = INTR_TRIGGER_LEVEL;
+ } else
+ return (EINVAL);
+
+ if (irq >= sc->nirqs)
+ return (EINVAL);
+
+ error = gic_attach_isrc(sc, isrc, irq);
+ if (error != 0)
+ return (error);
+
+ isrc->isrc_nspc_type = ARM_IRQ_NSPC_PLAIN;
+ isrc->isrc_nspc_num = irq;
+ isrc->isrc_trig = trig;
+ isrc->isrc_pol = pol;
+
+ *irqp = irq;
+ return (0);
+}
+#endif
+
+static int
+arm_gic_register(device_t dev, struct arm_irqsrc *isrc, boolean_t *is_percpu)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ u_int irq;
+ int error;
+
+ if (isrc->isrc_type == ARM_ISRCT_NAMESPACE)
+ error = gic_map_nspc(sc, isrc, &irq);
+#ifdef FDT
+ else if (isrc->isrc_type == ARM_ISRCT_FDT)
+ error = gic_map_fdt(sc, isrc, &irq);
+#endif
+ else
+ return (EINVAL);
+
+ if (error == 0)
+ *is_percpu = irq < GIC_FIRST_SPI ? TRUE : FALSE;
+ return (error);
+}
+
+static void
+arm_gic_enable_intr(device_t dev, struct arm_irqsrc *isrc)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ u_int irq = isrc->isrc_data;
+
+ if (isrc->isrc_trig == INTR_TRIGGER_CONFORM)
+ isrc->isrc_trig = INTR_TRIGGER_LEVEL;
+
+ /*
+	 * XXX - If a per-CPU interrupt is enabled after SMP has already
+	 *       started, we need some IPI call that enables it on the other
+	 *       CPUs. Further, it's more complicated, as pic_enable_source()
+	 *       and pic_disable_source() should act on a per-CPU basis only.
+	 *       Thus, it should be solved here somehow.
+ */
+ if (isrc->isrc_flags & ARM_ISRCF_PERCPU)
+ CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
+
+ gic_config(sc, irq, isrc->isrc_trig, isrc->isrc_pol);
+ arm_gic_bind(dev, isrc);
+}
+
+static void
+arm_gic_enable_source(device_t dev, struct arm_irqsrc *isrc)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ u_int irq = isrc->isrc_data;
+
+ arm_irq_memory_barrier(irq);
+ gic_irq_unmask(sc, irq);
+}
+
+static void
+arm_gic_disable_source(device_t dev, struct arm_irqsrc *isrc)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ u_int irq = isrc->isrc_data;
+
+ gic_irq_mask(sc, irq);
+}
+
+static int
+arm_gic_unregister(device_t dev, struct arm_irqsrc *isrc)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ u_int irq = isrc->isrc_data;
+
+ return (gic_detach_isrc(sc, isrc, irq));
+}
+
+static void
+arm_gic_pre_ithread(device_t dev, struct arm_irqsrc *isrc)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+
+ arm_gic_disable_source(dev, isrc);
+ gic_c_write_4(sc, GICC_EOIR, isrc->isrc_data);
+}
+
+static void
+arm_gic_post_ithread(device_t dev, struct arm_irqsrc *isrc)
+{
+
+ arm_irq_memory_barrier(0);
+ arm_gic_enable_source(dev, isrc);
+}
+
+static void
+arm_gic_post_filter(device_t dev, struct arm_irqsrc *isrc)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+
+ /* EOI for edge-triggered done earlier. */
+ if (isrc->isrc_trig == INTR_TRIGGER_EDGE)
+ return;
+
+ arm_irq_memory_barrier(0);
+ gic_c_write_4(sc, GICC_EOIR, isrc->isrc_data);
+}
+
+#ifdef SMP
+static int
+arm_gic_bind(device_t dev, struct arm_irqsrc *isrc)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ uint32_t irq = isrc->isrc_data;
+
+ if (irq < GIC_FIRST_SPI)
+ return (EINVAL);
+
+ if (CPU_EMPTY(&isrc->isrc_cpu)) {
+ gic_irq_cpu = arm_irq_next_cpu(gic_irq_cpu, &all_cpus);
+ CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
+ }
+ return (gic_bind(sc, irq, &isrc->isrc_cpu));
+}
+
+static void
+arm_gic_ipi_send(device_t dev, struct arm_irqsrc *isrc, cpuset_t cpus)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ uint32_t irq, val = 0, i;
+
+ irq = isrc->isrc_data;
+
+ for (i = 0; i < MAXCPU; i++)
+ if (CPU_ISSET(i, &cpus))
+ val |= 1 << (16 + i);
+
+ gic_d_write_4(sc, GICD_SGIR(0), val | irq);
+}
+#endif
+#else
static int
arm_gic_next_irq(struct arm_gic_softc *sc, int last_irq)
{
@@ -327,7 +890,7 @@ arm_gic_next_irq(struct arm_gic_softc *sc, int last_irq)
* bits (ie CPU number), not just the IRQ number, and we do not
* have this information later.
*/
- if ((active_irq & 0x3ff) <= GIC_LAST_IPI)
+ if ((active_irq & 0x3ff) <= GIC_LAST_SGI)
gic_c_write_4(sc, GICC_EOIR, active_irq);
active_irq &= 0x3FF;
@@ -400,7 +963,7 @@ arm_gic_mask(device_t dev, int irq)
struct arm_gic_softc *sc = device_get_softc(dev);
gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F)));
- gic_c_write_4(sc, GICC_EOIR, irq);
+ gic_c_write_4(sc, GICC_EOIR, irq); /* XXX - not allowed */
}
static void
@@ -408,7 +971,7 @@ arm_gic_unmask(device_t dev, int irq)
{
struct arm_gic_softc *sc = device_get_softc(dev);
- if (irq > GIC_LAST_IPI)
+ if (irq > GIC_LAST_SGI)
arm_irq_memory_barrier(irq);
gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F)));
@@ -455,10 +1018,10 @@ arm_gic_ipi_clear(device_t dev, int ipi)
static void
gic_post_filter(void *arg)
{
- struct arm_gic_softc *sc = arm_gic_sc;
+ struct arm_gic_softc *sc = gic_sc;
uintptr_t irq = (uintptr_t) arg;
- if (irq > GIC_LAST_IPI)
+ if (irq > GIC_LAST_SGI)
arm_irq_memory_barrier(irq);
gic_c_write_4(sc, GICC_EOIR, irq);
}
@@ -467,64 +1030,81 @@ static int
gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol)
{
- return (arm_gic_config(arm_gic_sc->gic_dev, irq, trig, pol));
+ return (arm_gic_config(gic_sc->gic_dev, irq, trig, pol));
}
void
arm_mask_irq(uintptr_t nb)
{
- arm_gic_mask(arm_gic_sc->gic_dev, nb);
+ arm_gic_mask(gic_sc->gic_dev, nb);
}
void
arm_unmask_irq(uintptr_t nb)
{
- arm_gic_unmask(arm_gic_sc->gic_dev, nb);
+ arm_gic_unmask(gic_sc->gic_dev, nb);
}
int
arm_get_next_irq(int last_irq)
{
- return (arm_gic_next_irq(arm_gic_sc, last_irq));
+ return (arm_gic_next_irq(gic_sc, last_irq));
}
+#ifdef SMP
void
arm_pic_init_secondary(void)
{
- arm_gic_init_secondary(arm_gic_sc->gic_dev);
+ arm_gic_init_secondary(gic_sc->gic_dev);
}
-#ifdef SMP
void
pic_ipi_send(cpuset_t cpus, u_int ipi)
{
- arm_gic_ipi_send(arm_gic_sc->gic_dev, cpus, ipi);
+ arm_gic_ipi_send(gic_sc->gic_dev, cpus, ipi);
}
int
pic_ipi_read(int i)
{
- return (arm_gic_ipi_read(arm_gic_sc->gic_dev, i));
+ return (arm_gic_ipi_read(gic_sc->gic_dev, i));
}
void
pic_ipi_clear(int ipi)
{
- arm_gic_ipi_clear(arm_gic_sc->gic_dev, ipi);
+ arm_gic_ipi_clear(gic_sc->gic_dev, ipi);
}
#endif
+#endif /* ARM_INTRNG */
static device_method_t arm_gic_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, arm_gic_probe),
DEVMETHOD(device_attach, arm_gic_attach),
+#ifdef ARM_INTRNG
+ /* Interrupt controller interface */
+ DEVMETHOD(pic_disable_source, arm_gic_disable_source),
+ DEVMETHOD(pic_enable_intr, arm_gic_enable_intr),
+ DEVMETHOD(pic_enable_source, arm_gic_enable_source),
+ DEVMETHOD(pic_post_filter, arm_gic_post_filter),
+ DEVMETHOD(pic_post_ithread, arm_gic_post_ithread),
+ DEVMETHOD(pic_pre_ithread, arm_gic_pre_ithread),
+ DEVMETHOD(pic_register, arm_gic_register),
+ DEVMETHOD(pic_unregister, arm_gic_unregister),
+#ifdef SMP
+ DEVMETHOD(pic_bind, arm_gic_bind),
+ DEVMETHOD(pic_init_secondary, arm_gic_init_secondary),
+ DEVMETHOD(pic_ipi_send, arm_gic_ipi_send),
+#endif
+#endif
{ 0, 0 }
};
diff --git a/sys/arm/arm/intrng.c b/sys/arm/arm/intrng.c
new file mode 100644
index 0000000..c30ec68
--- /dev/null
+++ b/sys/arm/arm/intrng.c
@@ -0,0 +1,1458 @@
+/*-
+ * Copyright (c) 2012-2014 Jakub Wojciech Klama <jceel@FreeBSD.org>.
+ * Copyright (c) 2015 Svatopluk Kraus
+ * Copyright (c) 2015 Michal Meloun
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * ARM Interrupt Framework
+ *
+ * TODO: - to support IPI (PPI) enabling on other CPUs if already started
+ * - to complete things for removable PICs
+ */
+
+#include "opt_ddb.h"
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/syslog.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/conf.h>
+#include <sys/cpuset.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <machine/atomic.h>
+#include <machine/intr.h>
+#include <machine/cpu.h>
+#include <machine/smp.h>
+#include <machine/stdarg.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/fdt/fdt_common.h>
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+#include "pic_if.h"
+
+#define INTRNAME_LEN (2*MAXCOMLEN + 1)
+
+#ifdef DEBUG
+#define debugf(fmt, args...) do { printf("%s(): ", __func__); \
+ printf(fmt,##args); } while (0)
+#else
+#define debugf(fmt, args...)
+#endif
+
+MALLOC_DECLARE(M_INTRNG);
+MALLOC_DEFINE(M_INTRNG, "intrng", "ARM interrupt handling");
+
+/* Main ARM interrupt handler called from assembler -> 'hidden' for C code. */
+void arm_irq_handler(struct trapframe *tf);
+
+/* Root interrupt controller stuff. */
+static struct arm_irqsrc *irq_root_isrc;
+static device_t irq_root_dev;
+static arm_irq_filter_t *irq_root_filter;
+static void *irq_root_arg;
+static u_int irq_root_ipicount;
+
+/* Interrupt controller definition. */
+struct arm_pic {
+ SLIST_ENTRY(arm_pic) pic_next;
+ intptr_t pic_xref; /* hardware identification */
+ device_t pic_dev;
+};
+
+static struct mtx pic_list_lock;
+static SLIST_HEAD(, arm_pic) pic_list;
+
+static struct arm_pic *pic_lookup(device_t dev, intptr_t xref);
+
+/* Interrupt source definition. */
+static struct mtx isrc_table_lock;
+static struct arm_irqsrc *irq_sources[NIRQ];
+u_int irq_next_free;
+
+#define IRQ_INVALID nitems(irq_sources)
+
+#ifdef SMP
+static boolean_t irq_assign_cpu = FALSE;
+
+static struct arm_irqsrc ipi_sources[ARM_IPI_COUNT];
+static u_int ipi_next_num;
+#endif
+
+/*
+ * - 2 counters for each I/O interrupt.
+ * - MAXCPU counters for each IPI, for SMP.
+ */
+#ifdef SMP
+#define INTRCNT_COUNT (NIRQ * 2 + ARM_IPI_COUNT * MAXCPU)
+#else
+#define INTRCNT_COUNT (NIRQ * 2)
+#endif
+
+/* Data for MI statistics reporting. */
+u_long intrcnt[INTRCNT_COUNT];
+char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
+size_t sintrcnt = sizeof(intrcnt);
+size_t sintrnames = sizeof(intrnames);
+static u_int intrcnt_index;
+
+/*
+ * Interrupt framework initialization routine.
+ */
+static void
+arm_irq_init(void *dummy __unused)
+{
+
+ SLIST_INIT(&pic_list);
+ mtx_init(&pic_list_lock, "arm pic list", NULL, MTX_DEF);
+ mtx_init(&isrc_table_lock, "arm isrc table", NULL, MTX_DEF);
+}
+SYSINIT(arm_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, arm_irq_init, NULL);
+
+static void
+intrcnt_setname(const char *name, int index)
+{
+
+ snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
+ INTRNAME_LEN - 1, name);
+}
+
+/*
+ * Update name for interrupt source with interrupt event.
+ */
+static void
+intrcnt_updatename(struct arm_irqsrc *isrc)
+{
+
+ /* QQQ: What about stray counter name? */
+ mtx_assert(&isrc_table_lock, MA_OWNED);
+ intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
+}
+
+/*
+ * Virtualization for interrupt source interrupt counter increment.
+ */
+static inline void
+isrc_increment_count(struct arm_irqsrc *isrc)
+{
+
+ /*
+	 * XXX - This should be atomic for PPI interrupts. It was proven that
+	 *       the loss is easily measurable for timer PPI interrupts.
+ */
+ isrc->isrc_count[0]++;
+ /*atomic_add_long(&isrc->isrc_count[0], 1);*/
+}
+
+/*
+ * Virtualization for interrupt source interrupt stray counter increment.
+ */
+static inline void
+isrc_increment_straycount(struct arm_irqsrc *isrc)
+{
+
+ isrc->isrc_count[1]++;
+}
+
+/*
+ * Virtualization for interrupt source interrupt name update.
+ */
+static void
+isrc_update_name(struct arm_irqsrc *isrc, const char *name)
+{
+ char str[INTRNAME_LEN];
+
+ mtx_assert(&isrc_table_lock, MA_OWNED);
+
+ if (name != NULL) {
+ snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
+ intrcnt_setname(str, isrc->isrc_index);
+ snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
+ name);
+ intrcnt_setname(str, isrc->isrc_index + 1);
+ } else {
+ snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
+ intrcnt_setname(str, isrc->isrc_index);
+ snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
+ intrcnt_setname(str, isrc->isrc_index + 1);
+ }
+}
+
+/*
+ * Virtualization for interrupt source interrupt counters setup.
+ */
+static void
+isrc_setup_counters(struct arm_irqsrc *isrc)
+{
+ u_int index;
+
+ /*
+ * XXX - it does not work well with removable controllers and
+ * interrupt sources !!!
+ */
+ index = atomic_fetchadd_int(&intrcnt_index, 2);
+ isrc->isrc_index = index;
+ isrc->isrc_count = &intrcnt[index];
+ isrc_update_name(isrc, NULL);
+}
+
+#ifdef SMP
+/*
+ * Virtualization for interrupt source IPI counter increment.
+ */
+static inline void
+isrc_increment_ipi_count(struct arm_irqsrc *isrc, u_int cpu)
+{
+
+ isrc->isrc_count[cpu]++;
+}
+
+/*
+ * Virtualization for interrupt source IPI counters setup.
+ */
+static void
+isrc_setup_ipi_counters(struct arm_irqsrc *isrc, const char *name)
+{
+ u_int index, i;
+ char str[INTRNAME_LEN];
+
+ index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
+ isrc->isrc_index = index;
+ isrc->isrc_count = &intrcnt[index];
+
+ for (i = 0; i < MAXCPU; i++) {
+ /*
+ * We do not expect any race in IPI case here,
+ * so locking is not needed.
+ */
+ snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
+ intrcnt_setname(str, index + i);
+ }
+}
+#endif
+
+/*
+ * Main ARM interrupt dispatch handler. It is called straight
+ * from the assembler, where the CPU interrupt is served.
+ */
+void
+arm_irq_handler(struct trapframe *tf)
+{
+ struct trapframe * oldframe;
+ struct thread * td;
+
+ KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));
+
+ PCPU_INC(cnt.v_intr);
+ critical_enter();
+ td = curthread;
+ oldframe = td->td_intr_frame;
+ td->td_intr_frame = tf;
+ irq_root_filter(irq_root_arg);
+ td->td_intr_frame = oldframe;
+ critical_exit();
+}
+
+/*
+ * ARM interrupt controller dispatch function for interrupts. It should
+ * be called straight from the interrupt controller, when the associated
+ * interrupt source is learned.
+ */
+void
+arm_irq_dispatch(struct arm_irqsrc *isrc, struct trapframe *tf)
+{
+
+ KASSERT(isrc != NULL, ("%s: no source", __func__));
+
+ isrc_increment_count(isrc);
+
+#ifdef INTR_SOLO
+ if (isrc->isrc_filter != NULL) {
+ int error;
+ error = isrc->isrc_filter(isrc->isrc_arg, tf);
+ PIC_POST_FILTER(isrc->isrc_dev, isrc);
+ if (error == FILTER_HANDLED)
+ return;
+ } else
+#endif
+ if (isrc->isrc_event != NULL) {
+ if (intr_event_handle(isrc->isrc_event, tf) == 0)
+ return;
+ }
+
+ isrc_increment_straycount(isrc);
+ PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);
+
+	device_printf(isrc->isrc_dev, "stray irq <%s> disabled\n",
+ isrc->isrc_name);
+}
+
+/*
+ * Allocate interrupt source.
+ */
+static struct arm_irqsrc *
+isrc_alloc(u_int type, u_int extsize)
+{
+ struct arm_irqsrc *isrc;
+
+ isrc = malloc(sizeof(*isrc) + extsize, M_INTRNG, M_WAITOK | M_ZERO);
+ isrc->isrc_irq = IRQ_INVALID; /* just to be safe */
+ isrc->isrc_type = type;
+ isrc->isrc_nspc_type = ARM_IRQ_NSPC_NONE;
+ isrc->isrc_trig = INTR_TRIGGER_CONFORM;
+ isrc->isrc_pol = INTR_POLARITY_CONFORM;
+ CPU_ZERO(&isrc->isrc_cpu);
+ return (isrc);
+}
+
+/*
+ * Free interrupt source.
+ */
+static void
+isrc_free(struct arm_irqsrc *isrc)
+{
+
+ free(isrc, M_INTRNG);
+}
+
+void
+arm_irq_set_name(struct arm_irqsrc *isrc, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(isrc->isrc_name, ARM_ISRC_NAMELEN, fmt, ap);
+ va_end(ap);
+}
+
+/*
+ * Alloc unique interrupt number (resource handle) for interrupt source.
+ *
+ * There are various possible strategies for allocating a free interrupt
+ * number (resource handle) for a new interrupt source.
+ *
+ * 1. Handles are always allocated forward, so handles are not recycled
+ *    immediately. However, if only one free handle is left, it is reused
+ *    constantly...
+ */
+static int
+isrc_alloc_irq_locked(struct arm_irqsrc *isrc)
+{
+ u_int maxirqs, irq;
+
+ mtx_assert(&isrc_table_lock, MA_OWNED);
+
+ maxirqs = nitems(irq_sources);
+ if (irq_next_free >= maxirqs)
+ return (ENOSPC);
+
+ for (irq = irq_next_free; irq < maxirqs; irq++) {
+ if (irq_sources[irq] == NULL)
+ goto found;
+ }
+ for (irq = 0; irq < irq_next_free; irq++) {
+ if (irq_sources[irq] == NULL)
+ goto found;
+ }
+
+ irq_next_free = maxirqs;
+ return (ENOSPC);
+
+found:
+ isrc->isrc_irq = irq;
+ irq_sources[irq] = isrc;
+
+ arm_irq_set_name(isrc, "irq%u", irq);
+ isrc_setup_counters(isrc);
+
+ irq_next_free = irq + 1;
+ if (irq_next_free >= maxirqs)
+ irq_next_free = 0;
+ return (0);
+}
+#ifdef notyet
+/*
+ * Free unique interrupt number (resource handle) from interrupt source.
+ */
+static int
+isrc_free_irq(struct arm_irqsrc *isrc)
+{
+ u_int maxirqs;
+
+ mtx_assert(&isrc_table_lock, MA_NOTOWNED);
+
+ maxirqs = nitems(irq_sources);
+ if (isrc->isrc_irq >= maxirqs)
+ return (EINVAL);
+
+ mtx_lock(&isrc_table_lock);
+ if (irq_sources[isrc->isrc_irq] != isrc) {
+ mtx_unlock(&isrc_table_lock);
+ return (EINVAL);
+ }
+
+ irq_sources[isrc->isrc_irq] = NULL;
+ isrc->isrc_irq = IRQ_INVALID; /* just to be safe */
+ mtx_unlock(&isrc_table_lock);
+
+ return (0);
+}
+#endif
+/*
+ * Lookup interrupt source by interrupt number (resource handle).
+ */
+static struct arm_irqsrc *
+isrc_lookup(u_int irq)
+{
+
+ if (irq < nitems(irq_sources))
+ return (irq_sources[irq]);
+ return (NULL);
+}
+
+/*
+ * Lookup interrupt source by namespace description.
+ */
+static struct arm_irqsrc *
+isrc_namespace_lookup(device_t dev, uint16_t type, uint16_t num)
+{
+ u_int irq;
+ struct arm_irqsrc *isrc;
+
+ mtx_assert(&isrc_table_lock, MA_OWNED);
+
+ for (irq = 0; irq < nitems(irq_sources); irq++) {
+ isrc = irq_sources[irq];
+ if (isrc != NULL && isrc->isrc_dev == dev &&
+ isrc->isrc_nspc_type == type && isrc->isrc_nspc_num == num)
+ return (isrc);
+ }
+ return (NULL);
+}
+
+/*
+ * Map an interrupt source into the framework according to its namespace
+ * description. If no such mapping exists, create it. Return the unique
+ * interrupt number (resource handle) associated with the mapped source.
+ */
+u_int
+arm_namespace_map_irq(device_t dev, uint16_t type, uint16_t num)
+{
+ struct arm_irqsrc *isrc, *new_isrc;
+ int error;
+
+ new_isrc = isrc_alloc(ARM_ISRCT_NAMESPACE, 0);
+
+ mtx_lock(&isrc_table_lock);
+ isrc = isrc_namespace_lookup(dev, type, num);
+ if (isrc != NULL) {
+ mtx_unlock(&isrc_table_lock);
+ isrc_free(new_isrc);
+ return (isrc->isrc_irq); /* already mapped */
+ }
+
+ error = isrc_alloc_irq_locked(new_isrc);
+ if (error != 0) {
+ mtx_unlock(&isrc_table_lock);
+ isrc_free(new_isrc);
+ return (IRQ_INVALID); /* no space left */
+ }
+
+ new_isrc->isrc_dev = dev;
+ new_isrc->isrc_nspc_type = type;
+ new_isrc->isrc_nspc_num = num;
+ mtx_unlock(&isrc_table_lock);
+
+ return (new_isrc->isrc_irq);
+}
+
+#ifdef FDT
+/*
+ * Lookup interrupt source by FDT description.
+ */
+static struct arm_irqsrc *
+isrc_fdt_lookup(intptr_t xref, pcell_t *cells, u_int ncells)
+{
+ u_int irq, cellsize;
+ struct arm_irqsrc *isrc;
+
+ mtx_assert(&isrc_table_lock, MA_OWNED);
+
+ cellsize = ncells * sizeof(*cells);
+ for (irq = 0; irq < nitems(irq_sources); irq++) {
+ isrc = irq_sources[irq];
+ if (isrc != NULL && isrc->isrc_type == ARM_ISRCT_FDT &&
+ isrc->isrc_xref == xref && isrc->isrc_ncells == ncells &&
+ memcmp(isrc->isrc_cells, cells, cellsize) == 0)
+ return (isrc);
+ }
+ return (NULL);
+}
+
+/*
+ * Map an interrupt source into the framework according to its FDT data.
+ * If no such mapping exists, create it. Return the unique interrupt
+ * number (resource handle) associated with the mapped interrupt source.
+ */
+u_int
+arm_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
+{
+ struct arm_irqsrc *isrc, *new_isrc;
+ u_int cellsize;
+ intptr_t xref;
+ int error;
+
+ xref = (intptr_t)node; /* It's so simple for now. */
+
+ cellsize = ncells * sizeof(*cells);
+ new_isrc = isrc_alloc(ARM_ISRCT_FDT, cellsize);
+
+ mtx_lock(&isrc_table_lock);
+ isrc = isrc_fdt_lookup(xref, cells, ncells);
+ if (isrc != NULL) {
+ mtx_unlock(&isrc_table_lock);
+ isrc_free(new_isrc);
+ return (isrc->isrc_irq); /* already mapped */
+ }
+
+ error = isrc_alloc_irq_locked(new_isrc);
+ if (error != 0) {
+ mtx_unlock(&isrc_table_lock);
+ isrc_free(new_isrc);
+ return (IRQ_INVALID); /* no space left */
+ }
+
+ new_isrc->isrc_xref = xref;
+ new_isrc->isrc_ncells = ncells;
+ memcpy(new_isrc->isrc_cells, cells, cellsize);
+ mtx_unlock(&isrc_table_lock);
+
+ return (new_isrc->isrc_irq);
+}
+#endif
+
+/*
+ * Register interrupt source into interrupt controller.
+ */
+static int
+isrc_register(struct arm_irqsrc *isrc)
+{
+ struct arm_pic *pic;
+ boolean_t is_percpu;
+ int error;
+
+ if (isrc->isrc_flags & ARM_ISRCF_REGISTERED)
+ return (0);
+
+ if (isrc->isrc_dev == NULL) {
+ pic = pic_lookup(NULL, isrc->isrc_xref);
+ if (pic == NULL || pic->pic_dev == NULL)
+ return (ESRCH);
+ isrc->isrc_dev = pic->pic_dev;
+ }
+
+ error = PIC_REGISTER(isrc->isrc_dev, isrc, &is_percpu);
+ if (error != 0)
+ return (error);
+
+ mtx_lock(&isrc_table_lock);
+ isrc->isrc_flags |= ARM_ISRCF_REGISTERED;
+ if (is_percpu)
+ isrc->isrc_flags |= ARM_ISRCF_PERCPU;
+ isrc_update_name(isrc, NULL);
+ mtx_unlock(&isrc_table_lock);
+ return (0);
+}
+
+#ifdef INTR_SOLO
+/*
+ * Set up a filter on an interrupt source.
+ */
+static int
+isrc_setup_filter(struct arm_irqsrc *isrc, const char *name,
+ arm_irq_filter_t *filter, void *arg, void **cookiep)
+{
+
+ if (filter == NULL)
+ return (EINVAL);
+
+ mtx_lock(&isrc_table_lock);
+ /*
+	 * Make sure that we do not mix the two ways in which
+	 * we handle interrupt sources.
+ */
+ if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
+ mtx_unlock(&isrc_table_lock);
+ return (EBUSY);
+ }
+ isrc->isrc_filter = filter;
+ isrc->isrc_arg = arg;
+ isrc_update_name(isrc, name);
+ mtx_unlock(&isrc_table_lock);
+
+ *cookiep = isrc;
+ return (0);
+}
+#endif
+
+/*
+ * Interrupt source pre_ithread method for MI interrupt framework.
+ */
+static void
+arm_isrc_pre_ithread(void *arg)
+{
+ struct arm_irqsrc *isrc = arg;
+
+ PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
+}
+
+/*
+ * Interrupt source post_ithread method for MI interrupt framework.
+ */
+static void
+arm_isrc_post_ithread(void *arg)
+{
+ struct arm_irqsrc *isrc = arg;
+
+ PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
+}
+
+/*
+ * Interrupt source post_filter method for MI interrupt framework.
+ */
+static void
+arm_isrc_post_filter(void *arg)
+{
+ struct arm_irqsrc *isrc = arg;
+
+ PIC_POST_FILTER(isrc->isrc_dev, isrc);
+}
+
+/*
+ * Interrupt source assign_cpu method for MI interrupt framework.
+ */
+static int
+arm_isrc_assign_cpu(void *arg, int cpu)
+{
+#ifdef SMP
+ struct arm_irqsrc *isrc = arg;
+ int error;
+
+ if (isrc->isrc_dev != irq_root_dev)
+ return (EINVAL);
+
+ mtx_lock(&isrc_table_lock);
+ if (cpu == NOCPU) {
+ CPU_ZERO(&isrc->isrc_cpu);
+ isrc->isrc_flags &= ~ARM_ISRCF_BOUND;
+ } else {
+ CPU_SETOF(cpu, &isrc->isrc_cpu);
+ isrc->isrc_flags |= ARM_ISRCF_BOUND;
+ }
+
+ /*
+	 * In the NOCPU case, it's up to the PIC to either leave the ISRC on
+	 * the same CPU, re-balance it to another CPU, or enable it on more
+	 * CPUs. However, the PIC is expected to change isrc_cpu appropriately
+	 * to keep us well informed if the call is successful.
+ */
+ if (irq_assign_cpu) {
+ error = PIC_BIND(isrc->isrc_dev, isrc);
+ if (error) {
+ CPU_ZERO(&isrc->isrc_cpu);
+ mtx_unlock(&isrc_table_lock);
+ return (error);
+ }
+ }
+ mtx_unlock(&isrc_table_lock);
+ return (0);
+#else
+ return (EOPNOTSUPP);
+#endif
+}
+
+/*
+ * Create interrupt event for interrupt source.
+ */
+static int
+isrc_event_create(struct arm_irqsrc *isrc)
+{
+ struct intr_event *ie;
+ int error;
+
+ error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
+ arm_isrc_pre_ithread, arm_isrc_post_ithread, arm_isrc_post_filter,
+ arm_isrc_assign_cpu, "%s:", isrc->isrc_name);
+ if (error)
+ return (error);
+
+ mtx_lock(&isrc_table_lock);
+ /*
+	 * Make sure that we do not mix the two ways in which we handle
+	 * interrupt sources. Let the contested event win.
+ */
+ if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
+ mtx_unlock(&isrc_table_lock);
+ intr_event_destroy(ie);
+ return (isrc->isrc_event != NULL ? EBUSY : 0);
+ }
+ isrc->isrc_event = ie;
+ mtx_unlock(&isrc_table_lock);
+
+ return (0);
+}
+#ifdef notyet
+/*
+ * Destroy interrupt event for interrupt source.
+ */
+static void
+isrc_event_destroy(struct arm_irqsrc *isrc)
+{
+ struct intr_event *ie;
+
+ mtx_lock(&isrc_table_lock);
+ ie = isrc->isrc_event;
+ isrc->isrc_event = NULL;
+ mtx_unlock(&isrc_table_lock);
+
+ if (ie != NULL)
+ intr_event_destroy(ie);
+}
+#endif
+/*
+ * Add handler to interrupt source.
+ */
+static int
+isrc_add_handler(struct arm_irqsrc *isrc, const char *name,
+ driver_filter_t filter, driver_intr_t handler, void *arg,
+ enum intr_type flags, void **cookiep)
+{
+ int error;
+
+ if (isrc->isrc_event == NULL) {
+ error = isrc_event_create(isrc);
+ if (error)
+ return (error);
+ }
+
+ error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
+ arg, intr_priority(flags), flags, cookiep);
+ if (error == 0) {
+ mtx_lock(&isrc_table_lock);
+ intrcnt_updatename(isrc);
+ mtx_unlock(&isrc_table_lock);
+ }
+
+ return (error);
+}
+
+/*
+ * Lookup interrupt controller locked.
+ */
+static struct arm_pic *
+pic_lookup_locked(device_t dev, intptr_t xref)
+{
+ struct arm_pic *pic;
+
+ mtx_assert(&pic_list_lock, MA_OWNED);
+
+ SLIST_FOREACH(pic, &pic_list, pic_next) {
+ if (pic->pic_xref != xref)
+ continue;
+ if (pic->pic_xref != 0 || pic->pic_dev == dev)
+ return (pic);
+ }
+ return (NULL);
+}
+
+/*
+ * Lookup interrupt controller.
+ */
+static struct arm_pic *
+pic_lookup(device_t dev, intptr_t xref)
+{
+ struct arm_pic *pic;
+
+ mtx_lock(&pic_list_lock);
+ pic = pic_lookup_locked(dev, xref);
+ mtx_unlock(&pic_list_lock);
+
+ return (pic);
+}
+
+/*
+ * Create interrupt controller.
+ */
+static struct arm_pic *
+pic_create(device_t dev, intptr_t xref)
+{
+ struct arm_pic *pic;
+
+ mtx_lock(&pic_list_lock);
+ pic = pic_lookup_locked(dev, xref);
+ if (pic != NULL) {
+ mtx_unlock(&pic_list_lock);
+ return (pic);
+ }
+ pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
+ pic->pic_xref = xref;
+ pic->pic_dev = dev;
+ SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
+ mtx_unlock(&pic_list_lock);
+
+ return (pic);
+}
+#ifdef notyet
+/*
+ * Destroy interrupt controller.
+ */
+static void
+pic_destroy(device_t dev, intptr_t xref)
+{
+ struct arm_pic *pic;
+
+ mtx_lock(&pic_list_lock);
+ pic = pic_lookup_locked(dev, xref);
+ if (pic == NULL) {
+ mtx_unlock(&pic_list_lock);
+ return;
+ }
+ SLIST_REMOVE(&pic_list, pic, arm_pic, pic_next);
+ mtx_unlock(&pic_list_lock);
+
+ free(pic, M_INTRNG);
+}
+#endif
+/*
+ * Register interrupt controller.
+ */
+int
+arm_pic_register(device_t dev, intptr_t xref)
+{
+ struct arm_pic *pic;
+
+ pic = pic_create(dev, xref);
+ if (pic == NULL)
+ return (ENOMEM);
+ if (pic->pic_dev != dev)
+ return (EINVAL); /* XXX it could be many things. */
+
+ debugf("PIC %p registered for %s <xref %x>\n", pic,
+ device_get_nameunit(dev), xref);
+ return (0);
+}
+
+/*
+ * Unregister interrupt controller.
+ */
+int
+arm_pic_unregister(device_t dev, intptr_t xref)
+{
+
+ panic("%s: not implemented", __func__);
+}
+
+/*
+ * Mark interrupt controller (itself) as a root one.
+ *
+ * Note that only an interrupt controller can really know its position
+ * in interrupt controller's tree. So root PIC must claim itself as a root.
+ *
+ * In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
+ * page 30:
+ * "The root of the interrupt tree is determined when traversal
+ * of the interrupt tree reaches an interrupt controller node without
+ * an interrupts property and thus no explicit interrupt parent."
+ */
+int
+arm_pic_claim_root(device_t dev, intptr_t xref, arm_irq_filter_t *filter,
+ void *arg, u_int ipicount)
+{
+ int error;
+ u_int rootirq;
+
+ if (pic_lookup(dev, xref) == NULL) {
+ device_printf(dev, "not registered\n");
+ return (EINVAL);
+ }
+ if (filter == NULL) {
+ device_printf(dev, "filter missing\n");
+ return (EINVAL);
+ }
+
+ /*
+	 * Only one interrupt controller can be the root for now.
+	 * Note that we further assume that there is no threaded interrupt
+	 * routine (handler) on the root. See arm_irq_handler().
+ */
+ if (irq_root_dev != NULL) {
+ device_printf(dev, "another root already set\n");
+ return (EBUSY);
+ }
+
+ rootirq = arm_namespace_map_irq(device_get_parent(dev), 0, 0);
+ if (rootirq == IRQ_INVALID) {
+ device_printf(dev, "failed to map an irq for the root pic\n");
+ return (ENOMEM);
+ }
+
+ /* Create the isrc. */
+ irq_root_isrc = isrc_lookup(rootirq);
+
+ /* XXX "register" with the PIC. We are the "pic" here, so fake it. */
+ irq_root_isrc->isrc_flags |= ARM_ISRCF_REGISTERED;
+
+ error = arm_irq_add_handler(device_get_parent(dev),
+ (void*)filter, NULL, arg, rootirq, INTR_TYPE_CLK, NULL);
+ if (error != 0) {
+ device_printf(dev, "failed to install root pic handler\n");
+ return (error);
+ }
+ irq_root_dev = dev;
+ irq_root_filter = filter;
+ irq_root_arg = arg;
+ irq_root_ipicount = ipicount;
+
+ debugf("irq root set to %s\n", device_get_nameunit(dev));
+ return (0);
+}
+
+int
+arm_irq_add_handler(device_t dev, driver_filter_t filt, driver_intr_t hand,
+ void *arg, u_int irq, int flags, void **cookiep)
+{
+ const char *name;
+ struct arm_irqsrc *isrc;
+ int error;
+
+ name = device_get_nameunit(dev);
+
+#ifdef INTR_SOLO
+ /*
+	 * Standard handling is done through the MI interrupt framework.
+	 * However, some interrupts may request solely their own special
+	 * handling. This non-standard handling can be used for interrupt
+	 * controllers without a handler (filter only), so when interrupt
+	 * controllers are chained, the MI interrupt framework is called
+	 * only in the leaf controller.
+	 *
+	 * Note that the root interrupt controller routine is served as well,
+	 * although in arm_irq_handler(), i.e. the main system dispatch routine.
+ */
+ if (flags & INTR_SOLO && hand != NULL) {
+ debugf("irq %u cannot solo on %s\n", irq, name);
+ return (EINVAL);
+ }
+#endif
+
+ isrc = isrc_lookup(irq);
+ if (isrc == NULL) {
+ debugf("irq %u without source on %s\n", irq, name);
+ return (EINVAL);
+ }
+
+ error = isrc_register(isrc);
+ if (error != 0) {
+ debugf("irq %u map error %d on %s\n", irq, error, name);
+ return (error);
+ }
+
+#ifdef INTR_SOLO
+ if (flags & INTR_SOLO) {
+		error = isrc_setup_filter(isrc, name, (arm_irq_filter_t *)filt,
+ arg, cookiep);
+ debugf("irq %u setup filter error %d on %s\n", irq, error,
+ name);
+ } else
+#endif
+ {
+ error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
+ cookiep);
+ debugf("irq %u add handler error %d on %s\n", irq, error, name);
+ }
+ if (error != 0)
+ return (error);
+
+ mtx_lock(&isrc_table_lock);
+ isrc->isrc_handlers++;
+ if (isrc->isrc_handlers == 1) {
+ PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
+ PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
+ }
+ mtx_unlock(&isrc_table_lock);
+ return (0);
+}
+
+int
+arm_irq_remove_handler(device_t dev, u_int irq, void *cookie)
+{
+ struct arm_irqsrc *isrc;
+ int error;
+
+ isrc = isrc_lookup(irq);
+ if (isrc == NULL || isrc->isrc_handlers == 0)
+ return (EINVAL);
+
+ if (isrc->isrc_filter != NULL) {
+ if (isrc != cookie)
+ return (EINVAL);
+
+ mtx_lock(&isrc_table_lock);
+ isrc->isrc_filter = NULL;
+ isrc->isrc_arg = NULL;
+ isrc->isrc_handlers = 0;
+ PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);
+ PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
+ isrc_update_name(isrc, NULL);
+ mtx_unlock(&isrc_table_lock);
+ return (0);
+ }
+
+ if (isrc != intr_handler_source(cookie))
+ return (EINVAL);
+
+ error = intr_event_remove_handler(cookie);
+ if (error == 0) {
+ mtx_lock(&isrc_table_lock);
+ isrc->isrc_handlers--;
+ if (isrc->isrc_handlers == 0) {
+ PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);
+ PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
+ }
+ intrcnt_updatename(isrc);
+ mtx_unlock(&isrc_table_lock);
+ }
+ return (error);
+}
+
+int
+arm_irq_config(u_int irq, enum intr_trigger trig, enum intr_polarity pol)
+{
+ struct arm_irqsrc *isrc;
+
+ isrc = isrc_lookup(irq);
+ if (isrc == NULL)
+ return (EINVAL);
+
+ if (isrc->isrc_handlers != 0)
+		return (EBUSY);	/* interrupt is enabled (active) */
+
+ /*
+ * Once an interrupt is enabled, we do not change its configuration.
+ * A controller PIC_ENABLE_INTR() method is called when an interrupt
+ * is going to be enabled. In this method, a controller should setup
+ * the interrupt according to saved configuration parameters.
+ */
+ isrc->isrc_trig = trig;
+ isrc->isrc_pol = pol;
+
+ return (0);
+}
+
+int
+arm_irq_describe(u_int irq, void *cookie, const char *descr)
+{
+ struct arm_irqsrc *isrc;
+ int error;
+
+ isrc = isrc_lookup(irq);
+ if (isrc == NULL || isrc->isrc_handlers == 0)
+ return (EINVAL);
+
+ if (isrc->isrc_filter != NULL) {
+ if (isrc != cookie)
+ return (EINVAL);
+
+ mtx_lock(&isrc_table_lock);
+ isrc_update_name(isrc, descr);
+ mtx_unlock(&isrc_table_lock);
+ return (0);
+ }
+
+ error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
+ if (error == 0) {
+ mtx_lock(&isrc_table_lock);
+ intrcnt_updatename(isrc);
+ mtx_unlock(&isrc_table_lock);
+ }
+ return (error);
+}
+
+#ifdef SMP
+int
+arm_irq_bind(u_int irq, int cpu)
+{
+ struct arm_irqsrc *isrc;
+
+ isrc = isrc_lookup(irq);
+ if (isrc == NULL || isrc->isrc_handlers == 0)
+ return (EINVAL);
+
+ if (isrc->isrc_filter != NULL)
+ return (arm_isrc_assign_cpu(isrc, cpu));
+
+ return (intr_event_bind(isrc->isrc_event, cpu));
+}
+
+/*
+ * Return the CPU that the next interrupt source should use.
+ * For now just returns the next CPU according to round-robin.
+ */
+u_int
+arm_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
+{
+
+ if (!irq_assign_cpu || mp_ncpus == 1)
+ return (PCPU_GET(cpuid));
+
+ do {
+ last_cpu++;
+ if (last_cpu > mp_maxid)
+ last_cpu = 0;
+ } while (!CPU_ISSET(last_cpu, cpumask));
+ return (last_cpu);
+}
+
+/*
+ * Distribute all the interrupt sources among the available
+ * CPUs once the APs have been launched.
+ */
+static void
+arm_irq_shuffle(void *arg __unused)
+{
+ struct arm_irqsrc *isrc;
+ u_int i;
+
+ if (mp_ncpus == 1)
+ return;
+
+ mtx_lock(&isrc_table_lock);
+ irq_assign_cpu = TRUE;
+ for (i = 0; i < NIRQ; i++) {
+ isrc = irq_sources[i];
+ if (isrc == NULL || isrc->isrc_handlers == 0 ||
+ isrc->isrc_flags & ARM_ISRCF_PERCPU)
+ continue;
+
+ if (isrc->isrc_event != NULL &&
+ isrc->isrc_flags & ARM_ISRCF_BOUND &&
+ isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
+ panic("%s: CPU inconsistency", __func__);
+
+ if ((isrc->isrc_flags & ARM_ISRCF_BOUND) == 0)
+ CPU_ZERO(&isrc->isrc_cpu); /* start again */
+
+ /*
+		 * We are in a wicked position here if the following call fails
+		 * for a bound ISRC. The best thing we can do is to clear
+		 * isrc_cpu so that any inconsistency with ie_cpu will be detectable.
+ */
+ if (PIC_BIND(isrc->isrc_dev, isrc) != 0)
+ CPU_ZERO(&isrc->isrc_cpu);
+ }
+ mtx_unlock(&isrc_table_lock);
+}
+SYSINIT(arm_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, arm_irq_shuffle, NULL);
+
+#else
+u_int
+arm_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
+{
+
+ return (PCPU_GET(cpuid));
+}
+#endif
+
+void dosoftints(void);
+void
+dosoftints(void)
+{
+}
+
+/*
+ * arm_irq_memory_barrier()
+ *
+ * Ensure all writes to device memory have reached devices before proceeding.
+ *
+ * This is intended to be called from the post-filter and post-thread routines
+ * of an interrupt controller implementation. A peripheral device driver should
+ * use bus_space_barrier() if it needs to ensure a write has reached the
+ * hardware for some reason other than clearing interrupt conditions.
+ *
+ * The need for this function arises from the ARM weak memory ordering model.
+ * Writes to locations mapped with the Device attribute bypass any caches, but
+ * are buffered. Multiple writes to the same device will be observed by that
+ * device in the order issued by the cpu. Writes to different devices may
+ * appear at those devices in a different order than issued by the cpu. That
+ * is, if the cpu writes to device A then device B, the write to device B could
+ * complete before the write to device A.
+ *
+ * Consider a typical device interrupt handler which services the interrupt and
+ * writes to a device status-acknowledge register to clear the interrupt before
+ * returning. That write is posted to the L2 controller which "immediately"
+ * places it in a store buffer and automatically drains that buffer. This can
+ * be less immediate than you'd think... There may be no free slots in the store
+ * buffers, so an existing buffer has to be drained first to make room. The
+ * target bus may be busy with other traffic (such as DMA for various devices),
+ * delaying the drain of the store buffer for some indeterminate time. While
+ * all this delay is happening, execution proceeds on the CPU, unwinding its way
+ * out of the interrupt call stack to the point where the interrupt driver code
+ * is ready to EOI and unmask the interrupt. The interrupt controller may be
+ * accessed via a faster bus than the hardware whose handler just ran; the write
+ * to unmask and EOI the interrupt may complete quickly while the device write
+ * to ack and clear the interrupt source is still lingering in a store buffer
+ * waiting for access to a slower bus. With the interrupt unmasked at the
+ * interrupt controller but still active at the device, as soon as interrupts
+ * are enabled on the core the device re-interrupts immediately: now you've got
+ * a spurious interrupt on your hands.
+ *
+ * The right way to fix this problem is for every device driver to use the
+ * proper bus_space_barrier() calls in its interrupt handler. For ARM a single
+ * barrier call at the end of the handler would work. This would have to be
+ * done to every driver in the system, not just arm-specific drivers.
+ *
+ * Another potential fix is to map all device memory as Strongly-Ordered rather
+ * than Device memory, which takes the store buffers out of the picture. This
+ * has a pretty big impact on overall system performance, because each strongly
+ * ordered memory access causes all L2 store buffers to be drained.
+ *
+ * A compromise solution is to have the interrupt controller implementation call
+ * this function to establish a barrier between writes to the interrupt-source
+ * device and writes to the interrupt controller device.
+ *
+ * This takes the interrupt number as an argument, and currently doesn't use it.
+ * The plan is that maybe some day there is a way to flag certain interrupts as
+ * "memory barrier safe" and we can avoid this overhead with them.
+ */
+void
+arm_irq_memory_barrier(uintptr_t irq)
+{
+
+ dsb();
+ cpu_l2cache_drain_writebuf();
+}
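For contrast with the PIC-side barrier above, here is a minimal,
hypothetical sketch (not part of this commit) of the driver-side
alternative the comment mentions, using the standard bus_space_barrier()
API; the foo_* names and the FOO_INT_ACK register are invented.

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

struct foo_softc {			/* hypothetical driver softc */
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
};
#define FOO_INT_ACK	0x10		/* hypothetical register offset */

static void
foo_intr(void *arg)
{
	struct foo_softc *sc = arg;

	/* ... service the device, then ack/clear the interrupt ... */
	bus_space_write_4(sc->bst, sc->bsh, FOO_INT_ACK, 1);

	/*
	 * Ensure the ack write has reached the device before the PIC
	 * unmasks and EOIs the interrupt, so the still-pending source
	 * cannot re-fire spuriously.
	 */
	bus_space_barrier(sc->bst, sc->bsh, FOO_INT_ACK, 4,
	    BUS_SPACE_BARRIER_WRITE);
}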
+
+#ifdef SMP
+/*
+ * Lookup IPI source.
+ */
+static struct arm_irqsrc *
+arm_ipi_lookup(u_int ipi)
+{
+
+ if (ipi >= ARM_IPI_COUNT)
+ panic("%s: no such IPI %u", __func__, ipi);
+
+ return (&ipi_sources[ipi]);
+}
+
+/*
+ * ARM interrupt controller dispatch function for IPIs. It should
+ * be called straight from the interrupt controller, once the
+ * associated interrupt source has been identified, or from any code
+ * that has the interrupt source mapped.
+ */
+void
+arm_ipi_dispatch(struct arm_irqsrc *isrc, struct trapframe *tf)
+{
+ void *arg;
+
+ KASSERT(isrc != NULL, ("%s: no source", __func__));
+
+ isrc_increment_ipi_count(isrc, PCPU_GET(cpuid));
+
+ /*
+ * Supply the IPI filter with the trapframe as its argument
+ * if no argument was registered.
+ */
+ arg = isrc->isrc_arg != NULL ? isrc->isrc_arg : tf;
+ isrc->isrc_ipifilter(arg);
+}
+
+/*
+ * Map an IPI into the interrupt controller.
+ *
+ * Not SMP coherent.
+ */
+static int
+ipi_map(struct arm_irqsrc *isrc, u_int ipi)
+{
+ boolean_t is_percpu;
+ int error;
+
+ if (ipi >= ARM_IPI_COUNT)
+ panic("%s: no such IPI %u", __func__, ipi);
+
+ KASSERT(irq_root_dev != NULL, ("%s: no root attached", __func__));
+
+ isrc->isrc_type = ARM_ISRCT_NAMESPACE;
+ isrc->isrc_nspc_type = ARM_IRQ_NSPC_IPI;
+ isrc->isrc_nspc_num = ipi_next_num;
+
+ error = PIC_REGISTER(irq_root_dev, isrc, &is_percpu);
+
+ debugf("ipi %u mapped to %u on %s - error %d\n", ipi, ipi_next_num,
+ device_get_nameunit(irq_root_dev), error);
+
+ if (error == 0) {
+ isrc->isrc_dev = irq_root_dev;
+ ipi_next_num++;
+ }
+ return (error);
+}
+
+/*
+ * Set up an IPI handler for an interrupt source.
+ *
+ * Note that a platform may provide additional ways to send and receive
+ * IPIs, fast interrupts for example. In that case, one can call this
+ * function with the AISHF_NOALLOC flag set and then call
+ * arm_ipi_dispatch() when appropriate.
+ *
+ * Not SMP coherent.
+ */
+int
+arm_ipi_set_handler(u_int ipi, const char *name, arm_ipi_filter_t *filter,
+ void *arg, u_int flags)
+{
+ struct arm_irqsrc *isrc;
+ int error;
+
+ if (filter == NULL)
+ return (EINVAL);
+
+ isrc = arm_ipi_lookup(ipi);
+ if (isrc->isrc_ipifilter != NULL)
+ return (EEXIST);
+
+ if ((flags & AISHF_NOALLOC) == 0) {
+ error = ipi_map(isrc, ipi);
+ if (error != 0)
+ return (error);
+ }
+
+ isrc->isrc_ipifilter = filter;
+ isrc->isrc_arg = arg;
+ isrc->isrc_handlers = 1;
+ isrc_setup_ipi_counters(isrc, name);
+
+ if (isrc->isrc_dev != NULL) {
+ mtx_lock(&isrc_table_lock);
+ PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
+ PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
+ mtx_unlock(&isrc_table_lock);
+ }
+ return (0);
+}
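
As a usage sketch of the AISHF_NOALLOC path mentioned above (hypothetical;
the foo_* names are invented, and the platform is assumed to have obtained
the struct arm_irqsrc pointer for the IPI by its own means):

	/* Register the filter; no mapping is allocated in the root PIC. */
	error = arm_ipi_set_handler(IPI_HARDCLOCK, "hardclock",
	    foo_hardclock_filter, NULL, AISHF_NOALLOC);

	/* Later, from the platform's private IPI delivery path: */
	arm_ipi_dispatch(foo_hardclock_isrc, tf);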
+
+/*
+ * Send an IPI through the interrupt controller.
+ */
+void
+pic_ipi_send(cpuset_t cpus, u_int ipi)
+{
+ struct arm_irqsrc *isrc;
+
+ isrc = arm_ipi_lookup(ipi);
+
+ KASSERT(irq_root_dev != NULL, ("%s: no root attached", __func__));
+ PIC_IPI_SEND(irq_root_dev, isrc, cpus);
+}
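
A typical caller builds a cpuset and hands it to pic_ipi_send(); for example
(a sketch, not code from this commit), delivering IPI_PREEMPT to a single
CPU:

	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);
	pic_ipi_send(cpus, IPI_PREEMPT);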
+
+/*
+ * Initialize the interrupt controller on a secondary CPU.
+ */
+void
+arm_pic_init_secondary(void)
+{
+
+ /*
+ * QQQ: Is only the root PIC aware of the other CPUs?
+ */
+ KASSERT(irq_root_dev != NULL, ("%s: no root attached", __func__));
+
+ //mtx_lock(&isrc_table_lock);
+ PIC_INIT_SECONDARY(irq_root_dev);
+ //mtx_unlock(&isrc_table_lock);
+}
+#endif
+
+#ifdef DDB
+DB_SHOW_COMMAND(irqs, db_show_irqs)
+{
+ u_int i, irqsum;
+ struct arm_irqsrc *isrc;
+
+#ifdef SMP
+ for (i = 0; i <= mp_maxid; i++) {
+ struct pcpu *pc;
+ u_int ipi, ipisum;
+
+ pc = pcpu_find(i);
+ if (pc != NULL) {
+ for (ipisum = 0, ipi = 0; ipi < ARM_IPI_COUNT; ipi++) {
+ isrc = arm_ipi_lookup(ipi);
+ if (isrc->isrc_count != NULL)
+ ipisum += isrc->isrc_count[i];
+ }
+ db_printf("cpu%u: total %u ipis %u\n", i,
+ pc->pc_cnt.v_intr, ipisum);
+ }
+ }
+ db_printf("\n");
+#endif
+
+ for (irqsum = 0, i = 0; i < NIRQ; i++) {
+ isrc = irq_sources[i];
+ if (isrc == NULL)
+ continue;
+
+ db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
+ isrc->isrc_name, isrc->isrc_cpu.__bits[0],
+ isrc->isrc_flags & ARM_ISRCF_BOUND ? " (bound)" : "",
+ isrc->isrc_count[0]);
+ irqsum += isrc->isrc_count[0];
+ }
+ db_printf("irq total %u\n", irqsum);
+}
+#endif
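
Hypothetical output of the new "show irqs" command (the values below are
invented; the layout follows the format strings above):

	db> show irqs
	cpu0: total 5132 ipis 211
	cpu1: total 4890 ipis 198

	irq29 <gic0,29>: cpu 03 cnt 1410
	irq37 <uart0>: cpu 01 (bound) cnt 982
	irq total 2392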
diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c
index 083e62e..5d1043f 100644
--- a/sys/arm/arm/mp_machdep.c
+++ b/sys/arm/arm/mp_machdep.c
@@ -74,7 +74,9 @@ volatile int mp_naps;
/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;
+#ifndef ARM_INTRNG
static int ipi_handler(void *arg);
+#endif
void set_stackptrs(int cpu);
/* Temporary variables for init_secondary() */
@@ -134,7 +136,6 @@ cpu_mp_start(void)
else
for (i = 1; i < mp_ncpus; i++)
CPU_SET(i, &all_cpus);
-
}
/* Introduce rest of cores to the world */
@@ -150,7 +151,9 @@ init_secondary(int cpu)
{
struct pcpu *pc;
uint32_t loop_counter;
+#ifndef ARM_INTRNG
int start = 0, end = 0;
+#endif
#ifdef ARM_NEW_PMAP
pmap_set_tex();
@@ -211,11 +214,12 @@ init_secondary(int cpu)
mtx_unlock_spin(&ap_boot_mtx);
+#ifndef ARM_INTRNG
/* Enable ipi */
#ifdef IPI_IRQ_START
start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
- end = IPI_IRQ_END;
+ end = IPI_IRQ_END;
#else
end = IPI_IRQ_START;
#endif
@@ -223,6 +227,7 @@ init_secondary(int cpu)
for (int i = start; i <= end; i++)
arm_unmask_irq(i);
+#endif /* !ARM_INTRNG */
enable_interrupts(PSR_I);
loop_counter = 0;
@@ -245,6 +250,108 @@ init_secondary(int cpu)
/* NOTREACHED */
}
+#ifdef ARM_INTRNG
+static void
+ipi_rendezvous(void *dummy __unused)
+{
+
+ CTR0(KTR_SMP, "IPI_RENDEZVOUS");
+ smp_rendezvous_action();
+}
+
+static void
+ipi_ast(void *dummy __unused)
+{
+
+ CTR0(KTR_SMP, "IPI_AST");
+}
+
+static void
+ipi_stop(void *dummy __unused)
+{
+ u_int cpu;
+
+ /*
+ * IPI_STOP_HARD is mapped to IPI_STOP.
+ */
+ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
+
+ cpu = PCPU_GET(cpuid);
+ savectx(&stoppcbs[cpu]);
+
+ /*
+ * CPUs are stopped when entering the debugger and at
+ * system shutdown, both events which can precede a
+ * panic dump. For the dump to be correct, all caches
+ * must be flushed and invalidated, but on ARM there's
+ * no way to broadcast a wbinv_all to other cores.
+ * Instead, we have each core do the local wbinv_all as
+ * part of stopping the core. The core requesting the
+ * stop will do the l2 cache flush after all other cores
+ * have done their l1 flushes and stopped.
+ */
+ cpu_idcache_wbinv_all();
+
+ /* Indicate we are stopped */
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
+
+ /* Wait for restart */
+ while (!CPU_ISSET(cpu, &started_cpus))
+ cpu_spinwait();
+
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
+ CTR0(KTR_SMP, "IPI_STOP (restart)");
+}
+
+static void
+ipi_preempt(void *arg)
+{
+ struct trapframe *oldframe;
+ struct thread *td;
+
+ critical_enter();
+ td = curthread;
+ td->td_intr_nesting_level++;
+ oldframe = td->td_intr_frame;
+ td->td_intr_frame = (struct trapframe *)arg;
+
+ CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
+ sched_preempt(td);
+
+ td->td_intr_frame = oldframe;
+ td->td_intr_nesting_level--;
+ critical_exit();
+}
+
+static void
+ipi_hardclock(void *arg)
+{
+ struct trapframe *oldframe;
+ struct thread *td;
+
+ critical_enter();
+ td = curthread;
+ td->td_intr_nesting_level++;
+ oldframe = td->td_intr_frame;
+ td->td_intr_frame = (struct trapframe *)arg;
+
+ CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
+ hardclockintr();
+
+ td->td_intr_frame = oldframe;
+ td->td_intr_nesting_level--;
+ critical_exit();
+}
+
+static void
+ipi_tlb(void *dummy __unused)
+{
+
+ CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
+ cpufuncs.cf_tlb_flushID();
+}
+#else
static int
ipi_handler(void *arg)
{
@@ -320,15 +427,28 @@ ipi_handler(void *arg)
return (FILTER_HANDLED);
}
+#endif
static void
release_aps(void *dummy __unused)
{
uint32_t loop_counter;
+#ifndef ARM_INTRNG
int start = 0, end = 0;
+#endif
if (mp_ncpus == 1)
return;
+
+#ifdef ARM_INTRNG
+ arm_ipi_set_handler(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL, 0);
+ arm_ipi_set_handler(IPI_AST, "ast", ipi_ast, NULL, 0);
+ arm_ipi_set_handler(IPI_STOP, "stop", ipi_stop, NULL, 0);
+ arm_ipi_set_handler(IPI_PREEMPT, "preempt", ipi_preempt, NULL, 0);
+ arm_ipi_set_handler(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL, 0);
+ arm_ipi_set_handler(IPI_TLB, "tlb", ipi_tlb, NULL, 0);
+
+#else
#ifdef IPI_IRQ_START
start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
@@ -353,6 +473,7 @@ release_aps(void *dummy __unused)
/* Enable ipi */
arm_unmask_irq(i);
}
+#endif
atomic_store_rel_int(&aps_ready, 1);
/* Wake the other threads up */
#if __ARM_ARCH >= 7
diff --git a/sys/arm/arm/nexus.c b/sys/arm/arm/nexus.c
index def6c1a..e2deeb0 100644
--- a/sys/arm/arm/nexus.c
+++ b/sys/arm/arm/nexus.c
@@ -85,8 +85,17 @@ static struct resource *nexus_alloc_resource(device_t, device_t, int, int *,
u_long, u_long, u_long, u_int);
static int nexus_activate_resource(device_t, device_t, int, int,
struct resource *);
+#ifdef ARM_INTRNG
+#ifdef SMP
+static int nexus_bind_intr(device_t, device_t, struct resource *, int);
+#endif
+#endif
static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
enum intr_polarity pol);
+#ifdef ARM_INTRNG
+static int nexus_describe_intr(device_t dev, device_t child,
+ struct resource *irq, void *cookie, const char *descr);
+#endif
static int nexus_deactivate_resource(device_t, device_t, int, int,
struct resource *);
static int nexus_release_resource(device_t, device_t, int, int,
@@ -115,6 +124,12 @@ static device_method_t nexus_methods[] = {
DEVMETHOD(bus_release_resource, nexus_release_resource),
DEVMETHOD(bus_setup_intr, nexus_setup_intr),
DEVMETHOD(bus_teardown_intr, nexus_teardown_intr),
+#ifdef ARM_INTRNG
+ DEVMETHOD(bus_describe_intr, nexus_describe_intr),
+#ifdef SMP
+ DEVMETHOD(bus_bind_intr, nexus_bind_intr),
+#endif
+#endif
#ifdef FDT
DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr),
#endif
@@ -251,9 +266,12 @@ nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
{
int ret = ENODEV;
+#ifdef ARM_INTRNG
+ ret = arm_irq_config(irq, trig, pol);
+#else
if (arm_config_irq)
ret = (*arm_config_irq)(irq, trig, pol);
-
+#endif
return (ret);
}
@@ -267,9 +285,14 @@ nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
flags |= INTR_EXCL;
for (irq = rman_get_start(res); irq <= rman_get_end(res); irq++) {
+#ifdef ARM_INTRNG
+ arm_irq_add_handler(child, filt, intr, arg, irq, flags,
+ cookiep);
+#else
arm_setup_irqhandler(device_get_nameunit(child),
filt, intr, arg, irq, flags, cookiep);
arm_unmask_irq(irq);
+#endif
}
return (0);
}
@@ -278,9 +301,31 @@ static int
nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih)
{
+#ifdef ARM_INTRNG
+ return (arm_irq_remove_handler(child, rman_get_start(r), ih));
+#else
return (arm_remove_irqhandler(rman_get_start(r), ih));
+#endif
}
+#ifdef ARM_INTRNG
+static int
+nexus_describe_intr(device_t dev, device_t child, struct resource *irq,
+ void *cookie, const char *descr)
+{
+
+ return (arm_irq_describe(rman_get_start(irq), cookie, descr));
+}
+
+#ifdef SMP
+static int
+nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
+{
+
+ return (arm_irq_bind(rman_get_start(irq), cpu));
+}
+#endif
+#endif
static int
nexus_activate_resource(device_t bus, device_t child, int type, int rid,
diff --git a/sys/arm/arm/pic_if.m b/sys/arm/arm/pic_if.m
new file mode 100644
index 0000000..d3002ea
--- /dev/null
+++ b/sys/arm/arm/pic_if.m
@@ -0,0 +1,124 @@
+#-
+# Copyright (c) 2012 Jakub Wojciech Klama <jceel@FreeBSD.org>
+# Copyright (c) 2015 Svatopluk Kraus
+# Copyright (c) 2015 Michal Meloun
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <sys/bus.h>
+#include <sys/cpuset.h>
+#include <machine/frame.h>
+#include <machine/intr.h>
+
+INTERFACE pic;
+
+CODE {
+ static int null_pic_bind(device_t dev, struct arm_irqsrc *isrc)
+ {
+ return (EOPNOTSUPP);
+ }
+
+ static void null_pic_disable_intr(device_t dev, struct arm_irqsrc *isrc)
+ {
+ return;
+ }
+
+ static void null_pic_enable_intr(device_t dev, struct arm_irqsrc *isrc)
+ {
+ return;
+ }
+
+ static void null_pic_init_secondary(device_t dev)
+ {
+ return;
+ }
+
+ static void null_pic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi)
+ {
+ return;
+ }
+};
+
+METHOD int register {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+ boolean_t *is_percpu;
+};
+
+METHOD int unregister {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+};
+
+METHOD void disable_intr {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+} DEFAULT null_pic_disable_intr;
+
+METHOD void disable_source {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+};
+
+METHOD void enable_source {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+};
+
+METHOD void enable_intr {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+} DEFAULT null_pic_enable_intr;
+
+METHOD void pre_ithread {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+};
+
+METHOD void post_ithread {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+};
+
+METHOD void post_filter {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+};
+
+METHOD int bind {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+} DEFAULT null_pic_bind;
+
+METHOD void init_secondary {
+ device_t dev;
+} DEFAULT null_pic_init_secondary;
+
+METHOD void ipi_send {
+ device_t dev;
+ struct arm_irqsrc *isrc;
+ cpuset_t cpus;
+} DEFAULT null_pic_ipi_send;
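
A PIC driver implements this interface by listing pic_* entries in its
device method table; the arm/gic.c changes in this commit do exactly that
for the GIC. A minimal skeleton (the foo_pic names are hypothetical):

static device_method_t foo_pic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		foo_pic_probe),
	DEVMETHOD(device_attach,	foo_pic_attach),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_source,	foo_pic_disable_source),
	DEVMETHOD(pic_enable_source,	foo_pic_enable_source),
	DEVMETHOD(pic_enable_intr,	foo_pic_enable_intr),
	DEVMETHOD(pic_pre_ithread,	foo_pic_pre_ithread),
	DEVMETHOD(pic_post_ithread,	foo_pic_post_ithread),
	DEVMETHOD(pic_post_filter,	foo_pic_post_filter),
	DEVMETHOD(pic_register,		foo_pic_register),
#ifdef SMP
	DEVMETHOD(pic_bind,		foo_pic_bind),
	DEVMETHOD(pic_init_secondary,	foo_pic_init_secondary),
	DEVMETHOD(pic_ipi_send,		foo_pic_ipi_send),
#endif

	DEVMETHOD_END
};

Methods that have a DEFAULT in the interface (disable_intr, enable_intr,
bind, init_secondary, ipi_send) may be omitted; the rest must be supplied.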
diff --git a/sys/arm/include/fdt.h b/sys/arm/include/fdt.h
index c1b785e..e8302d6 100644
--- a/sys/arm/include/fdt.h
+++ b/sys/arm/include/fdt.h
@@ -34,12 +34,16 @@
#include <machine/bus.h>
+#ifndef ARM_INTRNG
+
/* Max interrupt number */
#define FDT_INTR_MAX NIRQ
/* Map phandle/intpin pair to global IRQ number */
#define FDT_MAP_IRQ(node, pin) (pin)
+#endif
+
/*
* Bus space tag. XXX endianess info needs to be derived from the blob.
*/
diff --git a/sys/arm/include/intr.h b/sys/arm/include/intr.h
index 6a46f00..ed075df 100644
--- a/sys/arm/include/intr.h
+++ b/sys/arm/include/intr.h
@@ -43,6 +43,102 @@
#include <dev/ofw/openfirm.h>
#endif
+#ifdef ARM_INTRNG
+
+#ifndef NIRQ
+#define NIRQ 1024 /* XXX - It should be an option. */
+#endif
+
+#ifdef notyet
+#define INTR_SOLO INTR_MD1
+typedef int arm_irq_filter_t(void *arg, struct trapframe *tf);
+#else
+typedef int arm_irq_filter_t(void *arg);
+#endif
+
+#define ARM_ISRC_NAMELEN (MAXCOMLEN + 1)
+
+typedef void arm_ipi_filter_t(void *arg);
+
+enum arm_isrc_type {
+ ARM_ISRCT_NAMESPACE,
+ ARM_ISRCT_FDT
+};
+
+#define ARM_ISRCF_REGISTERED 0x01 /* registered in a controller */
+#define ARM_ISRCF_PERCPU 0x02 /* per CPU interrupt */
+#define ARM_ISRCF_BOUND 0x04 /* bound to a CPU */
+
+/* Interrupt source definition. */
+struct arm_irqsrc {
+ device_t isrc_dev; /* where isrc is mapped */
+ intptr_t isrc_xref; /* device reference key */
+ uintptr_t isrc_data; /* device data for isrc */
+ u_int isrc_irq; /* unique identifier */
+ enum arm_isrc_type isrc_type; /* how the isrc is described */
+ u_int isrc_flags;
+ char isrc_name[ARM_ISRC_NAMELEN];
+ uint16_t isrc_nspc_type;
+ uint16_t isrc_nspc_num;
+ enum intr_trigger isrc_trig;
+ enum intr_polarity isrc_pol;
+ cpuset_t isrc_cpu; /* CPUs on which it is enabled */
+ u_int isrc_index;
+ u_long * isrc_count;
+ u_int isrc_handlers;
+ struct intr_event * isrc_event;
+ arm_irq_filter_t * isrc_filter;
+ arm_ipi_filter_t * isrc_ipifilter;
+ void * isrc_arg;
+#ifdef FDT
+ u_int isrc_ncells;
+ pcell_t isrc_cells[]; /* leave it last */
+#endif
+};
+
+void arm_irq_set_name(struct arm_irqsrc *isrc, const char *fmt, ...)
+ __printflike(2, 3);
+
+void arm_irq_dispatch(struct arm_irqsrc *isrc, struct trapframe *tf);
+
+#define ARM_IRQ_NSPC_NONE 0
+#define ARM_IRQ_NSPC_PLAIN 1
+#define ARM_IRQ_NSPC_IRQ 2
+#define ARM_IRQ_NSPC_IPI 3
+
+u_int arm_namespace_map_irq(device_t dev, uint16_t type, uint16_t num);
+#ifdef FDT
+u_int arm_fdt_map_irq(phandle_t, pcell_t *, u_int);
+#endif
+
+int arm_pic_register(device_t dev, intptr_t xref);
+int arm_pic_unregister(device_t dev, intptr_t xref);
+int arm_pic_claim_root(device_t dev, intptr_t xref, arm_irq_filter_t *filter,
+ void *arg, u_int ipicount);
+
+int arm_irq_add_handler(device_t dev, driver_filter_t, driver_intr_t, void *,
+ u_int, int, void **);
+int arm_irq_remove_handler(device_t dev, u_int, void *);
+int arm_irq_config(u_int, enum intr_trigger, enum intr_polarity);
+int arm_irq_describe(u_int, void *, const char *);
+
+u_int arm_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask);
+
+#ifdef SMP
+int arm_irq_bind(u_int, int);
+
+void arm_ipi_dispatch(struct arm_irqsrc *isrc, struct trapframe *tf);
+
+#define AISHF_NOALLOC 0x0001
+
+int arm_ipi_set_handler(u_int ipi, const char *name, arm_ipi_filter_t *filter,
+ void *arg, u_int flags);
+
+void arm_pic_init_secondary(void);
+#endif
+
+#else /* ARM_INTRNG */
+
/* XXX move to std.* files? */
#ifdef CPU_XSCALE_81342
#define NIRQ 128
@@ -71,7 +167,6 @@
#define NIRQ 32
#endif
-
int arm_get_next_irq(int);
void arm_mask_irq(uintptr_t);
void arm_unmask_irq(uintptr_t);
@@ -83,8 +178,6 @@ extern void (*arm_post_filter)(void *);
extern int (*arm_config_irq)(int irq, enum intr_trigger trig,
enum intr_polarity pol);
-void arm_irq_memory_barrier(uintptr_t);
-
void arm_pic_init_secondary(void);
int gic_decode_fdt(uint32_t iparentnode, uint32_t *intrcells, int *interrupt,
int *trig, int *pol);
@@ -93,4 +186,8 @@ int gic_decode_fdt(uint32_t iparentnode, uint32_t *intrcells, int *interrupt,
int arm_fdt_map_irq(phandle_t, pcell_t *, int);
#endif
+#endif /* ARM_INTRNG */
+
+void arm_irq_memory_barrier(uintptr_t);
+
#endif /* _MACHINE_INTR_H */
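
Tying the MD API together: a converted, FDT-described PIC driver would
register itself at attach time roughly as follows (a hypothetical sketch;
foo_pic_is_root() and foo_pic_intr() are invented stand-ins for the driver's
own root test and dispatch filter):

static int
foo_pic_attach(device_t dev)
{
	struct foo_pic_softc *sc = device_get_softc(dev);
	intptr_t xref = OF_xref_from_node(ofw_bus_get_node(dev));

	/* ... map registers and initialize the hardware ... */

	if (arm_pic_register(dev, xref) != 0)
		return (ENXIO);

	/*
	 * Only the root controller claims the root role, supplying the
	 * filter that reads pending interrupts from hardware and feeds
	 * them to arm_irq_dispatch().
	 */
	if (foo_pic_is_root(sc))
		return (arm_pic_claim_root(dev, xref, foo_pic_intr, sc,
		    ARM_IPI_COUNT));

	return (0);
}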
diff --git a/sys/arm/include/smp.h b/sys/arm/include/smp.h
index bce8b4f..1c90431 100644
--- a/sys/arm/include/smp.h
+++ b/sys/arm/include/smp.h
@@ -6,6 +6,19 @@
#include <sys/_cpuset.h>
#include <machine/pcb.h>
+#ifdef ARM_INTRNG
+enum {
+ IPI_AST,
+ IPI_PREEMPT,
+ IPI_RENDEZVOUS,
+ IPI_STOP,
+ IPI_STOP_HARD = IPI_STOP, /* These are synonyms on arm. */
+ IPI_HARDCLOCK,
+ IPI_TLB,
+ IPI_CACHE,
+ ARM_IPI_COUNT
+};
+#else
#define IPI_AST 0
#define IPI_PREEMPT 2
#define IPI_RENDEZVOUS 3
@@ -14,6 +27,7 @@
#define IPI_HARDCLOCK 6
#define IPI_TLB 7
#define IPI_CACHE 8
+#endif /* ARM_INTRNG */
void init_secondary(int cpu);
void mpentry(void);
@@ -24,8 +38,10 @@ void ipi_selected(cpuset_t cpus, u_int ipi);
/* PIC interface */
void pic_ipi_send(cpuset_t cpus, u_int ipi);
+#ifndef ARM_INTRNG
void pic_ipi_clear(int ipi);
int pic_ipi_read(int arg);
+#endif
/* Platform interface */
void platform_mp_setmaxid(void);
diff --git a/sys/conf/files.arm b/sys/conf/files.arm
index a2abcc2..33838ae 100644
--- a/sys/conf/files.arm
+++ b/sys/conf/files.arm
@@ -41,7 +41,8 @@ arm/arm/gic.c optional gic
arm/arm/identcpu.c standard
arm/arm/in_cksum.c optional inet | inet6
arm/arm/in_cksum_arm.S optional inet | inet6
-arm/arm/intr.c standard
+arm/arm/intr.c optional !arm_intrng
+arm/arm/intrng.c optional arm_intrng
arm/arm/locore.S standard no-obj
arm/arm/machdep.c standard
arm/arm/mem.c optional mem
@@ -49,6 +50,7 @@ arm/arm/minidump_machdep.c optional mem
arm/arm/mp_machdep.c optional smp
arm/arm/nexus.c standard
arm/arm/physmem.c standard
+arm/arm/pic_if.m optional arm_intrng
arm/arm/pl190.c optional pl190
arm/arm/pl310.c optional pl310
arm/arm/platform.c optional platform
diff --git a/sys/conf/options.arm b/sys/conf/options.arm
index 57cef6a..aee546a 100644
--- a/sys/conf/options.arm
+++ b/sys/conf/options.arm
@@ -1,6 +1,7 @@
#$FreeBSD$
ARMV6 opt_global.h
ARM_CACHE_LOCK_ENABLE opt_global.h
+ARM_INTRNG opt_global.h
ARM_KERN_DIRECTMAP opt_vm.h
ARM_L2_PIPT opt_global.h
ARM_MANY_BOARD opt_global.h
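
With the sources and option wired into the build as above, a platform whose
support code has been converted enables the new framework from its kernel
configuration file:

	# Illustrative kernel config fragment
	options 	ARM_INTRNG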