author     jhb <jhb@FreeBSD.org>	2009-06-25 18:13:46 +0000
committer  jhb <jhb@FreeBSD.org>	2009-06-25 18:13:46 +0000
commit     7f94b486065bb85599411c34d8938b09249a60ac (patch)
tree       0e088657e9ba66d81df5cd694a24bd377ac93161 /sys/i386
parent     4f8d3e68515720d253dc161b214a03b8025fdf8d (diff)
- Restore the behavior of pre-allocating IDT vectors for MSI interrupts.
  This is mostly important for the multiple MSI message case where the
  IDT vectors for the entire group need to be allocated together.  This
  also restores the assumptions made by the PCI bus code that it could
  invoke PCIB_MAP_MSI() once MSI vectors were allocated.
- To avoid whiplash with CPU assignments, change the way that CPUs are
  assigned to interrupt sources on activation.  Instead of assigning the
  CPU via pic_assign_cpu() before calling enable_intr(), allow the
  different interrupt source drivers to ask the MD interrupt code which
  CPU to use when they allocate an IDT vector.  I/O APIC interrupt pins
  do this in their pic_enable_intr() routines, giving the same behavior
  as before.  MSI sources do it when the IDT vectors are allocated during
  msi_alloc() and msix_alloc().
- Change the intr_table_lock from an sx lock to a mutex.

Tested by:	rnoland
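
As a rough illustration of the second item, the sketch below mimics the
round-robin selection that the new intr_next_cpu() performs: hand out the
current CPU, then advance to the next CPU present in a mask of eligible
CPUs.  This is a stand-alone user-space sketch, not kernel code; the names
pick_next_cpu, cpu_mask and ncpu are invented for the example, and the real
routine returns a local APIC ID while holding intr_table_lock (see the diff
below).

/*
 * Hypothetical sketch of round-robin CPU selection; not FreeBSD code.
 * cpu_mask marks eligible CPUs, ncpu is the CPU count.
 */
#include <stdio.h>

static unsigned int cpu_mask = 0x5;	/* CPUs 0 and 2 are eligible. */
static unsigned int ncpu = 4;
static unsigned int current_cpu = 0;

static unsigned int
pick_next_cpu(void)
{
	unsigned int cpu = current_cpu;

	/* Advance to the next eligible CPU, wrapping around at ncpu. */
	do {
		current_cpu = (current_cpu + 1) % ncpu;
	} while ((cpu_mask & (1u << current_cpu)) == 0);
	return (cpu);
}

int
main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("interrupt source %d -> CPU %u\n", i, pick_next_cpu());
	return (0);
}
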
Diffstat (limited to 'sys/i386')
-rw-r--r--   sys/i386/i386/intr_machdep.c     | 73
-rw-r--r--   sys/i386/i386/io_apic.c          |  2
-rw-r--r--   sys/i386/i386/msi.c              | 54
-rw-r--r--   sys/i386/include/intr_machdep.h  |  3
4 files changed, 77 insertions, 55 deletions
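
The bulk of the msi.c change is the new pre-allocation path in msi_alloc()
and msix_alloc(), which grabs the IDT vectors up front via
apic_alloc_vectors()/apic_alloc_vector().  The toy sketch below shows the
general idea of carving a contiguous, suitably aligned block out of a
free-slot map, which is what a multiple-message MSI group needs; it is not
the kernel's implementation, and alloc_contig, used and NSLOTS are invented
for the example.

/*
 * Toy sketch only: find 'count' contiguous free slots starting at a
 * multiple of 'align'.  Not apic_alloc_vectors(); names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

#define NSLOTS	64

static bool used[NSLOTS];

static int
alloc_contig(int count, int align)
{
	int base, i;

	for (base = 0; base + count <= NSLOTS; base += align) {
		for (i = 0; i < count; i++)
			if (used[base + i])
				break;
		if (i == count) {
			/* The whole run is free; claim it. */
			for (i = 0; i < count; i++)
				used[base + i] = true;
			return (base);
		}
	}
	return (-1);		/* No suitable block. */
}

int
main(void)
{
	used[2] = true;		/* Pretend slot 2 is already taken. */
	printf("4 slots allocated at base %d\n", alloc_contig(4, 4));
	return (0);
}
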
diff --git a/sys/i386/i386/intr_machdep.c b/sys/i386/i386/intr_machdep.c
index 3a5c9a2..eedc682 100644
--- a/sys/i386/i386/intr_machdep.c
+++ b/sys/i386/i386/intr_machdep.c
@@ -50,7 +50,6 @@
#include <sys/smp.h>
#include <sys/syslog.h>
#include <sys/systm.h>
-#include <sys/sx.h>
#include <machine/clock.h>
#include <machine/intr_machdep.h>
#include <machine/smp.h>
@@ -64,14 +63,12 @@ typedef void (*mask_fn)(void *);
static int intrcnt_index;
static struct intsrc *interrupt_sources[NUM_IO_INTS];
-static struct sx intr_table_lock;
+static struct mtx intr_table_lock;
static struct mtx intrcnt_lock;
static STAILQ_HEAD(, pic) pics;
#ifdef SMP
static int assign_cpu;
-
-static void intr_assign_next_cpu(struct intsrc *isrc);
#endif
static int intr_assign_cpu(void *arg, u_char cpu);
@@ -105,14 +102,14 @@ intr_register_pic(struct pic *pic)
{
int error;
- sx_xlock(&intr_table_lock);
+ mtx_lock(&intr_table_lock);
if (intr_pic_registered(pic))
error = EBUSY;
else {
STAILQ_INSERT_TAIL(&pics, pic, pics);
error = 0;
}
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
return (error);
}
@@ -136,16 +133,16 @@ intr_register_source(struct intsrc *isrc)
vector);
if (error)
return (error);
- sx_xlock(&intr_table_lock);
+ mtx_lock(&intr_table_lock);
if (interrupt_sources[vector] != NULL) {
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
intr_event_destroy(isrc->is_event);
return (EEXIST);
}
intrcnt_register(isrc);
interrupt_sources[vector] = isrc;
isrc->is_handlers = 0;
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
return (0);
}
@@ -169,18 +166,14 @@ intr_add_handler(const char *name, int vector, driver_filter_t filter,
error = intr_event_add_handler(isrc->is_event, name, filter, handler,
arg, intr_priority(flags), flags, cookiep);
if (error == 0) {
- sx_xlock(&intr_table_lock);
+ mtx_lock(&intr_table_lock);
intrcnt_updatename(isrc);
isrc->is_handlers++;
if (isrc->is_handlers == 1) {
-#ifdef SMP
- if (assign_cpu)
- intr_assign_next_cpu(isrc);
-#endif
isrc->is_pic->pic_enable_intr(isrc);
isrc->is_pic->pic_enable_source(isrc);
}
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
}
return (error);
}
@@ -194,14 +187,14 @@ intr_remove_handler(void *cookie)
isrc = intr_handler_source(cookie);
error = intr_event_remove_handler(cookie);
if (error == 0) {
- sx_xlock(&intr_table_lock);
+ mtx_lock(&intr_table_lock);
isrc->is_handlers--;
if (isrc->is_handlers == 0) {
isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
isrc->is_pic->pic_disable_intr(isrc);
}
intrcnt_updatename(isrc);
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
}
return (error);
}
@@ -272,12 +265,12 @@ intr_resume(void)
{
struct pic *pic;
- sx_xlock(&intr_table_lock);
+ mtx_lock(&intr_table_lock);
STAILQ_FOREACH(pic, &pics, pics) {
if (pic->pic_resume != NULL)
pic->pic_resume(pic);
}
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
}
void
@@ -285,12 +278,12 @@ intr_suspend(void)
{
struct pic *pic;
- sx_xlock(&intr_table_lock);
+ mtx_lock(&intr_table_lock);
STAILQ_FOREACH(pic, &pics, pics) {
if (pic->pic_suspend != NULL)
pic->pic_suspend(pic);
}
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
}
static int
@@ -305,9 +298,9 @@ intr_assign_cpu(void *arg, u_char cpu)
*/
if (assign_cpu && cpu != NOCPU) {
isrc = arg;
- sx_xlock(&intr_table_lock);
+ mtx_lock(&intr_table_lock);
isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
}
return (0);
#else
@@ -366,7 +359,7 @@ intr_init(void *dummy __unused)
intrcnt_setname("???", 0);
intrcnt_index = 1;
STAILQ_INIT(&pics);
- sx_init(&intr_table_lock, "intr sources");
+ mtx_init(&intr_table_lock, "intr sources", NULL, MTX_DEF | MTX_RECURSE);
mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
}
SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
@@ -401,19 +394,28 @@ DB_SHOW_COMMAND(irqs, db_show_irqs)
static cpumask_t intr_cpus = (1 << 0);
static int current_cpu;
-static void
-intr_assign_next_cpu(struct intsrc *isrc)
+/*
+ * Return the CPU that the next interrupt source should use. For now
+ * this just returns the next local APIC according to round-robin.
+ */
+u_int
+intr_next_cpu(void)
{
+ u_int apic_id;
- /*
- * Assign this source to a local APIC in a round-robin fashion.
- */
- isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]);
+ /* Leave all interrupts on the BSP during boot. */
+ if (!assign_cpu)
+ return (cpu_apic_ids[0]);
+
+ mtx_lock(&intr_table_lock);
+ apic_id = cpu_apic_ids[current_cpu];
do {
current_cpu++;
if (current_cpu > mp_maxid)
current_cpu = 0;
} while (!(intr_cpus & (1 << current_cpu)));
+ mtx_unlock(&intr_table_lock);
+ return (apic_id);
}
/* Attempt to bind the specified IRQ to the specified CPU. */
@@ -453,6 +455,7 @@ static void
intr_shuffle_irqs(void *arg __unused)
{
struct intsrc *isrc;
+ u_int apic_id;
int i;
#ifdef XEN
@@ -467,7 +470,7 @@ intr_shuffle_irqs(void *arg __unused)
return;
/* Round-robin assign a CPU to each enabled source. */
- sx_xlock(&intr_table_lock);
+ mtx_lock(&intr_table_lock);
assign_cpu = 1;
for (i = 0; i < NUM_IO_INTS; i++) {
isrc = interrupt_sources[i];
@@ -478,13 +481,13 @@ intr_shuffle_irqs(void *arg __unused)
* of picking one via round-robin.
*/
if (isrc->is_event->ie_cpu != NOCPU)
- isrc->is_pic->pic_assign_cpu(isrc,
- cpu_apic_ids[isrc->is_event->ie_cpu]);
+ apic_id = isrc->is_event->ie_cpu;
else
- intr_assign_next_cpu(isrc);
+ apic_id = intr_next_cpu();
+ isrc->is_pic->pic_assign_cpu(isrc, apic_id);
}
}
- sx_xunlock(&intr_table_lock);
+ mtx_unlock(&intr_table_lock);
}
SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
NULL);
diff --git a/sys/i386/i386/io_apic.c b/sys/i386/i386/io_apic.c
index f63f28f..d3bdad0 100644
--- a/sys/i386/i386/io_apic.c
+++ b/sys/i386/i386/io_apic.c
@@ -372,7 +372,7 @@ ioapic_enable_intr(struct intsrc *isrc)
struct ioapic_intsrc *intpin = (struct ioapic_intsrc *)isrc;
if (intpin->io_vector == 0)
- ioapic_assign_cpu(isrc, pcpu_find(0)->pc_apic_id);
+ ioapic_assign_cpu(isrc, intr_next_cpu());
apic_enable_vector(intpin->io_cpu, intpin->io_vector);
}
diff --git a/sys/i386/i386/msi.c b/sys/i386/i386/msi.c
index e42f3d1..d5e24c9 100644
--- a/sys/i386/i386/msi.c
+++ b/sys/i386/i386/msi.c
@@ -161,8 +161,6 @@ msi_enable_intr(struct intsrc *isrc)
{
struct msi_intsrc *msi = (struct msi_intsrc *)isrc;
- if (msi->msi_vector == 0)
- msi_assign_cpu(isrc, 0);
apic_enable_vector(msi->msi_cpu, msi->msi_vector);
}
@@ -208,10 +206,11 @@ msi_assign_cpu(struct intsrc *isrc, u_int apic_id)
/* Store information to free existing irq. */
old_vector = msi->msi_vector;
old_id = msi->msi_cpu;
- if (old_vector && old_id == apic_id)
+ if (old_id == apic_id)
return;
- if (old_vector && !msi->msi_msix && msi->msi_first->msi_count > 1)
+ if (!msi->msi_msix && msi->msi_first->msi_count > 1)
return;
+
/* Allocate IDT vector on this cpu. */
vector = apic_alloc_vector(apic_id, msi->msi_irq);
if (vector == 0)
@@ -223,15 +222,14 @@ msi_assign_cpu(struct intsrc *isrc, u_int apic_id)
msi->msi_msix ? "MSI-X" : "MSI", msi->msi_irq,
msi->msi_cpu, msi->msi_vector);
pci_remap_msi_irq(msi->msi_dev, msi->msi_irq);
+
/*
* Free the old vector after the new one is established. This is done
* to prevent races where we could miss an interrupt.
*/
- if (old_vector)
- apic_free_vector(old_id, old_vector, msi->msi_irq);
+ apic_free_vector(old_id, old_vector, msi->msi_irq);
}
-
void
msi_init(void)
{
@@ -287,7 +285,8 @@ int
msi_alloc(device_t dev, int count, int maxcount, int *irqs)
{
struct msi_intsrc *msi, *fsrc;
- int cnt, i;
+ u_int cpu;
+ int cnt, i, vector;
if (!msi_enabled)
return (ENXIO);
@@ -333,12 +332,25 @@ again:
/* Ok, we now have the IRQs allocated. */
KASSERT(cnt == count, ("count mismatch"));
+ /* Allocate 'count' IDT vectors. */
+ cpu = intr_next_cpu();
+ vector = apic_alloc_vectors(cpu, irqs, count, maxcount);
+ if (vector == 0) {
+ mtx_unlock(&msi_lock);
+ return (ENOSPC);
+ }
+
/* Assign IDT vectors and make these messages owned by 'dev'. */
fsrc = (struct msi_intsrc *)intr_lookup_source(irqs[0]);
for (i = 0; i < count; i++) {
msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
+ msi->msi_cpu = cpu;
msi->msi_dev = dev;
- msi->msi_vector = 0;
+ msi->msi_vector = vector + i;
+ if (bootverbose)
+ printf(
+ "msi: routing MSI IRQ %d to local APIC %u vector %u\n",
+ msi->msi_irq, msi->msi_cpu, msi->msi_vector);
msi->msi_first = fsrc;
KASSERT(msi->msi_intsrc.is_handlers == 0,
("dead MSI has handlers"));
@@ -391,18 +403,14 @@ msi_release(int *irqs, int count)
KASSERT(msi->msi_dev == first->msi_dev, ("owner mismatch"));
msi->msi_first = NULL;
msi->msi_dev = NULL;
- if (msi->msi_vector)
- apic_free_vector(msi->msi_cpu, msi->msi_vector,
- msi->msi_irq);
+ apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
msi->msi_vector = 0;
}
/* Clear out the first message. */
first->msi_first = NULL;
first->msi_dev = NULL;
- if (first->msi_vector)
- apic_free_vector(first->msi_cpu, first->msi_vector,
- first->msi_irq);
+ apic_free_vector(first->msi_cpu, first->msi_vector, first->msi_irq);
first->msi_vector = 0;
first->msi_count = 0;
@@ -451,7 +459,8 @@ int
msix_alloc(device_t dev, int *irq)
{
struct msi_intsrc *msi;
- int i;
+ u_int cpu;
+ int i, vector;
if (!msi_enabled)
return (ENXIO);
@@ -486,9 +495,17 @@ again:
goto again;
}
+ /* Allocate an IDT vector. */
+ cpu = intr_next_cpu();
+ vector = apic_alloc_vector(cpu, i);
+ if (bootverbose)
+ printf("msi: routing MSI-X IRQ %d to local APIC %u vector %u\n",
+ msi->msi_irq, cpu, vector);
+
/* Setup source. */
+ msi->msi_cpu = cpu;
msi->msi_dev = dev;
- msi->msi_vector = 0;
+ msi->msi_vector = vector;
msi->msi_msix = 1;
KASSERT(msi->msi_intsrc.is_handlers == 0, ("dead MSI-X has handlers"));
@@ -520,8 +537,7 @@ msix_release(int irq)
/* Clear out the message. */
msi->msi_dev = NULL;
- if (msi->msi_vector)
- apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
+ apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
msi->msi_vector = 0;
msi->msi_msix = 0;
diff --git a/sys/i386/include/intr_machdep.h b/sys/i386/include/intr_machdep.h
index 4593077..052f9bd 100644
--- a/sys/i386/include/intr_machdep.h
+++ b/sys/i386/include/intr_machdep.h
@@ -139,6 +139,9 @@ int intr_bind(u_int vector, u_char cpu);
int intr_config_intr(int vector, enum intr_trigger trig,
enum intr_polarity pol);
void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame);
+#ifdef SMP
+u_int intr_next_cpu(void);
+#endif
struct intsrc *intr_lookup_source(int vector);
int intr_register_pic(struct pic *pic);
int intr_register_source(struct intsrc *isrc);