author     jhb <jhb@FreeBSD.org>    2008-03-14 19:41:48 +0000
committer  jhb <jhb@FreeBSD.org>    2008-03-14 19:41:48 +0000
commit     9c113163fb2bb182d320f0228312c13601341163 (patch)
tree       c7c725651b1ad4c07a8fe93c253676ef3d6d5279
parent     33dfb1706b9985093bf2f15b13b6d6fcf86e117f (diff)
download   FreeBSD-src-9c113163fb2bb182d320f0228312c13601341163.zip
           FreeBSD-src-9c113163fb2bb182d320f0228312c13601341163.tar.gz
Add preliminary support for binding interrupts to CPUs:
- Add a new intr_event method ie_assign_cpu() that is invoked when the MI
  code wishes to bind an interrupt source to an individual CPU.  The MD
  code may reject the binding with an error.  If an assign_cpu function
  is not provided, then the kernel assumes the platform does not support
  binding interrupts to CPUs and fails all requests to do so.
- Bind ithreads to CPUs on their next execution loop once an interrupt
  event is bound to a CPU.  Only shared ithreads are bound.  We currently
  leave private ithreads for drivers using filters + ithreads in the
  INTR_FILTER case unbound.
- A new intr_event_bind() routine is used to bind an interrupt event to
  a CPU.
- Implement binding on amd64 and i386 by way of the existing pic_assign_cpu
  PIC method.
- For x86, provide a 'intr_bind(IRQ, cpu)' wrapper routine that looks up
  an interrupt source and binds its interrupt event to the specified CPU.
  MI code can currently (ab)use this by doing:

	intr_bind(rman_get_start(irq_res), cpu);

  however, I plan to add a truly MI interface (probably a bus_bind_intr(9))
  where the implementation in the x86 nexus(4) driver would end up calling
  intr_bind() internally.

Requested by:	kmacy, gallatin, jeff
Tested on:	{amd64, i386} x {regular, INTR_FILTER}
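
For illustration only (this sketch is not part of the commit), a driver on amd64 or i386 could use the new intr_bind() wrapper to pin its interrupt to a CPU roughly as follows.  The mydrv_bind_irq() helper, its arguments, and the error handling are hypothetical; intr_bind(), rman_get_start(), and device_printf() are the only kernel interfaces assumed here, and intr_bind() is declared only on SMP kernels.

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/bus.h>
    #include <sys/rman.h>

    #include <machine/intr_machdep.h>   /* intr_bind() on amd64/i386 (SMP only) */

    /*
     * Hypothetical helper: bind the interrupt backing 'irq_res' to 'cpu'.
     * This is the (ab)use pattern described in the commit message; the
     * planned bus_bind_intr(9) interface would eventually replace it.
     */
    static int
    mydrv_bind_irq(device_t dev, struct resource *irq_res, u_char cpu)
    {
    #ifdef SMP
            int error;

            /* Look up the interrupt source by IRQ and bind its event to 'cpu'. */
            error = intr_bind(rman_get_start(irq_res), cpu);
            if (error != 0)
                    device_printf(dev, "failed to bind IRQ to CPU %d: %d\n",
                        cpu, error);
            return (error);
    #else
            /* Mirror the MD code: binding is not supported without SMP. */
            return (EOPNOTSUPP);
    #endif
    }

As the log message notes, this direct use of intr_bind() is a stopgap; the planned MI bus_bind_intr(9) interface would let drivers request the same binding portably, with the x86 nexus(4) implementation calling intr_bind() internally.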
-rw-r--r--   sys/amd64/amd64/intr_machdep.c      61
-rw-r--r--   sys/amd64/include/intr_machdep.h     3
-rw-r--r--   sys/arm/arm/intr.c                   4
-rw-r--r--   sys/i386/i386/intr_machdep.c        61
-rw-r--r--   sys/i386/include/intr_machdep.h      3
-rw-r--r--   sys/ia64/ia64/interrupt.c            2
-rw-r--r--   sys/kern/kern_intr.c                96
-rw-r--r--   sys/powerpc/powerpc/intr_machdep.c   2
-rw-r--r--   sys/sparc64/sparc64/intr_machdep.c   4
-rw-r--r--   sys/sun4v/sun4v/intr_machdep.c       2
-rw-r--r--   sys/sys/interrupt.h                 13
11 files changed, 218 insertions, 33 deletions
diff --git a/sys/amd64/amd64/intr_machdep.c b/sys/amd64/amd64/intr_machdep.c
index 7efc7ac..23d9278 100644
--- a/sys/amd64/amd64/intr_machdep.c
+++ b/sys/amd64/amd64/intr_machdep.c
@@ -89,6 +89,7 @@ static int assign_cpu;
static void intr_assign_next_cpu(struct intsrc *isrc);
#endif
+static int intr_assign_cpu(void *arg, u_char cpu);
static void intr_init(void *__dummy);
static int intr_pic_registered(struct pic *pic);
static void intrcnt_setname(const char *name, int index);
@@ -146,10 +147,12 @@ intr_register_source(struct intsrc *isrc)
#ifdef INTR_FILTER
error = intr_event_create(&isrc->is_event, isrc, 0,
(mask_fn)isrc->is_pic->pic_enable_source,
- intr_eoi_src, intr_disab_eoi_src, "irq%d:", vector);
+ intr_eoi_src, intr_disab_eoi_src, intr_assign_cpu, "irq%d:",
+ vector);
#else
error = intr_event_create(&isrc->is_event, isrc, 0,
- (mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector);
+ (mask_fn)isrc->is_pic->pic_enable_source, intr_assign_cpu, "irq%d:",
+ vector);
#endif
if (error)
return (error);
@@ -431,6 +434,28 @@ intr_suspend(void)
sx_xunlock(&intr_table_lock);
}
+static int
+intr_assign_cpu(void *arg, u_char cpu)
+{
+#ifdef SMP
+ struct intsrc *isrc;
+
+ /*
+ * Don't do anything during early boot. We will pick up the
+ * assignment once the APs are started.
+ */
+ if (assign_cpu && cpu != NOCPU) {
+ isrc = arg;
+ sx_xlock(&intr_table_lock);
+ isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
+ sx_xunlock(&intr_table_lock);
+ }
+ return (0);
+#else
+ return (EOPNOTSUPP);
+#endif
+}
+
static void
intrcnt_setname(const char *name, int index)
{
@@ -542,15 +567,11 @@ static int current_cpu;
static void
intr_assign_next_cpu(struct intsrc *isrc)
{
- struct pic *pic;
- u_int apic_id;
/*
* Assign this source to a local APIC in a round-robin fashion.
*/
- pic = isrc->is_pic;
- apic_id = cpu_apic_ids[current_cpu];
- pic->pic_assign_cpu(isrc, apic_id);
+ isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]);
do {
current_cpu++;
if (current_cpu > mp_maxid)
@@ -558,6 +579,18 @@ intr_assign_next_cpu(struct intsrc *isrc)
} while (!(intr_cpus & (1 << current_cpu)));
}
+/* Attempt to bind the specified IRQ to the specified CPU. */
+int
+intr_bind(u_int vector, u_char cpu)
+{
+ struct intsrc *isrc;
+
+ isrc = intr_lookup_source(vector);
+ if (isrc == NULL)
+ return (EINVAL);
+ return (intr_event_bind(isrc->is_event, cpu));
+}
+
/*
* Add a CPU to our mask of valid CPUs that can be destinations of
* interrupts.
@@ -594,8 +627,18 @@ intr_shuffle_irqs(void *arg __unused)
assign_cpu = 1;
for (i = 0; i < NUM_IO_INTS; i++) {
isrc = interrupt_sources[i];
- if (isrc != NULL && isrc->is_handlers > 0)
- intr_assign_next_cpu(isrc);
+ if (isrc != NULL && isrc->is_handlers > 0) {
+ /*
+ * If this event is already bound to a CPU,
+ * then assign the source to that CPU instead
+ * of picking one via round-robin.
+ */
+ if (isrc->is_event->ie_cpu != NOCPU)
+ isrc->is_pic->pic_assign_cpu(isrc,
+ cpu_apic_ids[isrc->is_event->ie_cpu]);
+ else
+ intr_assign_next_cpu(isrc);
+ }
}
sx_xunlock(&intr_table_lock);
}
diff --git a/sys/amd64/include/intr_machdep.h b/sys/amd64/include/intr_machdep.h
index 8b16c5e..7e1c43f 100644
--- a/sys/amd64/include/intr_machdep.h
+++ b/sys/amd64/include/intr_machdep.h
@@ -137,6 +137,9 @@ void intr_add_cpu(u_int cpu);
int intr_add_handler(const char *name, int vector, driver_filter_t filter,
driver_intr_t handler, void *arg, enum intr_type flags,
void **cookiep);
+#ifdef SMP
+int intr_bind(u_int vector, u_char cpu);
+#endif
int intr_config_intr(int vector, enum intr_trigger trig,
enum intr_polarity pol);
void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame);
diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c
index c991220..fbaa96c 100644
--- a/sys/arm/arm/intr.c
+++ b/sys/arm/arm/intr.c
@@ -92,10 +92,10 @@ arm_setup_irqhandler(const char *name, driver_filter_t *filt,
#ifdef INTR_FILTER
error = intr_event_create(&event, (void *)irq, 0,
(void (*)(void *))arm_unmask_irq, intr_eoi_src,
- intr_disab_eoi_src, "intr%d:", irq);
+ intr_disab_eoi_src, NULL, "intr%d:", irq);
#else
error = intr_event_create(&event, (void *)irq, 0,
- (void (*)(void *))arm_unmask_irq, "intr%d:", irq);
+ (void (*)(void *))arm_unmask_irq, NULL, "intr%d:", irq);
#endif
if (error)
return;
diff --git a/sys/i386/i386/intr_machdep.c b/sys/i386/i386/intr_machdep.c
index 993b68b..057ceaf 100644
--- a/sys/i386/i386/intr_machdep.c
+++ b/sys/i386/i386/intr_machdep.c
@@ -80,6 +80,7 @@ static int assign_cpu;
static void intr_assign_next_cpu(struct intsrc *isrc);
#endif
+static int intr_assign_cpu(void *arg, u_char cpu);
static void intr_init(void *__dummy);
static int intr_pic_registered(struct pic *pic);
static void intrcnt_setname(const char *name, int index);
@@ -137,10 +138,12 @@ intr_register_source(struct intsrc *isrc)
#ifdef INTR_FILTER
error = intr_event_create(&isrc->is_event, isrc, 0,
(mask_fn)isrc->is_pic->pic_enable_source,
- intr_eoi_src, intr_disab_eoi_src, "irq%d:", vector);
+ intr_eoi_src, intr_disab_eoi_src, intr_assign_cpu, "irq%d:",
+ vector);
#else
error = intr_event_create(&isrc->is_event, isrc, 0,
- (mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector);
+ (mask_fn)isrc->is_pic->pic_enable_source, intr_assign_cpu, "irq%d:",
+ vector);
#endif
if (error)
return (error);
@@ -429,6 +432,28 @@ intr_suspend(void)
sx_xunlock(&intr_table_lock);
}
+static int
+intr_assign_cpu(void *arg, u_char cpu)
+{
+#ifdef SMP
+ struct intsrc *isrc;
+
+ /*
+ * Don't do anything during early boot. We will pick up the
+ * assignment once the APs are started.
+ */
+ if (assign_cpu && cpu != NOCPU) {
+ isrc = arg;
+ sx_xlock(&intr_table_lock);
+ isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
+ sx_xunlock(&intr_table_lock);
+ }
+ return (0);
+#else
+ return (EOPNOTSUPP);
+#endif
+}
+
static void
intrcnt_setname(const char *name, int index)
{
@@ -518,15 +543,11 @@ static int current_cpu;
static void
intr_assign_next_cpu(struct intsrc *isrc)
{
- struct pic *pic;
- u_int apic_id;
/*
* Assign this source to a local APIC in a round-robin fashion.
*/
- pic = isrc->is_pic;
- apic_id = cpu_apic_ids[current_cpu];
- pic->pic_assign_cpu(isrc, apic_id);
+ isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]);
do {
current_cpu++;
if (current_cpu > mp_maxid)
@@ -534,6 +555,18 @@ intr_assign_next_cpu(struct intsrc *isrc)
} while (!(intr_cpus & (1 << current_cpu)));
}
+/* Attempt to bind the specified IRQ to the specified CPU. */
+int
+intr_bind(u_int vector, u_char cpu)
+{
+ struct intsrc *isrc;
+
+ isrc = intr_lookup_source(vector);
+ if (isrc == NULL)
+ return (EINVAL);
+ return (intr_event_bind(isrc->is_event, cpu));
+}
+
/*
* Add a CPU to our mask of valid CPUs that can be destinations of
* interrupts.
@@ -570,8 +603,18 @@ intr_shuffle_irqs(void *arg __unused)
assign_cpu = 1;
for (i = 0; i < NUM_IO_INTS; i++) {
isrc = interrupt_sources[i];
- if (isrc != NULL && isrc->is_handlers > 0)
- intr_assign_next_cpu(isrc);
+ if (isrc != NULL && isrc->is_handlers > 0) {
+ /*
+ * If this event is already bound to a CPU,
+ * then assign the source to that CPU instead
+ * of picking one via round-robin.
+ */
+ if (isrc->is_event->ie_cpu != NOCPU)
+ isrc->is_pic->pic_assign_cpu(isrc,
+ cpu_apic_ids[isrc->is_event->ie_cpu]);
+ else
+ intr_assign_next_cpu(isrc);
+ }
}
sx_xunlock(&intr_table_lock);
}
diff --git a/sys/i386/include/intr_machdep.h b/sys/i386/include/intr_machdep.h
index 4e6d070..2aeb0c9 100644
--- a/sys/i386/include/intr_machdep.h
+++ b/sys/i386/include/intr_machdep.h
@@ -133,6 +133,9 @@ void intr_add_cpu(u_int cpu);
#endif
int intr_add_handler(const char *name, int vector, driver_filter_t filter,
driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep);
+#ifdef SMP
+int intr_bind(u_int vector, u_char cpu);
+#endif
int intr_config_intr(int vector, enum intr_trigger trig,
enum intr_polarity pol);
void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame);
diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c
index 5d30566..d832dd8 100644
--- a/sys/ia64/ia64/interrupt.c
+++ b/sys/ia64/ia64/interrupt.c
@@ -344,7 +344,7 @@ ia64_setup_intr(const char *name, int irq, driver_filter_t filter,
#ifdef INTR_FILTER
ia64_intr_eoi, ia64_intr_mask,
#endif
- "irq%u:", irq);
+ NULL, "irq%u:", irq);
if (error) {
free(i, M_DEVBUF);
return (error);
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index d9b983a..3d02073 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
@@ -240,7 +241,8 @@ intr_event_update(struct intr_event *ie)
#ifndef INTR_FILTER
int
intr_event_create(struct intr_event **event, void *source, int flags,
- void (*enable)(void *), const char *fmt, ...)
+ void (*enable)(void *), int (*assign_cpu)(void *, u_char), const char *fmt,
+ ...)
{
struct intr_event *ie;
va_list ap;
@@ -251,7 +253,9 @@ intr_event_create(struct intr_event **event, void *source, int flags,
ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
ie->ie_source = source;
ie->ie_enable = enable;
+ ie->ie_assign_cpu = assign_cpu;
ie->ie_flags = flags;
+ ie->ie_cpu = NOCPU;
TAILQ_INIT(&ie->ie_handlers);
mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
@@ -271,7 +275,7 @@ intr_event_create(struct intr_event **event, void *source, int flags,
int
intr_event_create(struct intr_event **event, void *source, int flags,
void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *),
- const char *fmt, ...)
+ int (*assign_cpu)(void *, u_char), const char *fmt, ...)
{
struct intr_event *ie;
va_list ap;
@@ -282,9 +286,11 @@ intr_event_create(struct intr_event **event, void *source, int flags,
ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
ie->ie_source = source;
ie->ie_enable = enable;
+ ie->ie_assign_cpu = assign_cpu;
ie->ie_eoi = eoi;
ie->ie_disab = disab;
ie->ie_flags = flags;
+ ie->ie_cpu = NOCPU;
TAILQ_INIT(&ie->ie_handlers);
mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
@@ -302,6 +308,52 @@ intr_event_create(struct intr_event **event, void *source, int flags,
}
#endif
+/*
+ * Bind an interrupt event to the specified CPU. Note that not all
+ * platforms support binding an interrupt to a CPU. For those
+ * platforms this request will fail. For supported platforms, any
+ * associated ithreads as well as the primary interrupt context will
+ * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
+ * the interrupt event.
+ */
+int
+intr_event_bind(struct intr_event *ie, u_char cpu)
+{
+ struct thread *td;
+ int error;
+
+ /* Need a CPU to bind to. */
+ if (cpu != NOCPU && CPU_ABSENT(cpu))
+ return (EINVAL);
+
+ if (ie->ie_assign_cpu == NULL)
+ return (EOPNOTSUPP);
+
+ /* Don't allow a bind request if the interrupt is already bound. */
+ mtx_lock(&ie->ie_lock);
+ if (ie->ie_cpu != NOCPU && cpu != NOCPU) {
+ mtx_unlock(&ie->ie_lock);
+ return (EBUSY);
+ }
+ mtx_unlock(&ie->ie_lock);
+
+ error = ie->ie_assign_cpu(ie->ie_source, cpu);
+ if (error)
+ return (error);
+ mtx_lock(&ie->ie_lock);
+ if (ie->ie_thread != NULL)
+ td = ie->ie_thread->it_thread;
+ else
+ td = NULL;
+ if (td != NULL)
+ thread_lock(td);
+ ie->ie_cpu = cpu;
+ if (td != NULL)
+ thread_unlock(td);
+ mtx_unlock(&ie->ie_lock);
+ return (0);
+}
+
int
intr_event_destroy(struct intr_event *ie)
{
@@ -893,10 +945,10 @@ swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
} else {
#ifdef INTR_FILTER
error = intr_event_create(&ie, NULL, IE_SOFT,
- NULL, NULL, NULL, "swi%d:", pri);
+ NULL, NULL, NULL, NULL, "swi%d:", pri);
#else
error = intr_event_create(&ie, NULL, IE_SOFT,
- NULL, "swi%d:", pri);
+ NULL, NULL, "swi%d:", pri);
#endif
if (error)
return (error);
@@ -1078,6 +1130,7 @@ ithread_loop(void *arg)
struct intr_event *ie;
struct thread *td;
struct proc *p;
+ u_char cpu;
td = curthread;
p = td->td_proc;
@@ -1086,6 +1139,7 @@ ithread_loop(void *arg)
("%s: ithread and proc linkage out of sync", __func__));
ie = ithd->it_event;
ie->ie_count = 0;
+ cpu = NOCPU;
/*
* As long as we have interrupts outstanding, go through the
@@ -1131,6 +1185,21 @@ ithread_loop(void *arg)
ie->ie_count = 0;
mi_switch(SW_VOL, NULL);
}
+
+#ifdef SMP
+ /*
+ * Ensure we are bound to the correct CPU. We can't
+ * move ithreads until SMP is running however, so just
+	 * leave interrupts on the boot CPU during boot.
+ */
+ if (ie->ie_cpu != cpu && smp_started) {
+ cpu = ie->ie_cpu;
+ if (cpu == NOCPU)
+ sched_unbind(td);
+ else
+ sched_bind(td, cpu);
+ }
+#endif
thread_unlock(td);
}
}
@@ -1147,6 +1216,7 @@ ithread_loop(void *arg)
struct thread *td;
struct proc *p;
int priv;
+ u_char cpu;
td = curthread;
p = td->td_proc;
@@ -1157,6 +1227,7 @@ ithread_loop(void *arg)
("%s: ithread and proc linkage out of sync", __func__));
ie = ithd->it_event;
ie->ie_count = 0;
+ cpu = NOCPU;
/*
* As long as we have interrupts outstanding, go through the
@@ -1205,6 +1276,21 @@ ithread_loop(void *arg)
ie->ie_count = 0;
mi_switch(SW_VOL, NULL);
}
+
+#ifdef SMP
+ /*
+ * Ensure we are bound to the correct CPU. We can't
+ * move ithreads until SMP is running however, so just
+	 * leave interrupts on the boot CPU during boot.
+ */
+ if (!priv && ie->ie_cpu != cpu && smp_started) {
+ cpu = ie->ie_cpu;
+ if (cpu == NOCPU)
+ sched_unbind(td);
+ else
+ sched_bind(td, cpu);
+ }
+#endif
thread_unlock(td);
}
}
@@ -1440,6 +1526,8 @@ db_dump_intr_event(struct intr_event *ie, int handlers)
db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
else
db_printf("(no thread)");
+ if (ie->ie_cpu != NOCPU)
+ db_printf(" (CPU %d)", ie->ie_cpu);
if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
(it != NULL && it->it_need)) {
db_printf(" {");
diff --git a/sys/powerpc/powerpc/intr_machdep.c b/sys/powerpc/powerpc/intr_machdep.c
index 83805cf..e05195f 100644
--- a/sys/powerpc/powerpc/intr_machdep.c
+++ b/sys/powerpc/powerpc/intr_machdep.c
@@ -233,7 +233,7 @@ powerpc_setup_intr(const char *name, u_int irq, driver_filter_t filter,
#ifdef INTR_FILTER
powerpc_intr_eoi, powerpc_intr_mask,
#endif
- "irq%u:", irq);
+ NULL, "irq%u:", irq);
if (error)
return (error);
diff --git a/sys/sparc64/sparc64/intr_machdep.c b/sys/sparc64/sparc64/intr_machdep.c
index 735b4c0..091ed90 100644
--- a/sys/sparc64/sparc64/intr_machdep.c
+++ b/sys/sparc64/sparc64/intr_machdep.c
@@ -331,9 +331,9 @@ intr_controller_register(int vec, const struct intr_controller *ic,
*/
error = intr_event_create(&ie, iv, 0, intr_enable_eoi,
#ifdef INTR_FILTER
- ic->ic_eoi, ic->ic_disable, "vec%d:", vec);
+ ic->ic_eoi, ic->ic_disable, NULL, "vec%d:", vec);
#else
- "vec%d:", vec);
+ NULL, "vec%d:", vec);
#endif
if (error != 0)
return (error);
diff --git a/sys/sun4v/sun4v/intr_machdep.c b/sys/sun4v/sun4v/intr_machdep.c
index 6b9f1fb..80e9d90 100644
--- a/sys/sun4v/sun4v/intr_machdep.c
+++ b/sys/sun4v/sun4v/intr_machdep.c
@@ -359,7 +359,7 @@ inthand_add(const char *name, int vec, driver_filter_t *filt,
mtx_unlock_spin(&intr_table_lock);
if (ie == NULL) {
errcode = intr_event_create(&ie, (void *)(intptr_t)vec, 0, NULL,
- "vec%d:", vec);
+ NULL, "vec%d:", vec);
if (errcode)
return (errcode);
mtx_lock_spin(&intr_table_lock);
diff --git a/sys/sys/interrupt.h b/sys/sys/interrupt.h
index b110374..54cd0e4 100644
--- a/sys/sys/interrupt.h
+++ b/sys/sys/interrupt.h
@@ -73,6 +73,7 @@ struct intr_event {
void *ie_source; /* Cookie used by MD code. */
struct intr_thread *ie_thread; /* Thread we are connected to. */
void (*ie_enable)(void *);
+ int (*ie_assign_cpu)(void *, u_char);
#ifdef INTR_FILTER
void (*ie_eoi)(void *);
void (*ie_disab)(void *);
@@ -81,6 +82,7 @@ struct intr_event {
int ie_count; /* Loop counter. */
int ie_warncnt; /* Rate-check interrupt storm warns. */
struct timeval ie_warntm;
+ u_char ie_cpu; /* CPU this event is bound to. */
};
/* Interrupt event flags kept in ie_flags. */
@@ -127,15 +129,18 @@ u_char intr_priority(enum intr_type flags);
int intr_event_add_handler(struct intr_event *ie, const char *name,
driver_filter_t filter, driver_intr_t handler, void *arg,
u_char pri, enum intr_type flags, void **cookiep);
+int intr_event_bind(struct intr_event *ie, u_char cpu);
#ifndef INTR_FILTER
int intr_event_create(struct intr_event **event, void *source,
- int flags, void (*enable)(void *), const char *fmt, ...)
- __printflike(5, 6);
+ int flags, void (*enable)(void *),
+ int (*assign_cpu)(void *, u_char), const char *fmt, ...)
+ __printflike(6, 7);
#else
int intr_event_create(struct intr_event **event, void *source,
int flags, void (*enable)(void *), void (*eoi)(void *),
- void (*disab)(void *), const char *fmt, ...)
- __printflike(7, 8);
+ void (*disab)(void *), int (*assign_cpu)(void *, u_char),
+ const char *fmt, ...)
+ __printflike(8, 9);
#endif
int intr_event_destroy(struct intr_event *ie);
int intr_event_remove_handler(void *cookie);