summaryrefslogtreecommitdiffstats
path: root/sys/alpha
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2005-10-25 19:48:48 +0000
committerjhb <jhb@FreeBSD.org>2005-10-25 19:48:48 +0000
commite20e5c07ce4cea9d83ea29f37035a800261a5025 (patch)
treec0e88867c475b0bb7f3b0c1dc5799cf5ad98997f /sys/alpha
parentb5662e2acad3b391e8117ba16eaca7a1cb878179 (diff)
downloadFreeBSD-src-e20e5c07ce4cea9d83ea29f37035a800261a5025.zip
FreeBSD-src-e20e5c07ce4cea9d83ea29f37035a800261a5025.tar.gz
Reorganize the interrupt handling code a bit to make a few things cleaner
and increase flexibility to allow various different approaches to be tried in the future. - Split struct ithd up into two pieces. struct intr_event holds the list of interrupt handlers associated with interrupt sources. struct intr_thread contains the data relative to an interrupt thread. Currently we still provide a 1:1 relationship of events to threads with the exception that events only have an associated thread if there is at least one threaded interrupt handler attached to the event. This means that on x86 we no longer have 4 bazillion interrupt threads with no handlers. It also means that interrupt events with only INTR_FAST handlers no longer have an associated thread either. - Renamed struct intrhand to struct intr_handler to follow the struct intr_foo naming convention. This did require renaming the powerpc MD struct intr_handler to struct ppc_intr_handler. - INTR_FAST no longer implies INTR_EXCL on all architectures except for powerpc. This means that multiple INTR_FAST handlers can attach to the same interrupt and that INTR_FAST and non-INTR_FAST handlers can attach to the same interrupt. Sharing INTR_FAST handlers may not always be desirable, but having sio(4) and uhci(4) fight over an IRQ isn't fun either. Drivers can always still use INTR_EXCL to ask for an interrupt exclusively. The way this sharing works is that when an interrupt comes in, all the INTR_FAST handlers are executed first, and if any threaded handlers exist, the interrupt thread is scheduled afterwards. This type of layout also makes it possible to investigate using interrupt filters ala OS X where the filter determines whether or not its companion threaded handler should run. - Aside from the INTR_FAST changes above, the impact on MD interrupt code is mostly just 's/ithread/intr_event/'. - A new MI ddb command 'show intrs' walks the list of interrupt events dumping their state. It also has a '/v' verbose switch which dumps info about all of the handlers attached to each event. 
- We currently don't destroy an interrupt thread when the last threaded handler is removed because it would suck for things like ppbus(8)'s braindead behavior. The code is present, though, it is just under #if 0 for now. - Move the code to actually execute the threaded handlers for an interrupt event into a separate function so that ithread_loop() becomes more readable. Previously this code was all in the middle of ithread_loop() and indented halfway across the screen. - Made struct intr_thread private to kern_intr.c and replaced td_ithd with a thread private flag TDP_ITHREAD. - In statclock, check curthread against idlethread directly rather than curthread's proc against idlethread's proc. (Not really related to intr changes) Tested on: alpha, amd64, i386, sparc64 Tested on: arm, ia64 (older version of patch by cognet and marcel)
Diffstat (limited to 'sys/alpha')
-rw-r--r--sys/alpha/alpha/interrupt.c72
-rw-r--r--sys/alpha/isa/isa.c10
2 files changed, 48 insertions, 34 deletions
diff --git a/sys/alpha/alpha/interrupt.c b/sys/alpha/alpha/interrupt.c
index 6f812bb..b4a7500 100644
--- a/sys/alpha/alpha/interrupt.c
+++ b/sys/alpha/alpha/interrupt.c
@@ -325,8 +325,9 @@ LIST_HEAD(alpha_intr_list, alpha_intr);
struct alpha_intr {
LIST_ENTRY(alpha_intr) list; /* chain handlers in this hash bucket */
uintptr_t vector; /* vector to match */
- struct ithd *ithd; /* interrupt thread */
+ struct intr_event *ie; /* interrupt event structure */
volatile long *cntp; /* interrupt counter */
+ void (*disable)(uintptr_t);
};
static struct mtx alpha_intr_hash_lock;
@@ -338,7 +339,7 @@ static void
ithds_init(void *dummy)
{
- mtx_init(&alpha_intr_hash_lock, "ithread table lock", NULL, MTX_SPIN);
+ mtx_init(&alpha_intr_hash_lock, "intr table", NULL, MTX_SPIN);
}
SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
@@ -371,8 +372,9 @@ alpha_setup_intr(const char *name, uintptr_t vector, driver_intr_t handler, void
return ENOMEM;
i->vector = vector;
i->cntp = cntp;
- errcode = ithread_create(&i->ithd, vector, 0, disable, enable,
- "intr:");
+ i->disable = disable;
+ errcode = intr_event_create(&i->ie, (void *)vector, 0,
+ (void (*)(void *))enable, "intr:");
if (errcode) {
free(i, M_DEVBUF);
return errcode;
@@ -384,44 +386,49 @@ alpha_setup_intr(const char *name, uintptr_t vector, driver_intr_t handler, void
}
/* Second, add this handler. */
- return (ithread_add_handler(i->ithd, name, handler, arg,
- ithread_priority(flags), flags, cookiep));
+ return (intr_event_add_handler(i->ie, name, handler, arg,
+ intr_priority(flags), flags, cookiep));
}
int
alpha_teardown_intr(void *cookie)
{
- return (ithread_remove_handler(cookie));
+ return (intr_event_remove_handler(cookie));
}
+/*
+ * XXX: Alpha doesn't count stray interrupts like some of the other archs.
+ */
void
alpha_dispatch_intr(void *frame, unsigned long vector)
{
int h = HASHVEC(vector);
struct alpha_intr *i;
- struct ithd *ithd; /* our interrupt thread */
- struct intrhand *ih;
- int error;
+ struct intr_event *ie;
+ struct intr_handler *ih;
+ int error, thread;
/*
* Walk the hash bucket for this vector looking for this vector's
- * interrupt thread.
+ * interrupt structure.
*/
for (i = LIST_FIRST(&alpha_intr_hash[h]); i && i->vector != vector;
i = LIST_NEXT(i, list))
; /* nothing */
+
+ /* No interrupt structure for this vector. */
if (i == NULL)
- return; /* no ithread for this vector */
+ return;
- ithd = i->ithd;
- KASSERT(ithd != NULL, ("interrupt vector without a thread"));
+ ie = i->ie;
+ KASSERT(ie != NULL, ("interrupt structure without an event"));
/*
- * As an optimization, if an ithread has no handlers, don't
+ * As an optimization, if an event has no handlers, don't
* schedule it to run.
*/
- if (TAILQ_EMPTY(&ithd->it_handlers))
+ if (TAILQ_EMPTY(&ie->ie_handlers))
return;
atomic_add_long(i->cntp, 1);
@@ -433,25 +440,32 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
*/
sched_pin();
+ /* Execute all fast interrupt handlers directly. */
+ thread = 0;
+ critical_enter();
+ TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
+ if (!(ih->ih_flags & IH_FAST)) {
+ thread = 1;
+ continue;
+ }
+ CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
+ ih->ih_handler, ih->ih_argument, ih->ih_name);
+ ih->ih_handler(ih->ih_argument);
+ }
+ critical_exit();
+
/*
- * Handle a fast interrupt if there is no actual thread for this
- * interrupt by calling the handler directly without Giant. Note
- * that this means that any fast interrupt handler must be MP safe.
+ * If the ithread needs to run, disable the source and schedule the
+ * thread.
*/
- ih = TAILQ_FIRST(&ithd->it_handlers);
- if ((ih->ih_flags & IH_FAST) != 0) {
- critical_enter();
- ih->ih_handler(ih->ih_argument);
- critical_exit();
- } else {
- if (ithd->it_disable) {
+ if (thread) {
+ if (i->disable) {
CTR1(KTR_INTR,
"alpha_dispatch_intr: disabling vector 0x%x",
i->vector);
- ithd->it_disable(ithd->it_vector);
+ i->disable(i->vector);
}
-
- error = ithread_schedule(ithd);
+ error = intr_event_schedule_thread(ie);
KASSERT(error == 0, ("got an impossible stray interrupt"));
}
sched_unpin();
diff --git a/sys/alpha/isa/isa.c b/sys/alpha/isa/isa.c
index 5675c99..263b6bb 100644
--- a/sys/alpha/isa/isa.c
+++ b/sys/alpha/isa/isa.c
@@ -389,14 +389,14 @@ isa_teardown_intr(device_t dev, device_t child,
struct resource *irq, void *cookie)
{
struct isa_intr *ii = cookie;
- struct intrhand *ih, *handler = (struct intrhand *)ii->ih;
- struct ithd *ithread = handler->ih_ithread;
+ struct intr_handler *ih, *handler = (struct intr_handler *)ii->ih;
+ struct intr_event *ie = handler->ih_event;
int num_handlers = 0;
- mtx_lock(&ithread->it_lock);
- TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
+ mtx_lock(&ie->ie_lock);
+ TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
num_handlers++;
- mtx_unlock(&ithread->it_lock);
+ mtx_unlock(&ie->ie_lock);
/*
* Only disable the interrupt in hardware if there are no
OpenPOWER on IntegriCloud