author    piso <piso@FreeBSD.org>	2007-05-31 19:25:35 +0000
committer piso <piso@FreeBSD.org>	2007-05-31 19:25:35 +0000
commit    42dfc7815053cf9eda064a398dd5f2e1efa583ca (patch)
tree      970cc806d49c2592d85b73107b989311f414b03f /sys
parent    e12a0ce02fe36373a2610fcdbf80521f4613b504 (diff)
In some particular cases (such as pccard and pccbb), the real device handler
is wrapped in a pair of functions: a filter wrapper and an ithread wrapper.
In this case (and only in this case), the filter wrapper may ask the system
to schedule the ithread and mask the interrupt source when the wrapped handler
consists of just an ithread handler. Modify the "old" interrupt code to
support this situation; the "new" interrupt code already handles it.

Discussed with:	jhb
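Below is a minimal, hypothetical sketch of the kind of filter wrapper the
message describes. It is not the actual pccard/pccbb code: the names
mybridge_intr_filter, struct mybridge_softc and its child_* fields are
invented for illustration, while FILTER_SCHEDULE_THREAD, FILTER_STRAY,
driver_filter_t and driver_intr_t are the real definitions from <sys/bus.h>.

#include <sys/param.h>
#include <sys/bus.h>

/* Invented softc: remembers the wrapped (child) handlers. */
struct mybridge_softc {
	driver_filter_t	*child_filter;	/* child's filter, may be NULL */
	driver_intr_t	*child_ithread;	/* child's ithread handler, may be NULL */
	void		*child_arg;
};

static int
mybridge_intr_filter(void *arg)
{
	struct mybridge_softc *sc = arg;

	/* If the child registered a filter, run it in primary context. */
	if (sc->child_filter != NULL)
		return (sc->child_filter(sc->child_arg));

	/*
	 * The wrapped handler is composed of just an ithread handler:
	 * ask the system to schedule the ithread and mask the interrupt
	 * source.  Honouring this return value from a filter is what the
	 * change below teaches the "old" MD interrupt code to do.
	 */
	if (sc->child_ithread != NULL)
		return (FILTER_SCHEDULE_THREAD);

	/* No handler at all: treat the interrupt as stray. */
	return (FILTER_STRAY);
}

The per-architecture hunks that follow simply check this return value in
their filter loops and set the "schedule the ithread" flag accordingly.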
Diffstat (limited to 'sys')
 -rw-r--r--  sys/amd64/amd64/intr_machdep.c       15
 -rw-r--r--  sys/arm/arm/intr.c                   13
 -rw-r--r--  sys/i386/i386/intr_machdep.c         25
 -rw-r--r--  sys/ia64/ia64/interrupt.c            13
 -rw-r--r--  sys/kern/kern_intr.c                  3
 -rw-r--r--  sys/powerpc/powerpc/intr_machdep.c   13
 -rw-r--r--  sys/sparc64/sparc64/intr_machdep.c   13
 -rw-r--r--  sys/sun4v/sun4v/intr_machdep.c       13
8 files changed, 89 insertions, 19 deletions
diff --git a/sys/amd64/amd64/intr_machdep.c b/sys/amd64/amd64/intr_machdep.c
index 2864868..6ed8c80 100644
--- a/sys/amd64/amd64/intr_machdep.c
+++ b/sys/amd64/amd64/intr_machdep.c
@@ -310,7 +310,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
struct thread *td;
struct intr_event *ie;
struct intr_handler *ih;
- int error, vector, thread;
+ int error, vector, thread, ret;
td = curthread;
@@ -356,6 +356,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
* a trapframe as its argument.
*/
td->td_intr_nesting_level++;
+ ret = 0;
thread = 0;
critical_enter();
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
@@ -367,9 +368,17 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
ih->ih_filter, ih->ih_argument == NULL ? frame :
ih->ih_argument, ih->ih_name);
if (ih->ih_argument == NULL)
- ih->ih_filter(frame);
+ ret = ih->ih_filter(frame);
else
- ih->ih_filter(ih->ih_argument);
+ ret = ih->ih_filter(ih->ih_argument);
+ /*
+ * Wrapper handler special case: see
+ * i386/intr_machdep.c::intr_execute_handlers()
+ */
+ if (!thread) {
+ if (ret == FILTER_SCHEDULE_THREAD)
+ thread = 1;
+ }
}
/*
diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c
index c5bbb51..8d573dc 100644
--- a/sys/arm/arm/intr.c
+++ b/sys/arm/arm/intr.c
@@ -104,7 +104,7 @@ arm_handler_execute(struct trapframe *frame, int irqnb)
struct intr_event *event;
struct intr_handler *ih;
struct thread *td = curthread;
- int i, thread;
+ int i, thread, ret;
PCPU_LAZY_INC(cnt.v_intr);
td->td_intr_nesting_level++;
@@ -116,13 +116,22 @@ arm_handler_execute(struct trapframe *frame, int irqnb)
continue;
/* Execute fast handlers. */
+ ret = 0;
thread = 0;
TAILQ_FOREACH(ih, &event->ie_handlers, ih_next) {
if (ih->ih_filter == NULL)
thread = 1;
else
- ih->ih_filter(ih->ih_argument ?
+ ret = ih->ih_filter(ih->ih_argument ?
ih->ih_argument : frame);
+ /*
+ * Wrapper handler special case: see
+ * i386/intr_machdep.c::intr_execute_handlers()
+ */
+ if (!thread) {
+ if (ret == FILTER_SCHEDULE_THREAD)
+ thread = 1;
+ }
}
/* Schedule thread if needed. */
diff --git a/sys/i386/i386/intr_machdep.c b/sys/i386/i386/intr_machdep.c
index 0fa44ad..65e67e2 100644
--- a/sys/i386/i386/intr_machdep.c
+++ b/sys/i386/i386/intr_machdep.c
@@ -301,7 +301,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
struct thread *td;
struct intr_event *ie;
struct intr_handler *ih;
- int error, vector, thread;
+ int error, vector, thread, ret;
td = curthread;
@@ -347,6 +347,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
* a trapframe as its argument.
*/
td->td_intr_nesting_level++;
+ ret = 0;
thread = 0;
critical_enter();
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
@@ -358,9 +359,27 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
ih->ih_filter, ih->ih_argument == NULL ? frame :
ih->ih_argument, ih->ih_name);
if (ih->ih_argument == NULL)
- ih->ih_filter(frame);
+ ret = ih->ih_filter(frame);
else
- ih->ih_filter(ih->ih_argument);
+ ret = ih->ih_filter(ih->ih_argument);
+ /*
+ * Wrapper handler special handling:
+ *
+ * in some particular cases (like pccard and pccbb),
+ * the _real_ device handler is wrapped in a couple of
+ * functions - a filter wrapper and an ithread wrapper.
+ * In this case (and just in this case), the filter wrapper
+ * could ask the system to schedule the ithread and mask
+ * the interrupt source if the wrapped handler is composed
+ * of just an ithread handler.
+ *
+ * TODO: write a generic wrapper to avoid people rolling
+ * their own
+ */
+ if (!thread) {
+ if (ret == FILTER_SCHEDULE_THREAD)
+ thread = 1;
+ }
}
/*
diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c
index 61d1bdc..8cf18b4 100644
--- a/sys/ia64/ia64/interrupt.c
+++ b/sys/ia64/ia64/interrupt.c
@@ -353,7 +353,7 @@ ia64_dispatch_intr(void *frame, unsigned long vector)
struct ia64_intr *i;
struct intr_event *ie; /* our interrupt event */
struct intr_handler *ih;
- int error, thread;
+ int error, thread, ret;
/*
* Find the interrupt thread for this vector.
@@ -379,6 +379,7 @@ ia64_dispatch_intr(void *frame, unsigned long vector)
* Execute all fast interrupt handlers directly without Giant. Note
* that this means that any fast interrupt handler must be MP safe.
*/
+ ret = 0;
thread = 0;
critical_enter();
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
@@ -388,7 +389,15 @@ ia64_dispatch_intr(void *frame, unsigned long vector)
}
CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
ih->ih_filter, ih->ih_argument, ih->ih_name);
- ih->ih_filter(ih->ih_argument);
+ ret = ih->ih_filter(ih->ih_argument);
+ /*
+ * Wrapper handler special case: see
+ * i386/intr_machdep.c::intr_execute_handlers()
+ */
+ if (!thread) {
+ if (ret == FILTER_SCHEDULE_THREAD)
+ thread = 1;
+ }
}
critical_exit();
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index a9adaa1..aa79bfa 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -1264,9 +1264,6 @@ intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
continue;
}
- KASSERT(ret != FILTER_SCHEDULE_THREAD,
- ("intr_filter_loop: FILTER_SCHEDULE_THREAD from filter"));
-
if (ret & FILTER_STRAY)
continue;
else {
diff --git a/sys/powerpc/powerpc/intr_machdep.c b/sys/powerpc/powerpc/intr_machdep.c
index f493147..287df91 100644
--- a/sys/powerpc/powerpc/intr_machdep.c
+++ b/sys/powerpc/powerpc/intr_machdep.c
@@ -198,7 +198,7 @@ intr_handle(u_int irq)
struct ppc_intr *i;
struct intr_event *ie;
struct intr_handler *ih;
- int error, sched;
+ int error, sched, ret;
i = ppc_intrs[irq];
if (i == NULL)
@@ -216,6 +216,7 @@ intr_handle(u_int irq)
* Execute all fast interrupt handlers directly without Giant. Note
* that this means that any fast interrupt handler must be MP safe.
*/
+ ret = 0;
sched = 0;
critical_enter();
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
@@ -225,7 +226,15 @@ intr_handle(u_int irq)
}
CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
ih->ih_filter, ih->ih_argument, ih->ih_name);
- ih->ih_filter(ih->ih_argument);
+ ret = ih->ih_filter(ih->ih_argument);
+ /*
+ * Wrapper handler special case: see
+ * i386/intr_machdep.c::intr_execute_handlers()
+ */
+ if (!sched) {
+ if (ret == FILTER_SCHEDULE_THREAD)
+ sched = 1;
+ }
}
critical_exit();
diff --git a/sys/sparc64/sparc64/intr_machdep.c b/sys/sparc64/sparc64/intr_machdep.c
index 66c9ab7..66b285e 100644
--- a/sys/sparc64/sparc64/intr_machdep.c
+++ b/sys/sparc64/sparc64/intr_machdep.c
@@ -236,7 +236,7 @@ intr_execute_handlers(void *cookie)
struct intr_vector *iv;
struct intr_event *ie;
struct intr_handler *ih;
- int error, thread;
+ int error, thread, ret;
iv = cookie;
ie = iv->iv_event;
@@ -246,6 +246,7 @@ intr_execute_handlers(void *cookie)
}
/* Execute fast interrupt handlers directly. */
+ ret = 0;
thread = 0;
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (ih->ih_filter == NULL) {
@@ -255,7 +256,15 @@ intr_execute_handlers(void *cookie)
MPASS(ih->ih_filter != NULL && ih->ih_argument != NULL);
CTR3(KTR_INTR, "%s: executing handler %p(%p)", __func__,
ih->ih_filter, ih->ih_argument);
- ih->ih_filter(ih->ih_argument);
+ ret = ih->ih_filter(ih->ih_argument);
+ /*
+ * Wrapper handler special case: see
+ * i386/intr_machdep.c::intr_execute_handlers()
+ */
+ if (!thread) {
+ if (ret == FILTER_SCHEDULE_THREAD)
+ thread = 1;
+ }
}
/* Schedule a heavyweight interrupt process. */
diff --git a/sys/sun4v/sun4v/intr_machdep.c b/sys/sun4v/sun4v/intr_machdep.c
index 559bf8f..6b9f1fb 100644
--- a/sys/sun4v/sun4v/intr_machdep.c
+++ b/sys/sun4v/sun4v/intr_machdep.c
@@ -283,7 +283,7 @@ intr_execute_handlers(void *cookie)
struct intr_vector *iv;
struct intr_event *ie;
struct intr_handler *ih;
- int fast, thread;
+ int fast, thread, ret;
iv = cookie;
ie = iv->iv_event;
@@ -292,6 +292,7 @@ intr_execute_handlers(void *cookie)
return;
}
+ ret = 0;
fast = thread = 0;
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (ih->ih_filter == NULL) {
@@ -301,8 +302,16 @@ intr_execute_handlers(void *cookie)
MPASS(ih->ih_filter != NULL && ih->ih_argument != NULL);
CTR3(KTR_INTR, "%s: executing handler %p(%p)", __func__,
ih->ih_filter, ih->ih_argument);
- ih->ih_filter(ih->ih_argument);
+ ret = ih->ih_filter(ih->ih_argument);
fast = 1;
+ /*
+ * Wrapper handler special case: see
+ * i386/intr_machdep.c::intr_execute_handlers()
+ */
+ if (!thread) {
+ if (ret == FILTER_SCHEDULE_THREAD)
+ thread = 1;
+ }
}
/* Schedule a heavyweight interrupt process. */