-rw-r--r--  include/linux/interrupt.h |  4
-rw-r--r--  include/linux/irq.h       |  1
-rw-r--r--  kernel/irq/chip.c         | 14
-rw-r--r--  kernel/irq/manage.c       | 49
4 files changed, 61 insertions(+), 7 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 35e7df1..1ac57e5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -50,6 +50,9 @@
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
* registered first in an shared interrupt is considered for
* performance reasons)
+ * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
+ * Used by threaded interrupts which need to keep the
+ * irq line disabled until the threaded handler has been run.
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -59,6 +62,7 @@
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
+#define IRQF_ONESHOT 0x00002000
/*
* Bits used by threaded handlers:
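
[Editor's illustration, not part of the patch: a minimal sketch of the usage pattern the new flag enables, assuming a hypothetical foo driver (struct foo_dev, foo_handle_event() and the other foo_* names are illustrative). Passing handler == NULL together with IRQF_ONESHOT installs the default primary handler and keeps the line masked until the thread handler has run.]

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device structure and helper; not part of this patch. */
struct foo_dev {
	int irq;
	void __iomem *regs;
};

static void foo_handle_event(struct foo_dev *foo)
{
	/* Slow, sleeping work (e.g. I2C/SPI transfers) would go here. */
}

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* Runs in process context while the irq line is still masked. */
	foo_handle_event(foo);

	return IRQ_HANDLED;	/* the line is unmasked after this returns */
}

static int foo_setup_irq(struct foo_dev *foo)
{
	/*
	 * handler == NULL installs irq_default_primary_handler();
	 * IRQF_ONESHOT keeps the line masked until foo_irq_thread()
	 * has finished.
	 */
	return request_threaded_irq(foo->irq, NULL, foo_irq_thread,
				    IRQF_ONESHOT, "foo", foo);
}

[This matters for level-triggered devices that can only be acknowledged from sleepable context; without IRQF_ONESHOT the still-asserted line would retrigger as soon as the hard irq handler returns.]
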
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cb2e77a..5e7c6ee 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -69,6 +69,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
+#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 13c68e7..b08c0d2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -382,7 +382,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
- if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+
+ if (unlikely(desc->status & IRQ_ONESHOT))
+ desc->status |= IRQ_MASKED;
+ else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
spin_unlock(&desc->lock);
@@ -478,8 +481,13 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
- if (desc->chip->ack)
- desc->chip->ack(irq);
+ if (unlikely(desc->status & IRQ_ONESHOT)) {
+ desc->status |= IRQ_MASKED;
+ mask_ack_irq(desc, irq);
+ } else {
+ if (desc->chip->ack)
+ desc->chip->ack(irq);
+ }
/* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d222515..d7f7b5f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -436,6 +436,16 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return ret;
}
+/*
+ * Default primary interrupt handler for threaded interrupts. Is
+ * assigned as primary handler when request_threaded_irq is called
+ * with handler == NULL. Useful for oneshot interrupts.
+ */
+static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
+{
+ return IRQ_WAKE_THREAD;
+}
+
static int irq_wait_for_interrupt(struct irqaction *action)
{
while (!kthread_should_stop()) {
@@ -451,6 +461,21 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return -1;
}
+/*
+ * Oneshot interrupts keep the irq line masked until the threaded
+ * handler finished. unmask if the interrupt has not been disabled and
+ * is marked MASKED.
+ */
+static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+{
+ spin_lock_irq(&desc->lock);
+ if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
+ desc->status &= ~IRQ_MASKED;
+ desc->chip->unmask(irq);
+ }
+ spin_unlock_irq(&desc->lock);
+}
+
#ifdef CONFIG_SMP
/*
* Check whether we need to change the affinity of the interrupt thread.
@@ -492,7 +517,7 @@ static int irq_thread(void *data)
struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
struct irqaction *action = data;
struct irq_desc *desc = irq_to_desc(action->irq);
- int wake;
+ int wake, oneshot = desc->status & IRQ_ONESHOT;
sched_setscheduler(current, SCHED_FIFO, &param);
current->irqaction = action;
@@ -518,6 +543,9 @@ static int irq_thread(void *data)
spin_unlock_irq(&desc->lock);
action->thread_fn(action->irq, action->dev_id);
+
+ if (oneshot)
+ irq_finalize_oneshot(action->irq, desc);
}
wake = atomic_dec_and_test(&desc->threads_active);
@@ -590,6 +618,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
rand_initialize_irq(irq);
}
+ /* Oneshot interrupts are not allowed with shared */
+ if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
+ return -EINVAL;
+
/*
* Threaded handler ?
*/
@@ -663,9 +695,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
desc->status |= IRQ_PER_CPU;
#endif
- desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+ desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+ if (new->flags & IRQF_ONESHOT)
+ desc->status |= IRQ_ONESHOT;
+
if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
@@ -878,6 +913,8 @@ EXPORT_SYMBOL(free_irq);
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Primary handler for threaded interrupts
+ * If NULL and thread_fn != NULL the default
+ * primary handler is installed
* @thread_fn: Function called from the irq handler thread
* If NULL, no irq thread is created
* @irqflags: Interrupt type flags
@@ -957,8 +994,12 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
if (desc->status & IRQ_NOREQUEST)
return -EINVAL;
- if (!handler)
- return -EINVAL;
+
+ if (!handler) {
+ if (!thread_fn)
+ return -EINVAL;
+ handler = irq_default_primary_handler;
+ }
action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
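
[Editor's illustration, not part of the patch: a second hedged sketch, again with hypothetical names (bar_*, BAR_STATUS), of a driver that keeps its own primary handler but still relies on the oneshot behaviour. The primary handler does only fast, non-sleeping work and always returns IRQ_WAKE_THREAD, so irq_finalize_oneshot() is guaranteed to run and unmask the line after the thread handler.]

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical device and register offset; illustrative only. */
#define BAR_STATUS	0x04

struct bar_dev {
	int irq;
	void __iomem *regs;
	u32 last_status;
};

static void bar_clear_source(struct bar_dev *bar, u32 status)
{
	/* Slow-bus access to deassert the interrupt source; may sleep. */
}

static irqreturn_t bar_irq_primary(int irq, void *dev_id)
{
	struct bar_dev *bar = dev_id;

	/* Fast, non-sleeping work in hard irq context: latch the status. */
	bar->last_status = readl(bar->regs + BAR_STATUS);

	/*
	 * Always wake the thread: with IRQF_ONESHOT the line stays masked
	 * until irq_finalize_oneshot() runs after the thread handler.
	 */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t bar_irq_thread(int irq, void *dev_id)
{
	struct bar_dev *bar = dev_id;

	bar_clear_source(bar, bar->last_status);

	return IRQ_HANDLED;
}

static int bar_setup_irq(struct bar_dev *bar)
{
	/* Note: IRQF_SHARED together with IRQF_ONESHOT is rejected (-EINVAL). */
	return request_threaded_irq(bar->irq, bar_irq_primary, bar_irq_thread,
				    IRQF_ONESHOT, "bar", bar);
}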