author		Thomas Gleixner <tglx@linutronix.de>	2011-03-28 16:13:24 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2011-03-28 16:55:11 +0200
commit		0521c8fbb3da45c2a58cd551ca6e9644983f6028 (patch)
tree		d2ed3452a75f1d3ff516cd02c86f4371db81e06e
parent		32f4125ebffee4f3c4dbc6a437fc656129eb9e60 (diff)
genirq: Provide edge_eoi flow handler
This is a replacement for the cell flow handler, which is in the way of
cleanups. It must be selected by the platforms that need it, to avoid
general bloat.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
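For context: a platform opts in by adding "select IRQ_EDGE_EOI_HANDLER" to its
Kconfig (see the kernel/irq/Kconfig hunk below) and then installing the handler
per interrupt. A minimal sketch of the wiring, not part of this patch; the
function name and "virq" are hypothetical, and the intended consumer is the
cell platform's interrupt controller:

	#include <linux/irq.h>

	/*
	 * Sketch only: install the new flow handler for an interrupt
	 * that has already been mapped to the controller's irq_chip.
	 */
	static void example_wire_up(unsigned int virq)
	{
		irq_set_handler(virq, handle_edge_eoi_irq);
	}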
-rw-r--r--	include/linux/irq.h	1
-rw-r--r--	kernel/irq/Kconfig	4
-rw-r--r--	kernel/irq/chip.c	45
3 files changed, 50 insertions(+), 0 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 18aaccc..44ebca7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -423,6 +423,7 @@ extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 00f2c03..72606ba 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -51,6 +51,10 @@ config HARDIRQS_SW_RESEND
config IRQ_PREFLOW_FASTEOI
bool
+# Edge style eoi based handler (cell)
+config IRQ_EDGE_EOI_HANDLER
+ bool
+
# Support forced irq threading
config IRQ_FORCED_THREADING
bool
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e00bdc5..451d1e8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -604,6 +604,51 @@ out_unlock:
raw_spin_unlock(&desc->lock);
}
+#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
+/**
+ * handle_edge_eoi_irq - edge eoi type IRQ handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Similar to handle_edge_irq above, but using eoi and without the
+ * mask/unmask logic.
+ */
+void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ raw_spin_lock(&desc->lock);
+
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+ /*
+ * If we're currently running this IRQ, or it's disabled,
+ * we shouldn't process the IRQ. Mark it pending and bail
+ * out via the eoi path.
+ */
+ if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+ irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+ if (!irq_check_poll(desc)) {
+ desc->istate |= IRQS_PENDING;
+ goto out_eoi;
+ }
+ }
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ do {
+ if (unlikely(!desc->action))
+ goto out_eoi;
+
+ handle_irq_event(desc);
+
+ } while ((desc->istate & IRQS_PENDING) &&
+ !irqd_irq_disabled(&desc->irq_data));
+
+out_eoi:
+ chip->irq_eoi(&desc->irq_data);
+ raw_spin_unlock(&desc->lock);
+}
+#endif
+
/**
* handle_percpu_irq - Per CPU local irq handler
* @irq: the interrupt number
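
Since handle_edge_eoi_irq() issues chip->irq_eoi() on every exit path and never
calls mask/unmask, the backing irq_chip only needs an irq_eoi callback. A
minimal sketch of a suitable chip and its registration; all "demo_*" names are
hypothetical, only irq_set_chip_and_handler() and the struct irq_chip fields
are real kernel API:

	#include <linux/irq.h>

	/* Hypothetical EOI callback: a real implementation would write
	 * the controller's EOI register here. */
	static void demo_irq_eoi(struct irq_data *d)
	{
	}

	static struct irq_chip demo_edge_eoi_chip = {
		.name    = "demo-edge-eoi",
		.irq_eoi = demo_irq_eoi,
		/* no irq_mask/irq_unmask: this flow handler never masks */
	};

	/* Pair the chip with the new flow handler (hypothetical call site): */
	static void demo_setup(unsigned int virq)
	{
		irq_set_chip_and_handler(virq, &demo_edge_eoi_chip,
					 handle_edge_eoi_irq);
	}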