Diffstat (limited to 'arch/powerpc/sysdev/uic.c')
 arch/powerpc/sysdev/uic.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 64 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 8905989..47180b3 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -142,7 +143,7 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
 	desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
 	desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
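+	/* 'trigger' is non-zero for edge types, so it's the level-sensitive
+	 * types that need IRQ_LEVEL set on the descriptor */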
-	if (trigger)
+	if (!trigger)
 		desc->status |= IRQ_LEVEL;
 
 	spin_unlock_irqrestore(&uic->lock, flags);
@@ -159,6 +160,64 @@ static struct irq_chip uic_irq_chip = {
 	.set_type = uic_set_irq_type,
 };
 
+/**
+ * handle_uic_irq - irq flow handler for UIC
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * This is a modified version of the generic handle_level_irq() suitable
+ * for the UIC. On the UIC, acking (i.e. clearing the SR bit) a level
+ * irq will have no effect if the interrupt is still asserted by the
+ * device, even if the interrupt is already masked. Therefore, unlike
+ * the standard handle_level_irq(), we must ack the interrupt *after*
+ * invoking the ISR (which should have de-asserted the interrupt in
+ * the external source). For edge interrupts we ack at the beginning
+ * instead of the end, to keep the window in which we can miss an
+ * interrupt as small as possible.
+ */
+void fastcall handle_uic_irq(unsigned int irq, struct irq_desc *desc)
+{
+	unsigned int cpu = smp_processor_id();
+	struct irqaction *action;
+	irqreturn_t action_ret;
+
+	spin_lock(&desc->lock);
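+	/*
+	 * Edge irqs: ack (and mask) right away, to keep the window in
+	 * which a new edge could be missed as small as possible.
+	 * Level irqs: only mask here; acking before the source has been
+	 * de-asserted would have no effect, so the ack is deferred until
+	 * after the handler has run.
+	 */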
+	if (desc->status & IRQ_LEVEL)
+		desc->chip->mask(irq);
+	else
+		desc->chip->mask_ack(irq);
+
+	if (unlikely(desc->status & IRQ_INPROGRESS))
+		goto out_unlock;
+	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+	kstat_cpu(cpu).irqs[irq]++;
+
+	/*
+	 * If it's disabled or no action is available,
+	 * keep it masked and get out of here
+	 */
+	action = desc->action;
+	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
+		desc->status |= IRQ_PENDING;
+		goto out_unlock;
+	}
+
+	desc->status |= IRQ_INPROGRESS;
+	desc->status &= ~IRQ_PENDING;
+	spin_unlock(&desc->lock);
+
+	action_ret = handle_IRQ_event(irq, action);
+
+	spin_lock(&desc->lock);
+	desc->status &= ~IRQ_INPROGRESS;
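+	/*
+	 * A level irq is acked only now: the handler should have
+	 * de-asserted the external source, so clearing the SR bit
+	 * will actually stick this time.
+	 */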
+	if (desc->status & IRQ_LEVEL)
+		desc->chip->ack(irq);
+	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+		desc->chip->unmask(irq);
+out_unlock:
+	spin_unlock(&desc->lock);
+}
+
 static int uic_host_match(struct irq_host *h, struct device_node *node)
 {
 	struct uic *uic = h->host_data;
@@ -173,7 +232,7 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
 	set_irq_chip_data(virq, uic);
 	/* Despite the name, handle_level_irq() works for both level
 	 * and edge irqs on UIC. FIXME: check this is correct */
-	set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
+	set_irq_chip_and_handler(virq, &uic_irq_chip, handle_uic_irq);
 
 	/* Set default irq type */
 	set_irq_type(virq, IRQ_TYPE_NONE);
@@ -207,6 +266,9 @@ irqreturn_t uic_cascade(int virq, void *data)
 	int subvirq;
 
 	msr = mfdcr(uic->dcrbase + UIC_MSR);
+	if (!msr) /* spurious interrupt */
+		return IRQ_HANDLED;
+
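+	/* UIC_MSR numbers its sources from the MSB (bit 0 is source 0),
+	 * so convert ffs()'s LSB-first index into a source number */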
 	src = 32 - ffs(msr);
 	subvirq = irq_linear_revmap(uic->irqhost, src);