author    | Timothy Pearson <tpearson@raptorengineering.com> | 2017-08-23 14:45:25 -0500
committer | Timothy Pearson <tpearson@raptorengineering.com> | 2017-08-23 14:45:25 -0500
commit    | fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204 (patch)
tree      | 22962a4387943edc841c72a4e636a068c66d58fd /kernel/irq
download  | ast2050-linux-kernel-fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204.zip
          | ast2050-linux-kernel-fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204.tar.gz
Initial import of modified Linux 2.6.28 tree
Original upstream URL:
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git (branch linux-2.6.28.y)
Diffstat (limited to 'kernel/irq')
-rw-r--r-- | kernel/irq/Makefile    |   5
-rw-r--r-- | kernel/irq/autoprobe.c | 191
-rw-r--r-- | kernel/irq/chip.c      | 631
-rw-r--r-- | kernel/irq/devres.c    |  90
-rw-r--r-- | kernel/irq/handle.c    | 277
-rw-r--r-- | kernel/irq/internals.h |  69
-rw-r--r-- | kernel/irq/manage.c    | 736
-rw-r--r-- | kernel/irq/migration.c |  64
-rw-r--r-- | kernel/irq/proc.c      | 249
-rw-r--r-- | kernel/irq/resend.c    |  82
-rw-r--r-- | kernel/irq/spurious.c  | 300
11 files changed, 2694 insertions, 0 deletions
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile new file mode 100644 index 0000000..681c52d --- /dev/null +++ b/kernel/irq/Makefile @@ -0,0 +1,5 @@ + +obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o +obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o +obj-$(CONFIG_PROC_FS) += proc.o +obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c new file mode 100644 index 0000000..cc0f732 --- /dev/null +++ b/kernel/irq/autoprobe.c @@ -0,0 +1,191 @@ +/* + * linux/kernel/irq/autoprobe.c + * + * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar + * + * This file contains the interrupt probing code and driver APIs. + */ + +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> + +#include "internals.h" + +/* + * Autodetection depends on the fact that any interrupt that + * comes in on to an unassigned handler will get stuck with + * "IRQ_WAITING" cleared and the interrupt disabled. + */ +static DEFINE_MUTEX(probing_active); + +/** + * probe_irq_on - begin an interrupt autodetect + * + * Commence probing for an interrupt. The interrupts are scanned + * and a mask of potential interrupt lines is returned. + * + */ +unsigned long probe_irq_on(void) +{ + struct irq_desc *desc; + unsigned long mask = 0; + unsigned int status; + int i; + + mutex_lock(&probing_active); + /* + * something may have generated an irq long ago and we want to + * flush such a longstanding irq before considering it as spurious. + */ + for_each_irq_desc_reverse(i, desc) { + spin_lock_irq(&desc->lock); + if (!desc->action && !(desc->status & IRQ_NOPROBE)) { + /* + * An old-style architecture might still have + * the handle_bad_irq handler there: + */ + compat_irq_chip_set_default_handler(desc); + + /* + * Some chips need to know about probing in + * progress: + */ + if (desc->chip->set_type) + desc->chip->set_type(i, IRQ_TYPE_PROBE); + desc->chip->startup(i); + } + spin_unlock_irq(&desc->lock); + } + + /* Wait for longstanding interrupts to trigger. */ + msleep(20); + + /* + * enable any unassigned irqs + * (we must startup again here because if a longstanding irq + * happened in the previous stage, it may have masked itself) + */ + for_each_irq_desc_reverse(i, desc) { + spin_lock_irq(&desc->lock); + if (!desc->action && !(desc->status & IRQ_NOPROBE)) { + desc->status |= IRQ_AUTODETECT | IRQ_WAITING; + if (desc->chip->startup(i)) + desc->status |= IRQ_PENDING; + } + spin_unlock_irq(&desc->lock); + } + + /* + * Wait for spurious interrupts to trigger + */ + msleep(100); + + /* + * Now filter out any obviously spurious interrupts + */ + for_each_irq_desc(i, desc) { + spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + /* It triggered already - consider it spurious. */ + if (!(status & IRQ_WAITING)) { + desc->status = status & ~IRQ_AUTODETECT; + desc->chip->shutdown(i); + } else + if (i < 32) + mask |= 1 << i; + } + spin_unlock_irq(&desc->lock); + } + + return mask; +} +EXPORT_SYMBOL(probe_irq_on); + +/** + * probe_irq_mask - scan a bitmap of interrupt lines + * @val: mask of interrupts to consider + * + * Scan the interrupt lines and return a bitmap of active + * autodetect interrupts. The interrupt probe logic state + * is then returned to its previous value. + * + * Note: we need to scan all the irq's even though we will + * only return autodetect irq numbers - just so that we reset + * them all to a known state. 
+ */ +unsigned int probe_irq_mask(unsigned long val) +{ + unsigned int status, mask = 0; + struct irq_desc *desc; + int i; + + for_each_irq_desc(i, desc) { + spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + if (i < 16 && !(status & IRQ_WAITING)) + mask |= 1 << i; + + desc->status = status & ~IRQ_AUTODETECT; + desc->chip->shutdown(i); + } + spin_unlock_irq(&desc->lock); + } + mutex_unlock(&probing_active); + + return mask & val; +} +EXPORT_SYMBOL(probe_irq_mask); + +/** + * probe_irq_off - end an interrupt autodetect + * @val: mask of potential interrupts (unused) + * + * Scans the unused interrupt lines and returns the line which + * appears to have triggered the interrupt. If no interrupt was + * found then zero is returned. If more than one interrupt is + * found then the negative of the first candidate is returned to + * indicate there is doubt. + * + * The interrupt probe logic state is returned to its previous + * value. + * + * BUGS: When used in a module (which arguably shouldn't happen) + * nothing prevents two IRQ probe callers from overlapping. The + * results of this are non-optimal. + */ +int probe_irq_off(unsigned long val) +{ + int i, irq_found = 0, nr_of_irqs = 0; + struct irq_desc *desc; + unsigned int status; + + for_each_irq_desc(i, desc) { + spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + if (!(status & IRQ_WAITING)) { + if (!nr_of_irqs) + irq_found = i; + nr_of_irqs++; + } + desc->status = status & ~IRQ_AUTODETECT; + desc->chip->shutdown(i); + } + spin_unlock_irq(&desc->lock); + } + mutex_unlock(&probing_active); + + if (nr_of_irqs > 1) + irq_found = -irq_found; + + return irq_found; +} +EXPORT_SYMBOL(probe_irq_off); + diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c new file mode 100644 index 0000000..10b5092 --- /dev/null +++ b/kernel/irq/chip.c @@ -0,0 +1,631 @@ +/* + * linux/kernel/irq/chip.c + * + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King + * + * This file contains the core interrupt handling code, for irq-chip + * based architectures.
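[Editor's illustration — not part of the original patch] A minimal sketch of how a legacy driver would use the autoprobe API above; example_device_fire_irq() and the delay length are hypothetical:

	unsigned long mask;
	int irq;

	mask = probe_irq_on();			/* arm autodetection */
	example_device_fire_irq();		/* hypothetical: make the card raise its line */
	mdelay(10);				/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);		/* 0: none found, <0: several candidates */
	if (irq <= 0)
		return -ENODEV;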
+ * + * Detailed information is available in Documentation/DocBook/genericirq + */ + +#include <linux/irq.h> +#include <linux/msi.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> + +#include "internals.h" + +/** + * dynamic_irq_init - initialize a dynamically allocated irq + * @irq: irq number to initialize + */ +void dynamic_irq_init(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); + return; + } + + /* Ensure we don't have left over values from a previous use of this irq */ + spin_lock_irqsave(&desc->lock, flags); + desc->status = IRQ_DISABLED; + desc->chip = &no_irq_chip; + desc->handle_irq = handle_bad_irq; + desc->depth = 1; + desc->msi_desc = NULL; + desc->handler_data = NULL; + desc->chip_data = NULL; + desc->action = NULL; + desc->irq_count = 0; + desc->irqs_unhandled = 0; +#ifdef CONFIG_SMP + cpus_setall(desc->affinity); +#endif + spin_unlock_irqrestore(&desc->lock, flags); +} + +/** + * dynamic_irq_cleanup - cleanup a dynamically allocated irq + * @irq: irq number to initialize + */ +void dynamic_irq_cleanup(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); + return; + } + + spin_lock_irqsave(&desc->lock, flags); + if (desc->action) { + spin_unlock_irqrestore(&desc->lock, flags); + WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", + irq); + return; + } + desc->msi_desc = NULL; + desc->handler_data = NULL; + desc->chip_data = NULL; + desc->handle_irq = handle_bad_irq; + desc->chip = &no_irq_chip; + desc->name = NULL; + spin_unlock_irqrestore(&desc->lock, flags); +} + + +/** + * set_irq_chip - set the irq chip for an irq + * @irq: irq number + * @chip: pointer to irq chip description structure + */ +int set_irq_chip(unsigned int irq, struct irq_chip *chip) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq); + return -EINVAL; + } + + if (!chip) + chip = &no_irq_chip; + + spin_lock_irqsave(&desc->lock, flags); + irq_chip_set_defaults(chip); + desc->chip = chip; + spin_unlock_irqrestore(&desc->lock, flags); + + return 0; +} +EXPORT_SYMBOL(set_irq_chip); + +/** + * set_irq_type - set the irq trigger type for an irq + * @irq: irq number + * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h + */ +int set_irq_type(unsigned int irq, unsigned int type) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + int ret = -ENXIO; + + if (!desc) { + printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); + return -ENODEV; + } + + if (type == IRQ_TYPE_NONE) + return 0; + + spin_lock_irqsave(&desc->lock, flags); + ret = __irq_set_trigger(desc, irq, type); + spin_unlock_irqrestore(&desc->lock, flags); + return ret; +} +EXPORT_SYMBOL(set_irq_type); + +/** + * set_irq_data - set irq type data for an irq + * @irq: Interrupt number + * @data: Pointer to interrupt specific data + * + * Set the hardware irq controller data for an irq + */ +int set_irq_data(unsigned int irq, void *data) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR + "Trying to install controller data for IRQ%d\n", irq); + return -EINVAL; + } + + spin_lock_irqsave(&desc->lock, flags); + desc->handler_data = data; + spin_unlock_irqrestore(&desc->lock, flags); + 
return 0; +} +EXPORT_SYMBOL(set_irq_data); + +/** + * set_irq_msi - set MSI descriptor data for an irq + * @irq: Interrupt number + * @entry: Pointer to MSI descriptor data + * + * Set the MSI descriptor entry for an irq + */ +int set_irq_msi(unsigned int irq, struct msi_desc *entry) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR + "Trying to install msi data for IRQ%d\n", irq); + return -EINVAL; + } + + spin_lock_irqsave(&desc->lock, flags); + desc->msi_desc = entry; + if (entry) + entry->irq = irq; + spin_unlock_irqrestore(&desc->lock, flags); + return 0; +} + +/** + * set_irq_chip_data - set irq chip data for an irq + * @irq: Interrupt number + * @data: Pointer to chip specific data + * + * Set the hardware irq chip data for an irq + */ +int set_irq_chip_data(unsigned int irq, void *data) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR + "Trying to install chip data for IRQ%d\n", irq); + return -EINVAL; + } + + if (!desc->chip) { + printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); + return -EINVAL; + } + + spin_lock_irqsave(&desc->lock, flags); + desc->chip_data = data; + spin_unlock_irqrestore(&desc->lock, flags); + + return 0; +} +EXPORT_SYMBOL(set_irq_chip_data); + +/* + * default enable function + */ +static void default_enable(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + desc->chip->unmask(irq); + desc->status &= ~IRQ_MASKED; +} + +/* + * default disable function + */ +static void default_disable(unsigned int irq) +{ +} + +/* + * default startup function + */ +static unsigned int default_startup(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + desc->chip->enable(irq); + return 0; +} + +/* + * default shutdown function + */ +static void default_shutdown(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + desc->chip->mask(irq); + desc->status |= IRQ_MASKED; +} + +/* + * Fixup enable/disable function pointers + */ +void irq_chip_set_defaults(struct irq_chip *chip) +{ + if (!chip->enable) + chip->enable = default_enable; + if (!chip->disable) + chip->disable = default_disable; + if (!chip->startup) + chip->startup = default_startup; + /* + * We use chip->disable when the user provided their own. When + * we have default_disable set for chip->disable, then we need + * to use default_shutdown, otherwise the irq line is not + * disabled on free_irq(): + */ + if (!chip->shutdown) + chip->shutdown = chip->disable != default_disable ? + chip->disable : default_shutdown; + if (!chip->name) + chip->name = chip->typename; + if (!chip->end) + chip->end = dummy_irq_chip.end; +} + +static inline void mask_ack_irq(struct irq_desc *desc, int irq) +{ + if (desc->chip->mask_ack) + desc->chip->mask_ack(irq); + else { + desc->chip->mask(irq); + desc->chip->ack(irq); + } +} + +/** + * handle_simple_irq - Simple and software-decoded IRQs. + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Simple interrupts are either sent from a demultiplexing interrupt + * handler or come from hardware, where no interrupt hardware control + * is necessary. + * + * Note: The caller is expected to handle the ack, clear, mask and + * unmask issues if necessary.
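[Editor's illustration — not part of the original patch] A sketch of the chip-data accessors above in use: an interrupt controller driver stores its register base per IRQ with set_irq_chip_data() and reads it back in a chip callback via get_irq_chip_data() (a helper in include/linux/irq.h); struct my_intc and the 0x10 mask-set offset are hypothetical:

	struct my_intc {
		void __iomem *base;
	};

	static void my_intc_mask(unsigned int irq)
	{
		struct my_intc *intc = get_irq_chip_data(irq);

		writel(1 << (irq & 31), intc->base + 0x10);	/* hypothetical MASK_SET reg */
	}

	/* at setup time: set_irq_chip_data(irq, intc); */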
+ */ +void +handle_simple_irq(unsigned int irq, struct irq_desc *desc) +{ + struct irqaction *action; + irqreturn_t action_ret; + + spin_lock(&desc->lock); + + if (unlikely(desc->status & IRQ_INPROGRESS)) + goto out_unlock; + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + kstat_incr_irqs_this_cpu(irq, desc); + + action = desc->action; + if (unlikely(!action || (desc->status & IRQ_DISABLED))) + goto out_unlock; + + desc->status |= IRQ_INPROGRESS; + spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + spin_lock(&desc->lock); + desc->status &= ~IRQ_INPROGRESS; +out_unlock: + spin_unlock(&desc->lock); +} + +/** + * handle_level_irq - Level type irq handler + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Level type interrupts are active as long as the hardware line has + * the active level. This may require masking the interrupt and unmasking + * it after the associated handler has acknowledged the device, so that the + * interrupt line goes back to inactive. + */ +void +handle_level_irq(unsigned int irq, struct irq_desc *desc) +{ + struct irqaction *action; + irqreturn_t action_ret; + + spin_lock(&desc->lock); + mask_ack_irq(desc, irq); + + if (unlikely(desc->status & IRQ_INPROGRESS)) + goto out_unlock; + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + kstat_incr_irqs_this_cpu(irq, desc); + + /* + * If it's disabled or no action is available, + * keep it masked and get out of here + */ + action = desc->action; + if (unlikely(!action || (desc->status & IRQ_DISABLED))) + goto out_unlock; + + desc->status |= IRQ_INPROGRESS; + spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + spin_lock(&desc->lock); + desc->status &= ~IRQ_INPROGRESS; + if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) + desc->chip->unmask(irq); +out_unlock: + spin_unlock(&desc->lock); +} + +/** + * handle_fasteoi_irq - irq handler for transparent controllers + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Only a single callback will be issued to the chip: an ->eoi() + * call when the interrupt has been serviced. This enables support + * for modern forms of interrupt handlers, which handle the flow + * details in hardware, transparently. + */ +void +handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) +{ + struct irqaction *action; + irqreturn_t action_ret; + + spin_lock(&desc->lock); + + if (unlikely(desc->status & IRQ_INPROGRESS)) + goto out; + + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + kstat_incr_irqs_this_cpu(irq, desc); + + /* + * If it's disabled or no action is available, + * then mask it and get out of here: + */ + action = desc->action; + if (unlikely(!action || (desc->status & IRQ_DISABLED))) { + desc->status |= IRQ_PENDING; + if (desc->chip->mask) + desc->chip->mask(irq); + goto out; + } + + desc->status |= IRQ_INPROGRESS; + desc->status &= ~IRQ_PENDING; + spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + spin_lock(&desc->lock); + desc->status &= ~IRQ_INPROGRESS; +out: + desc->chip->eoi(irq); + + spin_unlock(&desc->lock); +} + +/** + * handle_edge_irq - edge type IRQ handler + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * The interrupt occurs on the falling and/or rising edge of a hardware + * signal. The occurrence is latched into the irq controller hardware + * and must be acked in order to be re-enabled. After the ack another + * interrupt can happen on the same source even before the first one + * is handled by the associated event handler. If this happens it + * might be necessary to disable (mask) the interrupt depending on the + * controller hardware. This requires re-enabling the interrupt inside + * the loop which handles the interrupts which have arrived while + * the handler was running. If all pending interrupts are handled, the + * loop is left. + */ +void +handle_edge_irq(unsigned int irq, struct irq_desc *desc) +{ + spin_lock(&desc->lock); + + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + + /* + * If we're currently running this IRQ, or it's disabled, + * we shouldn't process the IRQ. Mark it pending, handle + * the necessary masking and go out + */ + if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || + !desc->action)) { + desc->status |= (IRQ_PENDING | IRQ_MASKED); + mask_ack_irq(desc, irq); + goto out_unlock; + } + kstat_incr_irqs_this_cpu(irq, desc); + + /* Start handling the irq */ + desc->chip->ack(irq); + + /* Mark the IRQ currently in progress.*/ + desc->status |= IRQ_INPROGRESS; + + do { + struct irqaction *action = desc->action; + irqreturn_t action_ret; + + if (unlikely(!action)) { + desc->chip->mask(irq); + goto out_unlock; + } + + /* + * When another irq arrived while we were handling + * one, we could have masked the irq. + * Re-enable it if it was not disabled in the meantime. + */ + if (unlikely((desc->status & + (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == + (IRQ_PENDING | IRQ_MASKED))) { + desc->chip->unmask(irq); + desc->status &= ~IRQ_MASKED; + } + + desc->status &= ~IRQ_PENDING; + spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + spin_lock(&desc->lock); + + } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); + + desc->status &= ~IRQ_INPROGRESS; +out_unlock: + spin_unlock(&desc->lock); +} + +/** + * handle_percpu_irq - Per CPU local irq handler + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Per CPU interrupts on SMP machines without locking requirements + */ +void +handle_percpu_irq(unsigned int irq, struct irq_desc *desc) +{ + irqreturn_t action_ret; + + kstat_incr_irqs_this_cpu(irq, desc); + + if (desc->chip->ack) + desc->chip->ack(irq); + + action_ret = handle_IRQ_event(irq, desc->action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + if (desc->chip->eoi) + desc->chip->eoi(irq); +} + +void +__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + const char *name) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR + "Trying to install type control for IRQ%d\n", irq); + return; + } + + if (!handle) + handle = handle_bad_irq; + else if (desc->chip == &no_irq_chip) { + printk(KERN_WARNING "Trying to install %sinterrupt handler " + "for IRQ%d\n", is_chained ? "chained " : "", irq); + /* + * Some ARM implementations install a handler for really dumb + * interrupt hardware without setting an irq_chip. This worked + * with the ARM no_irq_chip but the check in setup_irq would + * prevent us from setting up the interrupt at all. Switch it to + * dummy_irq_chip for easy transition. + */ + desc->chip = &dummy_irq_chip; + } + + spin_lock_irqsave(&desc->lock, flags); + + /* Uninstall? */
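[Editor's illustration — not part of the original patch] The is_chained path of __set_irq_handler() is normally reached through set_irq_chained_handler() in include/linux/irq.h; a GPIO bank demultiplexer is the classic user. The pending-register read and the IRQ base below are hypothetical:

	static void my_gpio_demux(unsigned int irq, struct irq_desc *desc)
	{
		unsigned long pending;
		int bit;

		desc->chip->ack(irq);			/* ack the parent line */
		pending = readl(my_gpio_base + 0x08);	/* hypothetical PENDING reg */
		for_each_bit(bit, &pending, 32)
			generic_handle_irq(MY_GPIO_IRQ_BASE + bit);
	}

	/* at init: set_irq_chained_handler(parent_irq, my_gpio_demux); */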
+ if (handle == handle_bad_irq) { + if (desc->chip != &no_irq_chip) + mask_ack_irq(desc, irq); + desc->status |= IRQ_DISABLED; + desc->depth = 1; + } + desc->handle_irq = handle; + desc->name = name; + + if (handle != handle_bad_irq && is_chained) { + desc->status &= ~IRQ_DISABLED; + desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; + desc->depth = 0; + desc->chip->startup(irq); + } + spin_unlock_irqrestore(&desc->lock, flags); +} + +void +set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle) +{ + set_irq_chip(irq, chip); + __set_irq_handler(irq, handle, 0, NULL); +} + +void +set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle, const char *name) +{ + set_irq_chip(irq, chip); + __set_irq_handler(irq, handle, 0, name); +} + +void __init set_irq_noprobe(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); + return; + } + + spin_lock_irqsave(&desc->lock, flags); + desc->status |= IRQ_NOPROBE; + spin_unlock_irqrestore(&desc->lock, flags); +} + +void __init set_irq_probe(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); + return; + } + + spin_lock_irqsave(&desc->lock, flags); + desc->status &= ~IRQ_NOPROBE; + spin_unlock_irqrestore(&desc->lock, flags); +} diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c new file mode 100644 index 0000000..38a25b8 --- /dev/null +++ b/kernel/irq/devres.c @@ -0,0 +1,90 @@ +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/gfp.h> + +/* + * Device resource management aware IRQ request/free implementation. + */ +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +static void devm_irq_release(struct device *dev, void *res) +{ + struct irq_devres *this = res; + + free_irq(this->irq, this->dev_id); +} + +static int devm_irq_match(struct device *dev, void *res, void *data) +{ + struct irq_devres *this = res, *match = data; + + return this->irq == match->irq && this->dev_id == match->dev_id; +} + +/** + * devm_request_irq - allocate an interrupt line for a managed device + * @dev: device to request interrupt for + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs + * @irqflags: Interrupt type flags + * @devname: An ASCII name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * Except for the extra @dev argument, this function takes the + * same arguments and performs the same function as + * request_irq(). IRQs requested with this function will be + * automatically freed on driver detach. + * + * If an IRQ allocated with this function needs to be freed + * separately, devm_free_irq() must be used. + */ +int devm_request_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id) +{ + struct irq_devres *dr; + int rc; + + dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres), + GFP_KERNEL); + if (!dr) + return -ENOMEM; + + rc = request_irq(irq, handler, irqflags, devname, dev_id); + if (rc) { + devres_free(dr); + return rc; + } + + dr->irq = irq; + dr->dev_id = dev_id; + devres_add(dev, dr); + + return 0; +} +EXPORT_SYMBOL(devm_request_irq); + +/** + * devm_free_irq - free an interrupt + * @dev: device to free interrupt for + * @irq: Interrupt line to free + * @dev_id: Device identity to free + * + * Except for the extra @dev argument, this function takes the + * same arguments and performs the same function as free_irq(). + * This function instead of free_irq() should be used to manually + * free IRQs allocated with devm_request_irq(). + */ +void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id) +{ + struct irq_devres match_data = { irq, dev_id }; + + free_irq(irq, dev_id); + WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match, + &match_data)); +} +EXPORT_SYMBOL(devm_free_irq); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c new file mode 100644 index 0000000..c815b42 --- /dev/null +++ b/kernel/irq/handle.c @@ -0,0 +1,277 @@ +/* + * linux/kernel/irq/handle.c + * + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King + * + * This file contains the core interrupt handling code. + * + * Detailed information is available in Documentation/DocBook/genericirq + * + */ + +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> + +#include "internals.h" + +/** + * handle_bad_irq - handle spurious and unhandled irqs + * @irq: the interrupt number + * @desc: description of the interrupt + * + * Handles spurious and unhandled IRQs. It also prints a debug message. + */ +void handle_bad_irq(unsigned int irq, struct irq_desc *desc) +{ + print_irq_desc(irq, desc); + kstat_incr_irqs_this_cpu(irq, desc); + ack_bad_irq(irq); +} + +/* + * Linux has a controller-independent interrupt architecture. + * Every controller has a 'controller-template', that is used + * by the main code to do the right thing. Each driver-visible + * interrupt source is transparently wired to the appropriate + * controller. Thus drivers need not be aware of the + * interrupt-controller. + * + * The code is designed to be easily extended with new/different + * interrupt controllers, without having to do assembly magic or + * having to touch the generic code. + * + * Controller mappings for all interrupt sources: + */ +int nr_irqs = NR_IRQS; +EXPORT_SYMBOL_GPL(nr_irqs); + +struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { + [0 ... NR_IRQS-1] = { + .status = IRQ_DISABLED, + .chip = &no_irq_chip, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), +#ifdef CONFIG_SMP + .affinity = CPU_MASK_ALL +#endif + } +}; + +/* + * What should we do if we get a hw irq event on an illegal vector? + * Each architecture has to answer this for itself.
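[Editor's illustration — not part of the original patch] Typical use of devm_request_irq() from a platform driver's probe routine: the interrupt is released automatically on driver detach, so remove() and later error paths need no free_irq(). The driver names are hypothetical:

	static int __devinit my_drv_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		return devm_request_irq(&pdev->dev, irq, my_drv_isr,
					IRQF_SHARED, "my_drv", pdev);
	}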
+ */ +static void ack_bad(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + print_irq_desc(irq, desc); + ack_bad_irq(irq); +} + +/* + * NOP functions + */ +static void noop(unsigned int irq) +{ +} + +static unsigned int noop_ret(unsigned int irq) +{ + return 0; +} + +/* + * Generic no controller implementation + */ +struct irq_chip no_irq_chip = { + .name = "none", + .startup = noop_ret, + .shutdown = noop, + .enable = noop, + .disable = noop, + .ack = ack_bad, + .end = noop, +}; + +/* + * Generic dummy implementation which can be used for + * real dumb interrupt sources + */ +struct irq_chip dummy_irq_chip = { + .name = "dummy", + .startup = noop_ret, + .shutdown = noop, + .enable = noop, + .disable = noop, + .ack = noop, + .mask = noop, + .unmask = noop, + .end = noop, +}; + +/* + * Special, empty irq handler: + */ +irqreturn_t no_action(int cpl, void *dev_id) +{ + return IRQ_NONE; +} + +/** + * handle_IRQ_event - irq action chain handler + * @irq: the interrupt number + * @action: the interrupt action chain for this irq + * + * Handles the action chain of an irq event + */ +irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) +{ + irqreturn_t ret, retval = IRQ_NONE; + unsigned int status = 0; + + if (!(action->flags & IRQF_DISABLED)) + local_irq_enable_in_hardirq(); + + do { + ret = action->handler(irq, action->dev_id); + if (ret == IRQ_HANDLED) + status |= action->flags; + retval |= ret; + action = action->next; + } while (action); + + if (status & IRQF_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + local_irq_disable(); + + return retval; +} + +#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ +/** + * __do_IRQ - original all in one highlevel IRQ handler + * @irq: the interrupt number + * + * __do_IRQ handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + * + * This is the original x86 implementation which is used for every + * interrupt type. + */ +unsigned int __do_IRQ(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + unsigned int status; + + kstat_incr_irqs_this_cpu(irq, desc); + + if (CHECK_IRQ_PER_CPU(desc->status)) { + irqreturn_t action_ret; + + /* + * No locking required for CPU-local interrupts: + */ + if (desc->chip->ack) + desc->chip->ack(irq); + if (likely(!(desc->status & IRQ_DISABLED))) { + action_ret = handle_IRQ_event(irq, desc->action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + } + desc->chip->end(irq); + return 1; + } + + spin_lock(&desc->lock); + if (desc->chip->ack) + desc->chip->ack(irq); + /* + * REPLAY is when Linux resends an IRQ that was dropped earlier + * WAITING is used by probe to mark irqs that are being tested + */ + status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); + status |= IRQ_PENDING; /* we _want_ to handle it */ + + /* + * If the IRQ is disabled for whatever reason, we cannot + * use the action we have. + */ + action = NULL; + if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) { + action = desc->action; + status &= ~IRQ_PENDING; /* we commit to handling */ + status |= IRQ_INPROGRESS; /* we are handling it */ + } + desc->status = status; + + /* + * If there is no IRQ handler or it was disabled, exit early. + * Since we set PENDING, if another processor is handling + * a different instance of this same irq, the other processor + * will take care of it. + */ + if (unlikely(!action)) + goto out; + + /* + * Edge triggered interrupts need to remember + * pending events. 
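[Editor's illustration — not part of the original patch] handle_IRQ_event() above ORs together the return value of every action on the line, so a handler sharing a line must return IRQ_NONE when its device is idle; that is what lets note_interrupt() spot spurious interrupts. The register names are hypothetical:

	static irqreturn_t my_shared_isr(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;
		u32 status = readl(dev->regs + MY_IRQ_STATUS);

		if (!status)
			return IRQ_NONE;		/* not our device */

		writel(status, dev->regs + MY_IRQ_ACK);	/* clear the source */
		return IRQ_HANDLED;
	}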
+ * This applies to any hw interrupts that allow a second + * instance of the same irq to arrive while we are in do_IRQ + * or in the handler. But the code here only handles the _second_ + * instance of the irq, not the third or fourth. So it is mostly + * useful for irq hardware that does not mask cleanly in an + * SMP environment. + */ + for (;;) { + irqreturn_t action_ret; + + spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + spin_lock(&desc->lock); + if (likely(!(desc->status & IRQ_PENDING))) + break; + desc->status &= ~IRQ_PENDING; + } + desc->status &= ~IRQ_INPROGRESS; + +out: + /* + * The ->end() handler has to deal with interrupts which got + * disabled while the handler was running. + */ + desc->chip->end(irq); + spin_unlock(&desc->lock); + + return 1; +} +#endif + + +#ifdef CONFIG_TRACE_IRQFLAGS +/* + * lockdep: we want to handle all irq_desc locks as a single lock-class: + */ +static struct lock_class_key irq_desc_lock_class; + +void early_init_irq_lock_class(void) +{ + struct irq_desc *desc; + int i; + + for_each_irq_desc(i, desc) + lockdep_set_class(&desc->lock, &irq_desc_lock_class); +} +#endif diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h new file mode 100644 index 0000000..64c1c72 --- /dev/null +++ b/kernel/irq/internals.h @@ -0,0 +1,69 @@ +/* + * IRQ subsystem internal functions and variables: + */ + +extern int noirqdebug; + +/* Set default functions for irq_chip structures: */ +extern void irq_chip_set_defaults(struct irq_chip *chip); + +/* Set default handler: */ +extern void compat_irq_chip_set_default_handler(struct irq_desc *desc); + +extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, + unsigned long flags); + +#ifdef CONFIG_PROC_FS +extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); +extern void register_handler_proc(unsigned int irq, struct irqaction *action); +extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); +#else +static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } +static inline void register_handler_proc(unsigned int irq, + struct irqaction *action) { } +static inline void unregister_handler_proc(unsigned int irq, + struct irqaction *action) { } +#endif + +extern int irq_select_affinity_usr(unsigned int irq); + +/* + * Debugging printout: + */ + +#include <linux/kallsyms.h> + +#define P(f) if (desc->status & f) printk("%14s set\n", #f) + +static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) +{ + printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", + irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); + printk("->handle_irq(): %p, ", desc->handle_irq); + print_symbol("%s\n", (unsigned long)desc->handle_irq); + printk("->chip(): %p, ", desc->chip); + print_symbol("%s\n", (unsigned long)desc->chip); + printk("->action(): %p\n", desc->action); + if (desc->action) { + printk("->action->handler(): %p, ", desc->action->handler); + print_symbol("%s\n", (unsigned long)desc->action->handler); + } + + P(IRQ_INPROGRESS); + P(IRQ_DISABLED); + P(IRQ_PENDING); + P(IRQ_REPLAY); + P(IRQ_AUTODETECT); + P(IRQ_WAITING); + P(IRQ_LEVEL); + P(IRQ_MASKED); +#ifdef CONFIG_IRQ_PER_CPU + P(IRQ_PER_CPU); +#endif + P(IRQ_NOPROBE); + P(IRQ_NOREQUEST); + P(IRQ_NOAUTOEN); +} + +#undef P + diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c new file mode 100644 index 0000000..801addd --- /dev/null +++ b/kernel/irq/manage.c @@ -0,0 +1,736 @@ 
+/* + * linux/kernel/irq/manage.c + * + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006 Thomas Gleixner + * + * This file contains driver APIs to the irq subsystem. + */ + +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/interrupt.h> +#include <linux/slab.h> + +#include "internals.h" + +#ifdef CONFIG_SMP + +cpumask_t irq_default_affinity = CPU_MASK_ALL; + +/** + * synchronize_irq - wait for pending IRQ handlers (on other CPUs) + * @irq: interrupt number to wait for + * + * This function waits for any pending IRQ handlers for this interrupt + * to complete before returning. If you use this function while + * holding a resource the IRQ handler may need you will deadlock. + * + * This function may be called - with care - from IRQ context. + */ +void synchronize_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned int status; + + if (!desc) + return; + + do { + unsigned long flags; + + /* + * Wait until we're out of the critical section. This might + * give the wrong answer due to the lack of memory barriers. + */ + while (desc->status & IRQ_INPROGRESS) + cpu_relax(); + + /* Ok, that indicated we're done: double-check carefully. */ + spin_lock_irqsave(&desc->lock, flags); + status = desc->status; + spin_unlock_irqrestore(&desc->lock, flags); + + /* Oops, that failed? */ + } while (status & IRQ_INPROGRESS); +} +EXPORT_SYMBOL(synchronize_irq); + +/** + * irq_can_set_affinity - Check if the affinity of a given irq can be set + * @irq: Interrupt to check + * + */ +int irq_can_set_affinity(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || + !desc->chip->set_affinity) + return 0; + + return 1; +} + +/** + * irq_set_affinity - Set the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + */ +int irq_set_affinity(unsigned int irq, cpumask_t cpumask) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc->chip->set_affinity) + return -EINVAL; + + spin_lock_irqsave(&desc->lock, flags); + +#ifdef CONFIG_GENERIC_PENDING_IRQ + if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { + desc->affinity = cpumask; + desc->chip->set_affinity(irq, cpumask); + } else { + desc->status |= IRQ_MOVE_PENDING; + desc->pending_mask = cpumask; + } +#else + desc->affinity = cpumask; + desc->chip->set_affinity(irq, cpumask); +#endif + desc->status |= IRQ_AFFINITY_SET; + spin_unlock_irqrestore(&desc->lock, flags); + return 0; +} + +#ifndef CONFIG_AUTO_IRQ_AFFINITY +/* + * Generic version of the affinity autoselector. + */ +int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) +{ + cpumask_t mask; + + if (!irq_can_set_affinity(irq)) + return 0; + + cpus_and(mask, cpu_online_map, irq_default_affinity); + + /* + * Preserve an userspace affinity setup, but make sure that + * one of the targets is online. 
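[Editor's illustration — not part of the original patch] Pinning an interrupt to CPU 0 with the affinity API above; cpumask_of_cpu() is the cpumask_t helper of this kernel generation, and MY_IRQ is hypothetical:

	if (irq_can_set_affinity(MY_IRQ))
		irq_set_affinity(MY_IRQ, cpumask_of_cpu(0));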
+ */ + if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { + if (cpus_intersects(desc->affinity, cpu_online_map)) + mask = desc->affinity; + else + desc->status &= ~IRQ_AFFINITY_SET; + } + + desc->affinity = mask; + desc->chip->set_affinity(irq, mask); + + return 0; +} +#else +static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d) +{ + return irq_select_affinity(irq); +} +#endif + +/* + * Called when affinity is set via /proc/irq + */ +int irq_select_affinity_usr(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + int ret; + + spin_lock_irqsave(&desc->lock, flags); + ret = do_irq_select_affinity(irq, desc); + spin_unlock_irqrestore(&desc->lock, flags); + + return ret; +} + +#else +static inline int do_irq_select_affinity(int irq, struct irq_desc *desc) +{ + return 0; +} +#endif + +/** + * disable_irq_nosync - disable an irq without waiting + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Disables and Enables are + * nested. + * Unlike disable_irq(), this function does not ensure existing + * instances of the IRQ handler have completed before returning. + * + * This function may be called from IRQ context. + */ +void disable_irq_nosync(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) + return; + + spin_lock_irqsave(&desc->lock, flags); + if (!desc->depth++) { + desc->status |= IRQ_DISABLED; + desc->chip->disable(irq); + } + spin_unlock_irqrestore(&desc->lock, flags); +} +EXPORT_SYMBOL(disable_irq_nosync); + +/** + * disable_irq - disable an irq and wait for completion + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Enables and Disables are + * nested. + * This function waits for any pending IRQ handlers for this interrupt + * to complete before returning. If you use this function while + * holding a resource the IRQ handler may need you will deadlock. + * + * This function may be called - with care - from IRQ context. + */ +void disable_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc) + return; + + disable_irq_nosync(irq); + if (desc->action) + synchronize_irq(irq); +} +EXPORT_SYMBOL(disable_irq); + +static void __enable_irq(struct irq_desc *desc, unsigned int irq) +{ + switch (desc->depth) { + case 0: + WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); + break; + case 1: { + unsigned int status = desc->status & ~IRQ_DISABLED; + + /* Prevent probing on this irq: */ + desc->status = status | IRQ_NOPROBE; + check_irq_resend(desc, irq); + /* fall-through */ + } + default: + desc->depth--; + } +} + +/** + * enable_irq - enable handling of an irq + * @irq: Interrupt to enable + * + * Undoes the effect of one call to disable_irq(). If this + * matches the last disable, processing of interrupts on this + * IRQ line is re-enabled. + * + * This function may be called from IRQ context. 
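[Editor's illustration — not part of the original patch] Because disable_irq()/enable_irq() nest through desc->depth, a driver can bracket work that must not race its handler; my_rewrite_dma_rings() is hypothetical:

	disable_irq(dev->irq);		/* also waits for running handlers */
	my_rewrite_dma_rings(dev);
	enable_irq(dev->irq);		/* line re-enabled only by the matching call */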
+ */ +void enable_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) + return; + + spin_lock_irqsave(&desc->lock, flags); + __enable_irq(desc, irq); + spin_unlock_irqrestore(&desc->lock, flags); +} +EXPORT_SYMBOL(enable_irq); + +static int set_irq_wake_real(unsigned int irq, unsigned int on) +{ + struct irq_desc *desc = irq_to_desc(irq); + int ret = -ENXIO; + + if (desc->chip->set_wake) + ret = desc->chip->set_wake(irq, on); + + return ret; +} + +/** + * set_irq_wake - control irq power management wakeup + * @irq: interrupt to control + * @on: enable/disable power management wakeup + * + * Enable/disable power management wakeup mode, which is + * disabled by default. Enables and disables must match, + * just as they match for non-wakeup mode support. + * + * Wakeup mode lets this IRQ wake the system from sleep + * states like "suspend to RAM". + */ +int set_irq_wake(unsigned int irq, unsigned int on) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + int ret = 0; + + /* wakeup-capable irqs can be shared between drivers that + * don't need to have the same sleep mode behaviors. + */ + spin_lock_irqsave(&desc->lock, flags); + if (on) { + if (desc->wake_depth++ == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 0; + else + desc->status |= IRQ_WAKEUP; + } + } else { + if (desc->wake_depth == 0) { + WARN(1, "Unbalanced IRQ %d wake disable\n", irq); + } else if (--desc->wake_depth == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 1; + else + desc->status &= ~IRQ_WAKEUP; + } + } + + spin_unlock_irqrestore(&desc->lock, flags); + return ret; +} +EXPORT_SYMBOL(set_irq_wake); + +/* + * Internal function that tells the architecture code whether a + * particular irq has been exclusively allocated or is available + * for driver use. + */ +int can_request_irq(unsigned int irq, unsigned long irqflags) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + + if (!desc) + return 0; + + if (desc->status & IRQ_NOREQUEST) + return 0; + + action = desc->action; + if (action) + if (irqflags & action->flags & IRQF_SHARED) + action = NULL; + + return !action; +} + +void compat_irq_chip_set_default_handler(struct irq_desc *desc) +{ + /* + * If the architecture still has not overriden + * the flow handler then zap the default. This + * should catch incorrect flow-type setting. + */ + if (desc->handle_irq == &handle_bad_irq) + desc->handle_irq = NULL; +} + +int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, + unsigned long flags) +{ + int ret; + struct irq_chip *chip = desc->chip; + + if (!chip || !chip->set_type) { + /* + * IRQF_TRIGGER_* but the PIC does not support multiple + * flow-types? + */ + pr_debug("No set_type function for IRQ %d (%s)\n", irq, + chip ? (chip->name ? : "unknown") : "unknown"); + return 0; + } + + ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK); + + if (ret) + pr_err("setting trigger mode %d for irq %u failed (%pF)\n", + (int)(flags & IRQF_TRIGGER_MASK), + irq, chip->set_type); + else { + /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ + desc->status &= ~IRQ_TYPE_SENSE_MASK; + desc->status |= flags & IRQ_TYPE_SENSE_MASK; + } + + return ret; +} + +/* + * Internal function to register an irqaction - typically used to + * allocate special interrupts that are part of the architecture. 
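[Editor's illustration — not part of the original patch] A suspend hook arming its interrupt as a wakeup source with set_irq_wake() from above; the resume hook must make the matching disable call so wake_depth stays balanced. The driver names are hypothetical:

	static int my_drv_suspend(struct platform_device *pdev, pm_message_t state)
	{
		struct my_dev *dev = platform_get_drvdata(pdev);

		if (device_may_wakeup(&pdev->dev))
			set_irq_wake(dev->irq, 1);
		return 0;
	}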
+ */ +static int +__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) +{ + struct irqaction *old, **p; + const char *old_name = NULL; + unsigned long flags; + int shared = 0; + int ret; + + if (!desc) + return -EINVAL; + + if (desc->chip == &no_irq_chip) + return -ENOSYS; + /* + * Some drivers like serial.c use request_irq() heavily, + * so we have to be careful not to interfere with a + * running system. + */ + if (new->flags & IRQF_SAMPLE_RANDOM) { + /* + * This function might sleep, we want to call it first, + * outside of the atomic block. + * Yes, this might clear the entropy pool if the wrong + * driver is attempted to be loaded, without actually + * installing a new handler, but is this really a problem, + * only the sysadmin is able to do this. + */ + rand_initialize_irq(irq); + } + + /* + * The following block of code has to be executed atomically + */ + spin_lock_irqsave(&desc->lock, flags); + p = &desc->action; + old = *p; + if (old) { + /* + * Can't share interrupts unless both agree to and are + * the same type (level, edge, polarity). So both flag + * fields must have IRQF_SHARED set and the bits which + * set the trigger type must match. + */ + if (!((old->flags & new->flags) & IRQF_SHARED) || + ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { + old_name = old->name; + goto mismatch; + } + +#if defined(CONFIG_IRQ_PER_CPU) + /* All handlers must agree on per-cpuness */ + if ((old->flags & IRQF_PERCPU) != + (new->flags & IRQF_PERCPU)) + goto mismatch; +#endif + + /* add new interrupt at end of irq queue */ + do { + p = &old->next; + old = *p; + } while (old); + shared = 1; + } + + if (!shared) { + irq_chip_set_defaults(desc->chip); + + /* Setup the type (level, edge polarity) if configured: */ + if (new->flags & IRQF_TRIGGER_MASK) { + ret = __irq_set_trigger(desc, irq, new->flags); + + if (ret) { + spin_unlock_irqrestore(&desc->lock, flags); + return ret; + } + } else + compat_irq_chip_set_default_handler(desc); +#if defined(CONFIG_IRQ_PER_CPU) + if (new->flags & IRQF_PERCPU) + desc->status |= IRQ_PER_CPU; +#endif + + desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | + IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); + + if (!(desc->status & IRQ_NOAUTOEN)) { + desc->depth = 0; + desc->status &= ~IRQ_DISABLED; + desc->chip->startup(irq); + } else + /* Undo nested disables: */ + desc->depth = 1; + + /* Exclude IRQ from balancing if requested */ + if (new->flags & IRQF_NOBALANCING) + desc->status |= IRQ_NO_BALANCING; + + /* Set default affinity mask once everything is setup */ + do_irq_select_affinity(irq, desc); + + } else if ((new->flags & IRQF_TRIGGER_MASK) + && (new->flags & IRQF_TRIGGER_MASK) + != (desc->status & IRQ_TYPE_SENSE_MASK)) { + /* hope the handler works with the actual trigger mode... */ + pr_warning("IRQ %d uses trigger mode %d; requested %d\n", + irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), + (int)(new->flags & IRQF_TRIGGER_MASK)); + } + + *p = new; + + /* Reset broken irq detection when installing new handler */ + desc->irq_count = 0; + desc->irqs_unhandled = 0; + + /* + * Check whether we disabled the irq via the spurious handler + * before. Reenable it and give it another chance. 
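[Editor's illustration — not part of the original patch] The sharing rules enforced by __setup_irq() above: every requester of a shared line must pass IRQF_SHARED and identical IRQF_TRIGGER_* bits, otherwise the later request fails with -EBUSY. All names are hypothetical:

	err = request_irq(shared_irq, isr_a,
			  IRQF_SHARED | IRQF_TRIGGER_LOW, "card-a", dev_a);
	/* same trigger bits, so this second request succeeds: */
	err = request_irq(shared_irq, isr_b,
			  IRQF_SHARED | IRQF_TRIGGER_LOW, "card-b", dev_b);
	/* omitting IRQF_SHARED or using IRQF_TRIGGER_HIGH here would yield -EBUSY */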
+ */ + if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { + desc->status &= ~IRQ_SPURIOUS_DISABLED; + __enable_irq(desc, irq); + } + + spin_unlock_irqrestore(&desc->lock, flags); + + new->irq = irq; + register_irq_proc(irq, desc); + new->dir = NULL; + register_handler_proc(irq, new); + + return 0; + +mismatch: +#ifdef CONFIG_DEBUG_SHIRQ + if (!(new->flags & IRQF_PROBE_SHARED)) { + printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); + if (old_name) + printk(KERN_ERR "current handler: %s\n", old_name); + dump_stack(); + } +#endif + spin_unlock_irqrestore(&desc->lock, flags); + return -EBUSY; +} + +/** + * setup_irq - setup an interrupt + * @irq: Interrupt line to setup + * @act: irqaction for the interrupt + * + * Used to statically setup interrupts in the early boot process. + */ +int setup_irq(unsigned int irq, struct irqaction *act) +{ + struct irq_desc *desc = irq_to_desc(irq); + + return __setup_irq(irq, desc, act); +} + +/** + * free_irq - free an interrupt + * @irq: Interrupt line to free + * @dev_id: Device identity to free + * + * Remove an interrupt handler. The handler is removed and if the + * interrupt line is no longer in use by any driver it is disabled. + * On a shared IRQ the caller must ensure the interrupt is disabled + * on the card it drives before calling this function. The function + * does not return until any executing interrupts for this IRQ + * have completed. + * + * This function must not be called from interrupt context. + */ +void free_irq(unsigned int irq, void *dev_id) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction **p; + unsigned long flags; + + WARN_ON(in_interrupt()); + + if (!desc) + return; + + spin_lock_irqsave(&desc->lock, flags); + p = &desc->action; + for (;;) { + struct irqaction *action = *p; + + if (action) { + struct irqaction **pp = p; + + p = &action->next; + if (action->dev_id != dev_id) + continue; + + /* Found it - now remove it from the list of entries */ + *pp = action->next; + + /* Currently used only by UML, might disappear one day.*/ +#ifdef CONFIG_IRQ_RELEASE_METHOD + if (desc->chip->release) + desc->chip->release(irq, dev_id); +#endif + + if (!desc->action) { + desc->status |= IRQ_DISABLED; + if (desc->chip->shutdown) + desc->chip->shutdown(irq); + else + desc->chip->disable(irq); + } + spin_unlock_irqrestore(&desc->lock, flags); + unregister_handler_proc(irq, action); + + /* Make sure it's not being used on another CPU */ + synchronize_irq(irq); +#ifdef CONFIG_DEBUG_SHIRQ + /* + * It's a shared IRQ -- the driver ought to be + * prepared for it to happen even now it's + * being freed, so let's make sure.... We do + * this after actually deregistering it, to + * make sure that a 'real' IRQ doesn't run in + * parallel with our fake + */ + if (action->flags & IRQF_SHARED) { + local_irq_save(flags); + action->handler(irq, dev_id); + local_irq_restore(flags); + } +#endif + kfree(action); + return; + } + printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq); +#ifdef CONFIG_DEBUG_SHIRQ + dump_stack(); +#endif + spin_unlock_irqrestore(&desc->lock, flags); + return; + } +} +EXPORT_SYMBOL(free_irq); + +/** + * request_irq - allocate an interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs + * @irqflags: Interrupt type flags + * @devname: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the + * interrupt line and IRQ handling. 
From the point this + * call is made your handler function may be invoked. Since + * your handler function must clear any interrupt the board + * raises, you must take care both to initialise your hardware + * and to set up the interrupt handler in the right order. + * + * Dev_id must be globally unique. Normally the address of the + * device data structure is used as the cookie. Since the handler + * receives this value it makes sense to use it. + * + * If your interrupt is shared you must pass a non-NULL dev_id + * as this is required when freeing the interrupt. + * + * Flags: + * + * IRQF_SHARED Interrupt is shared + * IRQF_DISABLED Disable local interrupts while processing + * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy + * IRQF_TRIGGER_* Specify active edge(s) or level + * + */ +int request_irq(unsigned int irq, irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +{ + struct irqaction *action; + struct irq_desc *desc; + int retval; + +#ifdef CONFIG_LOCKDEP + /* + * Lockdep wants atomic interrupt handlers: + */ + irqflags |= IRQF_DISABLED; +#endif + /* + * Sanity-check: shared interrupts must pass in a real dev-ID, + * otherwise we'll have trouble later trying to figure out + * which interrupt is which (messes up the interrupt freeing + * logic etc). + */ + if ((irqflags & IRQF_SHARED) && !dev_id) + return -EINVAL; + + desc = irq_to_desc(irq); + if (!desc) + return -EINVAL; + + if (desc->status & IRQ_NOREQUEST) + return -EINVAL; + if (!handler) + return -EINVAL; + + action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC); + if (!action) + return -ENOMEM; + + action->handler = handler; + action->flags = irqflags; + cpus_clear(action->mask); + action->name = devname; + action->next = NULL; + action->dev_id = dev_id; + + retval = __setup_irq(irq, desc, action); + if (retval) + kfree(action); + +#ifdef CONFIG_DEBUG_SHIRQ + if (irqflags & IRQF_SHARED) { + /* + * It's a shared IRQ -- the driver ought to be prepared for it + * to happen immediately, so let's make sure.... + * We disable the irq to make sure that a 'real' IRQ doesn't + * run in parallel with our fake. + */ + unsigned long flags; + + disable_irq(irq); + local_irq_save(flags); + + handler(irq, dev_id); + + local_irq_restore(flags); + enable_irq(irq); + } +#endif + return retval; +} +EXPORT_SYMBOL(request_irq); diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c new file mode 100644 index 0000000..9db681d --- /dev/null +++ b/kernel/irq/migration.c @@ -0,0 +1,64 @@ + +#include <linux/irq.h> + +void move_masked_irq(int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + cpumask_t tmp; + + if (likely(!(desc->status & IRQ_MOVE_PENDING))) + return; + + /* + * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. + */ + if (CHECK_IRQ_PER_CPU(desc->status)) { + WARN_ON(1); + return; + } + + desc->status &= ~IRQ_MOVE_PENDING; + + if (unlikely(cpus_empty(desc->pending_mask))) + return; + + if (!desc->chip->set_affinity) + return; + + assert_spin_locked(&desc->lock); + + cpus_and(tmp, desc->pending_mask, cpu_online_map); + + /* + * If there was a valid mask to work with, do the + * disable, re-program, enable sequence. + * This is *not* particularly important for level-triggered + * interrupts, but in an edge-triggered case we might be setting + * the RTE while an active trigger is coming in. This could + * cause some ioapics to malfunction. + * Being paranoid, I guess! + * + * For correct operation this depends on the caller + * masking the irqs.
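[Editor's illustration — not part of the original patch] The teardown order the free_irq() documentation above requires on a shared line: silence the device first, then free the handler. The register name is hypothetical:

	writel(0, dev->regs + MY_IRQ_ENABLE);	/* stop the card raising the line */
	free_irq(dev->irq, dev);		/* returns after running handlers finish */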
+ */ + if (likely(!cpus_empty(tmp))) { + desc->chip->set_affinity(irq,tmp); + } + cpus_clear(desc->pending_mask); +} + +void move_native_irq(int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (likely(!(desc->status & IRQ_MOVE_PENDING))) + return; + + if (unlikely(desc->status & IRQ_DISABLED)) + return; + + desc->chip->mask(irq); + move_masked_irq(irq); + desc->chip->unmask(irq); +} + diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c new file mode 100644 index 0000000..d257e7d --- /dev/null +++ b/kernel/irq/proc.c @@ -0,0 +1,249 @@ +/* + * linux/kernel/irq/proc.c + * + * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar + * + * This file contains the /proc/irq/ handling code. + */ + +#include <linux/irq.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/interrupt.h> + +#include "internals.h" + +static struct proc_dir_entry *root_irq_dir; + +#ifdef CONFIG_SMP + +static int irq_affinity_proc_show(struct seq_file *m, void *v) +{ + struct irq_desc *desc = irq_to_desc((long)m->private); + cpumask_t *mask = &desc->affinity; + +#ifdef CONFIG_GENERIC_PENDING_IRQ + if (desc->status & IRQ_MOVE_PENDING) + mask = &desc->pending_mask; +#endif + seq_cpumask(m, mask); + seq_putc(m, '\n'); + return 0; +} + +#ifndef is_affinity_mask_valid +#define is_affinity_mask_valid(val) 1 +#endif + +int no_irq_affinity; +static ssize_t irq_affinity_proc_write(struct file *file, + const char __user *buffer, size_t count, loff_t *pos) +{ + unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; + cpumask_t new_value; + int err; + + if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || + irq_balancing_disabled(irq)) + return -EIO; + + err = cpumask_parse_user(buffer, count, new_value); + if (err) + return err; + + if (!is_affinity_mask_valid(new_value)) + return -EINVAL; + + /* + * Do not allow disabling IRQs completely - it's a too easy + * way to make the system unusable accidentally :-) At least + * one online CPU still has to be targeted. + */ + if (!cpus_intersects(new_value, cpu_online_map)) + /* Special case for empty set - allow the architecture + code to set default SMP affinity. */ + return irq_select_affinity_usr(irq) ? -EINVAL : count; + + irq_set_affinity(irq, new_value); + + return count; +} + +static int irq_affinity_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, irq_affinity_proc_show, PDE(inode)->data); +} + +static const struct file_operations irq_affinity_proc_fops = { + .open = irq_affinity_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = irq_affinity_proc_write, +}; + +static int default_affinity_show(struct seq_file *m, void *v) +{ + seq_cpumask(m, &irq_default_affinity); + seq_putc(m, '\n'); + return 0; +} + +static ssize_t default_affinity_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) +{ + cpumask_t new_value; + int err; + + err = cpumask_parse_user(buffer, count, new_value); + if (err) + return err; + + if (!is_affinity_mask_valid(new_value)) + return -EINVAL; + + /* + * Do not allow disabling IRQs completely - it's a too easy + * way to make the system unusable accidentally :-) At least + * one online CPU still has to be targeted. 
+ */ + if (!cpus_intersects(new_value, cpu_online_map)) + return -EINVAL; + + irq_default_affinity = new_value; + + return count; +} + +static int default_affinity_open(struct inode *inode, struct file *file) +{ + return single_open(file, default_affinity_show, NULL); +} + +static const struct file_operations default_affinity_proc_fops = { + .open = default_affinity_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = default_affinity_write, +}; +#endif + +static int irq_spurious_read(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct irq_desc *desc = irq_to_desc((long) data); + return sprintf(page, "count %u\n" + "unhandled %u\n" + "last_unhandled %u ms\n", + desc->irq_count, + desc->irqs_unhandled, + jiffies_to_msecs(desc->last_unhandled)); +} + +#define MAX_NAMELEN 128 + +static int name_unique(unsigned int irq, struct irqaction *new_action) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&desc->lock, flags); + for (action = desc->action ; action; action = action->next) { + if ((action != new_action) && action->name && + !strcmp(new_action->name, action->name)) { + ret = 0; + break; + } + } + spin_unlock_irqrestore(&desc->lock, flags); + return ret; +} + +void register_handler_proc(unsigned int irq, struct irqaction *action) +{ + char name [MAX_NAMELEN]; + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc->dir || action->dir || !action->name || + !name_unique(irq, action)) + return; + + memset(name, 0, MAX_NAMELEN); + snprintf(name, MAX_NAMELEN, "%s", action->name); + + /* create /proc/irq/1234/handler/ */ + action->dir = proc_mkdir(name, desc->dir); +} + +#undef MAX_NAMELEN + +#define MAX_NAMELEN 10 + +void register_irq_proc(unsigned int irq, struct irq_desc *desc) +{ + char name [MAX_NAMELEN]; + struct proc_dir_entry *entry; + + if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) + return; + + memset(name, 0, MAX_NAMELEN); + sprintf(name, "%d", irq); + + /* create /proc/irq/1234 */ + desc->dir = proc_mkdir(name, root_irq_dir); + +#ifdef CONFIG_SMP + /* create /proc/irq/<irq>/smp_affinity */ + proc_create_data("smp_affinity", 0600, desc->dir, + &irq_affinity_proc_fops, (void *)(long)irq); +#endif + + entry = create_proc_entry("spurious", 0444, desc->dir); + if (entry) { + entry->data = (void *)(long)irq; + entry->read_proc = irq_spurious_read; + } +} + +#undef MAX_NAMELEN + +void unregister_handler_proc(unsigned int irq, struct irqaction *action) +{ + if (action->dir) { + struct irq_desc *desc = irq_to_desc(irq); + + remove_proc_entry(action->dir->name, desc->dir); + } +} + +static void register_default_affinity_proc(void) +{ +#ifdef CONFIG_SMP + proc_create("irq/default_smp_affinity", 0600, NULL, + &default_affinity_proc_fops); +#endif +} + +void init_irq_proc(void) +{ + unsigned int irq; + struct irq_desc *desc; + + /* create /proc/irq */ + root_irq_dir = proc_mkdir("irq", NULL); + if (!root_irq_dir) + return; + + register_default_affinity_proc(); + + /* + * Create entries for all existing IRQs. 
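+ *
+ * Together with register_handler_proc() above, this populates
+ * /proc/irq/ with entries of the form (IRQ 9 with a handler
+ * registered as "acpi" is only an illustrative example):
+ *
+ *  /proc/irq/9/smp_affinity (CONFIG_SMP only)
+ *  /proc/irq/9/spurious
+ *  /proc/irq/9/acpi/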
+ */
+ for_each_irq_desc(irq, desc)
+ register_irq_proc(irq, desc);
+}
+
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
new file mode 100644
index 0000000..89c7117
--- /dev/null
+++ b/kernel/irq/resend.c
@@ -0,0 +1,82 @@
+/*
+ * linux/kernel/irq/resend.c
+ *
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner
+ *
+ * This file contains the IRQ-resend code.
+ *
+ * If the interrupt is waiting to be processed, we try to re-run it.
+ * We can't directly run it from here since the caller might be in an
+ * interrupt-protected region. Not all irq controller chips can
+ * retrigger interrupts at the hardware level, so in those cases
+ * we allow the resending of IRQs via a tasklet.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
+
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+
+/* Bitmap to handle software resend of interrupts: */
+static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+
+/*
+ * Run software resends of IRQs
+ */
+static void resend_irqs(unsigned long arg)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ while (!bitmap_empty(irqs_resend, nr_irqs)) {
+ irq = find_first_bit(irqs_resend, nr_irqs);
+ clear_bit(irq, irqs_resend);
+ desc = irq_to_desc(irq);
+ local_irq_disable();
+ desc->handle_irq(irq, desc);
+ local_irq_enable();
+ }
+}
+
+/* Tasklet to handle resend: */
+static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
+
+#endif
+
+/*
+ * IRQ resend
+ *
+ * Called with interrupts disabled and desc->lock held.
+ */
+void check_irq_resend(struct irq_desc *desc, unsigned int irq)
+{
+ unsigned int status = desc->status;
+
+ /*
+ * Make sure the interrupt is enabled before resending it:
+ */
+ desc->chip->enable(irq);
+
+ /*
+ * We do not resend level type interrupts. Level type
+ * interrupts are resent by hardware when they are still
+ * active.
+ */
+ if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
+
+ if (!desc->chip || !desc->chip->retrigger ||
+ !desc->chip->retrigger(irq)) {
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+ /* Mark it in the resend bitmap and kick the tasklet: */
+ set_bit(irq, irqs_resend);
+ tasklet_schedule(&resend_tasklet);
+#endif
+ }
+ }
+}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
new file mode 100644
index 0000000..dd364c1
--- /dev/null
+++ b/kernel/irq/spurious.c
@@ -0,0 +1,300 @@
+/*
+ * linux/kernel/irq/spurious.c
+ *
+ * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains spurious interrupt handling.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+
+static int irqfixup __read_mostly;
+
+#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
+static void poll_spurious_irqs(unsigned long dummy);
+static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+
+/*
+ * Recovery handler for misrouted interrupts.
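+ *
+ * The approach: invoke every IRQF_SHARED handler registered on
+ * a candidate line; a handler returning IRQ_HANDLED is taken as
+ * evidence that the stray interrupt really belonged there. Only
+ * shared handlers are safe to call speculatively like this.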
+ */ +static int try_one_irq(int irq, struct irq_desc *desc) +{ + struct irqaction *action; + int ok = 0, work = 0; + + spin_lock(&desc->lock); + /* Already running on another processor */ + if (desc->status & IRQ_INPROGRESS) { + /* + * Already running: If it is shared get the other + * CPU to go looking for our mystery interrupt too + */ + if (desc->action && (desc->action->flags & IRQF_SHARED)) + desc->status |= IRQ_PENDING; + spin_unlock(&desc->lock); + return ok; + } + /* Honour the normal IRQ locking */ + desc->status |= IRQ_INPROGRESS; + action = desc->action; + spin_unlock(&desc->lock); + + while (action) { + /* Only shared IRQ handlers are safe to call */ + if (action->flags & IRQF_SHARED) { + if (action->handler(irq, action->dev_id) == + IRQ_HANDLED) + ok = 1; + } + action = action->next; + } + local_irq_disable(); + /* Now clean up the flags */ + spin_lock(&desc->lock); + action = desc->action; + + /* + * While we were looking for a fixup someone queued a real + * IRQ clashing with our walk: + */ + while ((desc->status & IRQ_PENDING) && action) { + /* + * Perform real IRQ processing for the IRQ we deferred + */ + work = 1; + spin_unlock(&desc->lock); + handle_IRQ_event(irq, action); + spin_lock(&desc->lock); + desc->status &= ~IRQ_PENDING; + } + desc->status &= ~IRQ_INPROGRESS; + /* + * If we did actual work for the real IRQ line we must let the + * IRQ controller clean up too + */ + if (work && desc->chip && desc->chip->end) + desc->chip->end(irq); + spin_unlock(&desc->lock); + + return ok; +} + +static int misrouted_irq(int irq) +{ + struct irq_desc *desc; + int i, ok = 0; + + for_each_irq_desc(i, desc) { + if (!i) + continue; + + if (i == irq) /* Already tried */ + continue; + + if (try_one_irq(i, desc)) + ok = 1; + } + /* So the caller can adjust the irq error counts */ + return ok; +} + +static void poll_spurious_irqs(unsigned long dummy) +{ + struct irq_desc *desc; + int i; + + for_each_irq_desc(i, desc) { + unsigned int status; + + if (!i) + continue; + + /* Racy but it doesn't matter */ + status = desc->status; + barrier(); + if (!(status & IRQ_SPURIOUS_DISABLED)) + continue; + + try_one_irq(i, desc); + } + + mod_timer(&poll_spurious_irq_timer, + jiffies + POLL_SPURIOUS_IRQ_INTERVAL); +} + +/* + * If 99,900 of the previous 100,000 interrupts have not been handled + * then assume that the IRQ is stuck in some manner. Drop a diagnostic + * and try to turn the IRQ off. 
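+ *
+ * 99,900 of 100,000 means more than 99.9% of the interrupts on
+ * the line going unhandled. note_interrupt() below resets
+ * irq_count every 100,000 interrupts, and restarts irqs_unhandled
+ * at one whenever two unhandled events arrive more than 100ms
+ * (HZ/10) apart, so isolated glitches never add up to a shutdown.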
+ *
+ * (The other 100-of-100,000 interrupts may have come from a
+ * correctly functioning device sharing an IRQ with the failing one)
+ *
+ * Called under desc->lock
+ */
+
+static void
+__report_bad_irq(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
+{
+ struct irqaction *action;
+
+ if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+ printk(KERN_ERR "irq event %d: bogus return value %x\n",
+ irq, action_ret);
+ } else {
+ printk(KERN_ERR "irq %d: nobody cared (try booting with "
+ "the \"irqpoll\" option)\n", irq);
+ }
+ dump_stack();
+ printk(KERN_ERR "handlers:\n");
+
+ action = desc->action;
+ while (action) {
+ printk(KERN_ERR "[<%p>]", action->handler);
+ print_symbol(" (%s)",
+ (unsigned long)action->handler);
+ printk("\n");
+ action = action->next;
+ }
+}
+
+static void
+report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
+{
+ static int count = 100;
+
+ if (count > 0) {
+ count--;
+ __report_bad_irq(irq, desc, action_ret);
+ }
+}
+
+static inline int
+try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
+{
+ struct irqaction *action;
+
+ if (!irqfixup)
+ return 0;
+
+ /* We didn't actually handle the IRQ - see if it was misrouted. */
+ if (action_ret == IRQ_NONE)
+ return 1;
+
+ /*
+ * But for 'irqfixup == 2' we also do it for handled interrupts if
+ * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
+ * traditional PC timer interrupt - a legacy special case).
+ */
+ if (irqfixup < 2)
+ return 0;
+
+ if (!irq)
+ return 1;
+
+ /*
+ * Since we don't get the descriptor lock, "action" can
+ * change under us. We don't really care, but we don't
+ * want to follow a NULL pointer. So tell the compiler to
+ * just load it once by using a barrier.
+ */
+ action = desc->action;
+ barrier();
+ return action && (action->flags & IRQF_IRQPOLL);
+}
+
+void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
+{
+ if (unlikely(action_ret != IRQ_HANDLED)) {
+ /*
+ * If we are seeing only the odd spurious IRQ caused by
+ * bus asynchronicity then don't eventually trigger an error,
+ * otherwise the counter becomes a doomsday timer for
+ * otherwise working systems.
+ */
+ if (time_after(jiffies, desc->last_unhandled + HZ/10))
+ desc->irqs_unhandled = 1;
+ else
+ desc->irqs_unhandled++;
+ desc->last_unhandled = jiffies;
+ if (unlikely(action_ret != IRQ_NONE))
+ report_bad_irq(irq, desc, action_ret);
+ }
+
+ if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
+ int ok = misrouted_irq(irq);
+ if (action_ret == IRQ_NONE)
+ desc->irqs_unhandled -= ok;
+ }
+
+ desc->irq_count++;
+ if (likely(desc->irq_count < 100000))
+ return;
+
+ desc->irq_count = 0;
+ if (unlikely(desc->irqs_unhandled > 99900)) {
+ /*
+ * The interrupt is stuck
+ */
+ __report_bad_irq(irq, desc, action_ret);
+ /*
+ * Now kill the IRQ
+ */
+ printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+ desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+ desc->depth++;
+ desc->chip->disable(irq);
+
+ mod_timer(&poll_spurious_irq_timer,
+ jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+ }
+ desc->irqs_unhandled = 0;
+}
+
+int noirqdebug __read_mostly;
+
+int noirqdebug_setup(char *str)
+{
+ noirqdebug = 1;
+ printk(KERN_INFO "IRQ lockup detection disabled\n");
+
+ return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+module_param(noirqdebug, bool, 0644);
+MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
+
+static int __init irqfixup_setup(char *str)
+{
+ irqfixup = 1;
+ printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+ printk(KERN_WARNING "This may impact system performance.\n");
+
+ return 1;
+}
+
+__setup("irqfixup", irqfixup_setup);
+module_param(irqfixup, int, 0644);
+MODULE_PARM_DESC(irqfixup, "0: No fixup, 1: irqfixup mode, 2: irqpoll mode");
+
+static int __init irqpoll_setup(char *str)
+{
+ irqfixup = 2;
+ printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+ "enabled\n");
+ printk(KERN_WARNING "This may significantly impact system "
+ "performance\n");
+ return 1;
+}
+
+__setup("irqpoll", irqpoll_setup);
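For context, a minimal driver-side sketch of the request_irq()/free_irq() API documented above, using the 2.6-era handler signature. Everything here - my_dev, my_handler, the "mydev" name, and the status-register offset - is a hypothetical illustration, not code from this tree:

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical per-device structure; its address doubles as dev_id. */
struct my_dev {
	void __iomem *regs;
	int irq;
};

static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* Hypothetical status register at offset 0x04, bit 0 = pending. */
	if (!(readl(dev->regs + 0x04) & 0x1))
		return IRQ_NONE;	/* not ours - counted by note_interrupt() */

	/* Clear the device's interrupt condition here, then: */
	return IRQ_HANDLED;
}

static int my_probe(struct my_dev *dev)
{
	/*
	 * Shared line, so dev_id must be non-NULL; the handler may
	 * run as soon as this call succeeds, so the hardware must
	 * already be initialised.
	 */
	return request_irq(dev->irq, my_handler, IRQF_SHARED,
			   "mydev", dev);
}

static void my_remove(struct my_dev *dev)
{
	/* The same dev_id identifies our action on the shared chain. */
	free_irq(dev->irq, dev);
}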