Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c  424
1 file changed, 315 insertions(+), 109 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f036..7e2e7dd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
*/
#include <linux/irq.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include "internals.h"
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
/* Oops, that failed? */
} while (status & IRQ_INPROGRESS);
+
+ /*
+ * We made sure that no hardirq handler is running. Now verify
+ * that no threaded handlers are active.
+ */
+ wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
/**
* irq_can_set_affinity - Check if the affinity of a given irq can be set
* @irq: Interrupt to check
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq)
return 1;
}
+static void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+{
+ struct irqaction *action = desc->action;
+
+ while (action) {
+ if (action->thread)
+ set_cpus_allowed_ptr(action->thread, cpumask);
+ action = action->next;
+ }
+}
+
/**
* irq_set_affinity - Set the irq affinity of a given irq
* @irq: Interrupt to set affinity
@@ -90,16 +110,17 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
- cpumask_copy(&desc->affinity, cpumask);
+ cpumask_copy(desc->affinity, cpumask);
desc->chip->set_affinity(irq, cpumask);
} else {
desc->status |= IRQ_MOVE_PENDING;
- cpumask_copy(&desc->pending_mask, cpumask);
+ cpumask_copy(desc->pending_mask, cpumask);
}
#else
- cpumask_copy(&desc->affinity, cpumask);
+ cpumask_copy(desc->affinity, cpumask);
desc->chip->set_affinity(irq, cpumask);
#endif
+ irq_set_thread_affinity(desc, cpumask);
desc->status |= IRQ_AFFINITY_SET;
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
@@ -109,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
/*
* Generic version of the affinity autoselector.
*/
-int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
if (!irq_can_set_affinity(irq))
return 0;
@@ -119,21 +140,21 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
* one of the targets is online.
*/
if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
- if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+ if (cpumask_any_and(desc->affinity, cpu_online_mask)
< nr_cpu_ids)
goto set_affinity;
else
desc->status &= ~IRQ_AFFINITY_SET;
}
- cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+ cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
- desc->chip->set_affinity(irq, &desc->affinity);
+ desc->chip->set_affinity(irq, desc->affinity);
return 0;
}
#else
-static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
return irq_select_affinity(irq);
}
@@ -149,19 +170,35 @@ int irq_select_affinity_usr(unsigned int irq)
int ret;
spin_lock_irqsave(&desc->lock, flags);
- ret = do_irq_select_affinity(irq, desc);
+ ret = setup_affinity(irq, desc);
+ if (!ret)
+ irq_set_thread_affinity(desc, desc->affinity);
spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
#else
-static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
return 0;
}
#endif
+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+ if (suspend) {
+ if (!desc->action || (desc->action->flags & IRQF_TIMER))
+ return;
+ desc->status |= IRQ_SUSPENDED;
+ }
+
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->chip->disable(irq);
+ }
+}
+
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
@@ -182,10 +219,7 @@ void disable_irq_nosync(unsigned int irq)
return;
spin_lock_irqsave(&desc->lock, flags);
- if (!desc->depth++) {
- desc->status |= IRQ_DISABLED;
- desc->chip->disable(irq);
- }
+ __disable_irq(desc, irq, false);
spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -215,15 +249,21 @@ void disable_irq(unsigned int irq)
}
EXPORT_SYMBOL(disable_irq);
-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
+ if (resume)
+ desc->status &= ~IRQ_SUSPENDED;
+
switch (desc->depth) {
case 0:
+ err_out:
WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
break;
case 1: {
unsigned int status = desc->status & ~IRQ_DISABLED;
+ if (desc->status & IRQ_SUSPENDED)
+ goto err_out;
/* Prevent probing on this irq: */
desc->status = status | IRQ_NOPROBE;
check_irq_resend(desc, irq);
@@ -253,7 +293,7 @@ void enable_irq(unsigned int irq)
return;
spin_lock_irqsave(&desc->lock, flags);
- __enable_irq(desc, irq);
+ __enable_irq(desc, irq, false);
spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
@@ -384,14 +424,98 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return ret;
}
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ schedule();
+ }
+ return -1;
+}
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+ struct irqaction *action = data;
+ struct irq_desc *desc = irq_to_desc(action->irq);
+ int wake;
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ current->irqaction = action;
+
+ while (!irq_wait_for_interrupt(action)) {
+
+ atomic_inc(&desc->threads_active);
+
+ spin_lock_irq(&desc->lock);
+ if (unlikely(desc->status & IRQ_DISABLED)) {
+ /*
+ * CHECKME: We might need a dedicated
+ * IRQ_THREAD_PENDING flag here, which
+ * retriggers the thread in check_irq_resend()
+ * but AFAICT IRQ_PENDING should be fine as it
+ * retriggers the interrupt itself --- tglx
+ */
+ desc->status |= IRQ_PENDING;
+ spin_unlock_irq(&desc->lock);
+ } else {
+ spin_unlock_irq(&desc->lock);
+
+ action->thread_fn(action->irq, action->dev_id);
+ }
+
+ wake = atomic_dec_and_test(&desc->threads_active);
+
+ if (wake && waitqueue_active(&desc->wait_for_threads))
+ wake_up(&desc->wait_for_threads);
+ }
+
+ /*
+ * Clear irqaction. Otherwise exit_irq_thread() would make a
+ * fuss about an active irq thread going into nirvana.
+ */
+ current->irqaction = NULL;
+ return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+ struct task_struct *tsk = current;
+
+ if (!tsk->irqaction)
+ return;
+
+ printk(KERN_ERR
+ "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+ tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+ /*
+ * Set the THREAD DIED flag to prevent further wakeups of the
+ * soon to be gone threaded handler.
+ */
+ set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
+}
+
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
*/
static int
-__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
+__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
- struct irqaction *old, **p;
+ struct irqaction *old, **old_ptr;
const char *old_name = NULL;
unsigned long flags;
int shared = 0;
@@ -420,11 +544,31 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
}
/*
+ * Threaded handler ?
+ */
+ if (new->thread_fn) {
+ struct task_struct *t;
+
+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+ new->name);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ /*
+ * We keep the reference to the task struct even if
+ * the thread dies to avoid that the interrupt code
+ * references an already freed task_struct.
+ */
+ get_task_struct(t);
+ new->thread = t;
+ wake_up_process(t);
+ }
+
+ /*
* The following block of code has to be executed atomically
*/
spin_lock_irqsave(&desc->lock, flags);
- p = &desc->action;
- old = *p;
+ old_ptr = &desc->action;
+ old = *old_ptr;
if (old) {
/*
* Can't share interrupts unless both agree to and are
@@ -447,8 +591,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
/* add new interrupt at end of irq queue */
do {
- p = &old->next;
- old = *p;
+ old_ptr = &old->next;
+ old = *old_ptr;
} while (old);
shared = 1;
}
@@ -456,15 +600,15 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
if (!shared) {
irq_chip_set_defaults(desc->chip);
+ init_waitqueue_head(&desc->wait_for_threads);
+
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc, irq,
new->flags & IRQF_TRIGGER_MASK);
- if (ret) {
- spin_unlock_irqrestore(&desc->lock, flags);
- return ret;
- }
+ if (ret)
+ goto out_thread;
} else
compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
@@ -488,7 +632,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
desc->status |= IRQ_NO_BALANCING;
/* Set default affinity mask once everything is setup */
- do_irq_select_affinity(irq, desc);
+ setup_affinity(irq, desc);
} else if ((new->flags & IRQF_TRIGGER_MASK)
&& (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +643,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
(int)(new->flags & IRQF_TRIGGER_MASK));
}
- *p = new;
+ *old_ptr = new;
/* Reset broken irq detection when installing new handler */
desc->irq_count = 0;
@@ -511,7 +655,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
*/
if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
desc->status &= ~IRQ_SPURIOUS_DISABLED;
- __enable_irq(desc, irq);
+ __enable_irq(desc, irq, false);
}
spin_unlock_irqrestore(&desc->lock, flags);
@@ -532,8 +676,19 @@ mismatch:
dump_stack();
}
#endif
+ ret = -EBUSY;
+
+out_thread:
spin_unlock_irqrestore(&desc->lock, flags);
- return -EBUSY;
+ if (new->thread) {
+ struct task_struct *t = new->thread;
+
+ new->thread = NULL;
+ if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+ kthread_stop(t);
+ put_task_struct(t);
+ }
+ return ret;
}
/**
@@ -549,97 +704,138 @@ int setup_irq(unsigned int irq, struct irqaction *act)
return __setup_irq(irq, desc, act);
}
+EXPORT_SYMBOL_GPL(setup_irq);
-/**
- * free_irq - free an interrupt
- * @irq: Interrupt line to free
- * @dev_id: Device identity to free
- *
- * Remove an interrupt handler. The handler is removed and if the
- * interrupt line is no longer in use by any driver it is disabled.
- * On a shared IRQ the caller must ensure the interrupt is disabled
- * on the card it drives before calling this function. The function
- * does not return until any executing interrupts for this IRQ
- * have completed.
- *
- * This function must not be called from interrupt context.
+ /*
+ * Internal function to unregister an irqaction - used to free
+ * regular and special interrupts that are part of the architecture.
*/
-void free_irq(unsigned int irq, void *dev_id)
+static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
- struct irqaction **p;
+ struct irqaction *action, **action_ptr;
+ struct task_struct *irqthread;
unsigned long flags;
- WARN_ON(in_interrupt());
+ WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
if (!desc)
- return;
+ return NULL;
spin_lock_irqsave(&desc->lock, flags);
- p = &desc->action;
+
+ /*
+ * There can be multiple actions per IRQ descriptor, find the right
+ * one based on the dev_id:
+ */
+ action_ptr = &desc->action;
for (;;) {
- struct irqaction *action = *p;
+ action = *action_ptr;
+
+ if (!action) {
+ WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ spin_unlock_irqrestore(&desc->lock, flags);
- if (action) {
- struct irqaction **pp = p;
+ return NULL;
+ }
- p = &action->next;
- if (action->dev_id != dev_id)
- continue;
+ if (action->dev_id == dev_id)
+ break;
+ action_ptr = &action->next;
+ }
- /* Found it - now remove it from the list of entries */
- *pp = action->next;
+ /* Found it - now remove it from the list of entries: */
+ *action_ptr = action->next;
- /* Currently used only by UML, might disappear one day.*/
+ /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
- if (desc->chip->release)
- desc->chip->release(irq, dev_id);
+ if (desc->chip->release)
+ desc->chip->release(irq, dev_id);
#endif
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- if (desc->chip->shutdown)
- desc->chip->shutdown(irq);
- else
- desc->chip->disable(irq);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
- unregister_handler_proc(irq, action);
+ /* If this was the last handler, shut down the IRQ line: */
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ if (desc->chip->shutdown)
+ desc->chip->shutdown(irq);
+ else
+ desc->chip->disable(irq);
+ }
+
+ irqthread = action->thread;
+ action->thread = NULL;
+
+ spin_unlock_irqrestore(&desc->lock, flags);
+
+ unregister_handler_proc(irq, action);
+
+ /* Make sure it's not being used on another CPU: */
+ synchronize_irq(irq);
+
+ if (irqthread) {
+ if (!test_bit(IRQTF_DIED, &action->thread_flags))
+ kthread_stop(irqthread);
+ put_task_struct(irqthread);
+ }
- /* Make sure it's not being used on another CPU */
- synchronize_irq(irq);
-#ifdef CONFIG_DEBUG_SHIRQ
- /*
- * It's a shared IRQ -- the driver ought to be
- * prepared for it to happen even now it's
- * being freed, so let's make sure.... We do
- * this after actually deregistering it, to
- * make sure that a 'real' IRQ doesn't run in
- * parallel with our fake
- */
- if (action->flags & IRQF_SHARED) {
- local_irq_save(flags);
- action->handler(irq, dev_id);
- local_irq_restore(flags);
- }
-#endif
- kfree(action);
- return;
- }
- printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
#ifdef CONFIG_DEBUG_SHIRQ
- dump_stack();
-#endif
- spin_unlock_irqrestore(&desc->lock, flags);
- return;
+ /*
+ * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+ * event to happen even now it's being freed, so let's make sure that
+ * is so by doing an extra call to the handler ....
+ *
+ * ( We do this after actually deregistering it, to make sure that a
+ * 'real' IRQ doesn't run in parallel with our fake. )
+ */
+ if (action->flags & IRQF_SHARED) {
+ local_irq_save(flags);
+ action->handler(irq, dev_id);
+ local_irq_restore(flags);
}
+#endif
+ return action;
+}
+
+/**
+ * remove_irq - free an interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically set up by the early boot process.
+ */
+void remove_irq(unsigned int irq, struct irqaction *act)
+{
+ __free_irq(irq, act->dev_id);
+}
+EXPORT_SYMBOL_GPL(remove_irq);
+
+/**
+ * free_irq - free an interrupt allocated with request_irq
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the
+ * interrupt line is no longer in use by any driver it is disabled.
+ * On a shared IRQ the caller must ensure the interrupt is disabled
+ * on the card it drives before calling this function. The function
+ * does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+ kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);
/**
- * request_irq - allocate an interrupt line
+ * request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * @thread_fn: Function called from the irq handler thread
+ * If NULL, no irq thread is created
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
@@ -651,6 +847,15 @@ EXPORT_SYMBOL(free_irq);
* raises, you must take care both to initialise your hardware
* and to set up the interrupt handler in the right order.
*
+ * If you want to set up a threaded irq handler for your device
+ * then you need to supply @handler and @thread_fn. @handler is
+ * still called in hard interrupt context and has to check
+ * whether the interrupt originates from the device. If yes, it
+ * needs to disable the interrupt on the device and return
+ * IRQ_WAKE_THREAD which will wake up the handler thread and run
+ * @thread_fn. This split handler design is necessary to support
+ * shared interrupts.
+ *
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
@@ -666,8 +871,9 @@ EXPORT_SYMBOL(free_irq);
* IRQF_TRIGGER_* Specify active edge(s) or level
*
*/
-int request_irq(unsigned int irq, irq_handler_t handler,
- unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn, unsigned long irqflags,
+ const char *devname, void *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
@@ -679,11 +885,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
* the behavior is classified as "will not fix" so we need to
* start nudging drivers away from using that idiom.
*/
- if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
- == (IRQF_SHARED|IRQF_DISABLED))
- pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
- "guaranteed on shared IRQs\n",
- irq, devname);
+ if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+ (IRQF_SHARED|IRQF_DISABLED)) {
+ pr_warning(
+ "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
+ irq, devname);
+ }
#ifdef CONFIG_LOCKDEP
/*
@@ -709,15 +916,14 @@ int request_irq(unsigned int irq, irq_handler_t handler,
if (!handler)
return -EINVAL;
- action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
+ action->thread_fn = thread_fn;
action->flags = irqflags;
- cpus_clear(action->mask);
action->name = devname;
- action->next = NULL;
action->dev_id = dev_id;
retval = __setup_irq(irq, desc, action);
@@ -745,4 +951,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
#endif
return retval;
}
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);