Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/chip.c         8
-rw-r--r--   kernel/irq/internals.h    3
-rw-r--r--   kernel/irq/irqdomain.c  106
-rw-r--r--   kernel/irq/manage.c     119
-rw-r--r--   kernel/irq/migration.c   13
5 files changed, 161 insertions, 88 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fc275e4..eebd6d5 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq)
kstat_incr_irqs_this_cpu(irq, desc);
action = desc->action;
- if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
+ if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
+ desc->istate |= IRQS_PENDING;
goto out_unlock;
+ }
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock_irq(&desc->lock);
@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
- if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
+ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+ desc->istate |= IRQS_PENDING;
goto out_unlock;
+ }
handle_irq_event(desc);
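
[Editor's note] The two chip.c hunks above change handle_nested_irq() and handle_simple_irq() so that an interrupt arriving while the line is disabled (or has no action installed) marks the descriptor with IRQS_PENDING instead of being silently dropped; the core can then replay it once the line is enabled again. A minimal sketch of the situation this helps, using a hypothetical driver (example_dev and example_reprogram_hw are not real symbols):

static void example_quiesce(struct example_dev *dev)
{
	/*
	 * An edge that fires in this window used to be lost for the
	 * simple/nested flow handlers; with IRQS_PENDING recorded it
	 * can be resent when the line is enabled again.
	 */
	disable_irq(dev->irq);
	example_reprogram_hw(dev);
	enable_irq(dev->irq);
}
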
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 8e5c56b..001fa5b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc);
+extern int irq_do_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force);
+
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0e0ba5f..41c1564 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "irq: " fmt
+
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
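
[Editor's note] The pr_fmt() definition added at the top of irqdomain.c is what lets the later hunks in this file drop the hand-written "irq: " prefix from each pr_debug()/pr_warning() call: the pr_*() macros expand pr_fmt() around every format string. A minimal illustration of the mechanism, not taken from this patch:

#define pr_fmt(fmt) "irq: " fmt		/* must be defined before printk.h is pulled in */

#include <linux/printk.h>

static void pr_fmt_demo(void)
{
	pr_debug("Allocated domain\n");	/* logs as "irq: Allocated domain" */
}
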
@@ -56,14 +58,73 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
return domain;
}
+static void irq_domain_free(struct irq_domain *domain)
+{
+ of_node_put(domain->of_node);
+ kfree(domain);
+}
+
static void irq_domain_add(struct irq_domain *domain)
{
mutex_lock(&irq_domain_mutex);
list_add(&domain->link, &irq_domain_list);
mutex_unlock(&irq_domain_mutex);
- pr_debug("irq: Allocated domain of type %d @0x%p\n",
+ pr_debug("Allocated domain of type %d @0x%p\n",
+ domain->revmap_type, domain);
+}
+
+/**
+ * irq_domain_remove() - Remove an irq domain.
+ * @domain: domain to remove
+ *
+ * This routine is used to remove an irq domain. The caller must ensure
+ * that all mappings within the domain have been disposed of prior to
+ * use, depending on the revmap type.
+ */
+void irq_domain_remove(struct irq_domain *domain)
+{
+ mutex_lock(&irq_domain_mutex);
+
+ switch (domain->revmap_type) {
+ case IRQ_DOMAIN_MAP_LEGACY:
+ /*
+ * Legacy domains don't manage their own irq_desc
+ * allocations, we expect the caller to handle irq_desc
+ * freeing on their own.
+ */
+ break;
+ case IRQ_DOMAIN_MAP_TREE:
+ /*
+ * radix_tree_delete() takes care of destroying the root
+ * node when all entries are removed. Shout if there are
+ * any mappings left.
+ */
+ WARN_ON(domain->revmap_data.tree.height);
+ break;
+ case IRQ_DOMAIN_MAP_LINEAR:
+ kfree(domain->revmap_data.linear.revmap);
+ domain->revmap_data.linear.size = 0;
+ break;
+ case IRQ_DOMAIN_MAP_NOMAP:
+ break;
+ }
+
+ list_del(&domain->link);
+
+ /*
+ * If the going away domain is the default one, reset it.
+ */
+ if (unlikely(irq_default_domain == domain))
+ irq_set_default_host(NULL);
+
+ mutex_unlock(&irq_domain_mutex);
+
+ pr_debug("Removed domain of type %d @0x%p\n",
domain->revmap_type, domain);
+
+ irq_domain_free(domain);
}
+EXPORT_SYMBOL_GPL(irq_domain_remove);
static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
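
[Editor's note] The hunk above introduces irq_domain_remove(), the teardown counterpart to the irq_domain_add_*() constructors, along with the irq_domain_free() helper. A hedged usage sketch for a driver that created a linear domain at probe time: per the kernel-doc above, every mapping should be disposed of before the domain is removed. The example_intc structure and its fields are hypothetical.

static void example_intc_teardown(struct example_intc *intc)
{
	int i;

	/* dispose of all mappings created on this domain first */
	for (i = 0; i < intc->nr_hwirqs; i++) {
		unsigned int virq = irq_find_mapping(intc->domain, i);

		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(intc->domain);
}
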
@@ -117,8 +178,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
if (WARN_ON(!irq_data || irq_data->domain)) {
mutex_unlock(&irq_domain_mutex);
- of_node_put(domain->of_node);
- kfree(domain);
+ irq_domain_free(domain);
return NULL;
}
}
@@ -152,10 +212,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_domain_add(domain);
return domain;
}
+EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
* irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
+ * @size: Number of interrupts in the domain.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*/
@@ -181,6 +243,7 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
irq_domain_add(domain);
return domain;
}
+EXPORT_SYMBOL_GPL(irq_domain_add_linear);
struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
@@ -195,6 +258,7 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
}
return domain;
}
+EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
/**
* irq_domain_add_tree()
@@ -216,6 +280,7 @@ struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
}
return domain;
}
+EXPORT_SYMBOL_GPL(irq_domain_add_tree);
/**
* irq_find_host() - Locates a domain for a given device node
@@ -259,10 +324,11 @@ EXPORT_SYMBOL_GPL(irq_find_host);
*/
void irq_set_default_host(struct irq_domain *domain)
{
- pr_debug("irq: Default domain set to @0x%p\n", domain);
+ pr_debug("Default domain set to @0x%p\n", domain);
irq_default_domain = domain;
}
+EXPORT_SYMBOL_GPL(irq_set_default_host);
static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
@@ -272,7 +338,7 @@ static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
irq_data->hwirq = hwirq;
irq_data->domain = domain;
if (domain->ops->map(domain, virq, hwirq)) {
- pr_debug("irq: -> mapping failed, freeing\n");
+ pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
irq_data->domain = NULL;
irq_data->hwirq = 0;
return -1;
@@ -303,7 +369,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
virq = irq_alloc_desc_from(1, 0);
if (!virq) {
- pr_debug("irq: create_direct virq allocation failed\n");
+ pr_debug("create_direct virq allocation failed\n");
return 0;
}
if (virq >= domain->revmap_data.nomap.max_irq) {
@@ -312,7 +378,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
irq_free_desc(virq);
return 0;
}
- pr_debug("irq: create_direct obtained virq %d\n", virq);
+ pr_debug("create_direct obtained virq %d\n", virq);
if (irq_setup_virq(domain, virq, virq)) {
irq_free_desc(virq);
@@ -321,6 +387,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
return virq;
}
+EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
* irq_create_mapping() - Map a hardware interrupt into linux irq space
@@ -338,23 +405,23 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
unsigned int hint;
int virq;
- pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+ pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
/* Look for default domain if nececssary */
if (domain == NULL)
domain = irq_default_domain;
if (domain == NULL) {
- printk(KERN_WARNING "irq_create_mapping called for"
- " NULL domain, hwirq=%lx\n", hwirq);
+ pr_warning("irq_create_mapping called for"
+ " NULL domain, hwirq=%lx\n", hwirq);
WARN_ON(1);
return 0;
}
- pr_debug("irq: -> using domain @%p\n", domain);
+ pr_debug("-> using domain @%p\n", domain);
/* Check if mapping already exists */
virq = irq_find_mapping(domain, hwirq);
if (virq) {
- pr_debug("irq: -> existing mapping on virq %d\n", virq);
+ pr_debug("-> existing mapping on virq %d\n", virq);
return virq;
}
@@ -370,7 +437,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
if (virq <= 0)
virq = irq_alloc_desc_from(1, 0);
if (virq <= 0) {
- pr_debug("irq: -> virq allocation failed\n");
+ pr_debug("-> virq allocation failed\n");
return 0;
}
@@ -380,7 +447,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
return 0;
}
- pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
+ pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
return virq;
@@ -409,8 +476,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
if (intsize > 0)
return intspec[0];
#endif
- printk(KERN_WARNING "irq: no irq domain found for %s !\n",
- controller->full_name);
+ pr_warning("no irq domain found for %s !\n",
+ controller->full_name);
return 0;
}
@@ -560,6 +627,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
*/
return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
}
+EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
/**
* irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
@@ -584,6 +652,7 @@ void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
mutex_unlock(&revmap_trees_mutex);
}
}
+EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
/**
* irq_linear_revmap() - Find a linux irq from a hw irq number.
@@ -617,6 +686,7 @@ unsigned int irq_linear_revmap(struct irq_domain *domain,
return revmap[hwirq];
}
+EXPORT_SYMBOL_GPL(irq_linear_revmap);
#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
@@ -691,8 +761,8 @@ static int __init irq_debugfs_init(void)
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
-int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
+static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
{
return 0;
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bb32326..8c54823 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -7,6 +7,8 @@
* This file contains driver APIs to the irq subsystem.
*/
+#define pr_fmt(fmt) "genirq: " fmt
+
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
@@ -14,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/task_work.h>
#include "internals.h"
@@ -139,6 +142,25 @@ static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_desc *desc = irq_data_to_desc(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ int ret;
+
+ ret = chip->irq_set_affinity(data, mask, false);
+ switch (ret) {
+ case IRQ_SET_MASK_OK:
+ cpumask_copy(data->affinity, mask);
+ case IRQ_SET_MASK_OK_NOCOPY:
+ irq_set_thread_affinity(desc);
+ ret = 0;
+ }
+
+ return ret;
+}
+
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -149,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
return -EINVAL;
if (irq_can_move_pcntxt(data)) {
- ret = chip->irq_set_affinity(data, mask, false);
- switch (ret) {
- case IRQ_SET_MASK_OK:
- cpumask_copy(data->affinity, mask);
- case IRQ_SET_MASK_OK_NOCOPY:
- irq_set_thread_affinity(desc);
- ret = 0;
- }
+ ret = irq_do_set_affinity(data, mask, false);
} else {
irqd_set_move_pending(data);
irq_copy_pending(desc, mask);
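
[Editor's note] irq_do_set_affinity() factors out the ->irq_set_affinity() return-value handling that the later hunks remove from setup_affinity() and irq_move_masked_irq(). The switch falls through deliberately: IRQ_SET_MASK_OK means the core copies the new mask into data->affinity, while IRQ_SET_MASK_OK_NOCOPY means the chip callback already updated it itself; in both cases the thread affinity is propagated and 0 is returned. A hedged sketch of a chip callback taking the NOCOPY path (the hardware access and example_* names are hypothetical):

static int example_irq_set_affinity(struct irq_data *data,
				    const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	example_route_hwirq_to_cpu(data->hwirq, cpu);	/* hypothetical hw access */

	/* the callback updated data->affinity itself ... */
	cpumask_copy(data->affinity, cpumask_of(cpu));

	/* ... so irq_do_set_affinity() skips its own cpumask_copy() */
	return IRQ_SET_MASK_OK_NOCOPY;
}
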
@@ -280,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
- struct irq_chip *chip = irq_desc_get_chip(desc);
struct cpumask *set = irq_default_affinity;
- int ret, node = desc->irq_data.node;
+ int node = desc->irq_data.node;
/* Excludes PER_CPU and NO_BALANCE interrupts */
if (!irq_can_set_affinity(irq))
@@ -308,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
if (cpumask_intersects(mask, nodemask))
cpumask_and(mask, mask, nodemask);
}
- ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
- switch (ret) {
- case IRQ_SET_MASK_OK:
- cpumask_copy(desc->irq_data.affinity, mask);
- case IRQ_SET_MASK_OK_NOCOPY:
- irq_set_thread_affinity(desc);
- }
+ irq_do_set_affinity(&desc->irq_data, mask, false);
return 0;
}
#else
@@ -565,7 +573,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
* IRQF_TRIGGER_* but the PIC does not support multiple
* flow-types?
*/
- pr_debug("genirq: No set_type function for IRQ %d (%s)\n", irq,
+ pr_debug("No set_type function for IRQ %d (%s)\n", irq,
chip ? (chip->name ? : "unknown") : "unknown");
return 0;
}
@@ -600,7 +608,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
ret = 0;
break;
default:
- pr_err("genirq: Setting trigger mode %lu for irq %u failed (%pF)\n",
+ pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
flags, irq, chip->irq_set_type);
}
if (unmask)
@@ -773,11 +781,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
wake_up(&desc->wait_for_threads);
}
+static void irq_thread_dtor(struct task_work *unused)
+{
+ struct task_struct *tsk = current;
+ struct irq_desc *desc;
+ struct irqaction *action;
+
+ if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+ return;
+
+ action = kthread_data(tsk);
+
+ pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+ tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+ desc = irq_to_desc(action->irq);
+ /*
+ * If IRQTF_RUNTHREAD is set, we need to decrement
+ * desc->threads_active and wake possible waiters.
+ */
+ if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ wake_threads_waitq(desc);
+
+ /* Prevent a stale desc->threads_oneshot */
+ irq_finalize_oneshot(desc, action);
+}
+
/*
* Interrupt handler thread
*/
static int irq_thread(void *data)
{
+ struct task_work on_exit_work;
static const struct sched_param param = {
.sched_priority = MAX_USER_RT_PRIO/2,
};
@@ -793,7 +829,9 @@ static int irq_thread(void *data)
handler_fn = irq_thread_fn;
sched_setscheduler(current, SCHED_FIFO, &param);
- current->irq_thread = 1;
+
+ init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+ task_work_add(current, &on_exit_work, false);
while (!irq_wait_for_interrupt(action)) {
irqreturn_t action_ret;
@@ -815,44 +853,11 @@ static int irq_thread(void *data)
* cannot touch the oneshot mask at this point anymore as
* __setup_irq() might have given out currents thread_mask
* again.
- *
- * Clear irq_thread. Otherwise exit_irq_thread() would make
- * fuzz about an active irq thread going into nirvana.
*/
- current->irq_thread = 0;
+ task_work_cancel(current, irq_thread_dtor);
return 0;
}
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
- struct task_struct *tsk = current;
- struct irq_desc *desc;
- struct irqaction *action;
-
- if (!tsk->irq_thread)
- return;
-
- action = kthread_data(tsk);
-
- pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
- tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
- desc = irq_to_desc(action->irq);
-
- /*
- * If IRQTF_RUNTHREAD is set, we need to decrement
- * desc->threads_active and wake possible waiters.
- */
- if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
- wake_threads_waitq(desc);
-
- /* Prevent a stale desc->threads_oneshot */
- irq_finalize_oneshot(desc, action);
-}
-
static void irq_setup_forced_threading(struct irqaction *new)
{
if (!force_irqthreads)
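
[Editor's note] The irq_thread()/exit_irq_thread() hunks above swap the dedicated task_struct irq_thread flag and the explicit do_exit() hook for the generic task_work facility: the handler thread registers irq_thread_dtor() as exit work when it starts and cancels it on a normal return, so the cleanup only runs if the thread dies unexpectedly. The pattern reduced to a skeleton, using the task_work signatures visible in this diff (the API has changed in later kernels); my_exit_dtor and my_kthread are placeholders:

static void my_exit_dtor(struct task_work *unused)
{
	/* runs via task_work_run() if the thread exits without cancelling */
}

static int my_kthread(void *data)
{
	struct task_work on_exit_work;

	init_task_work(&on_exit_work, my_exit_dtor, NULL);
	task_work_add(current, &on_exit_work, false);

	while (!kthread_should_stop())
		schedule();		/* real work would go here */

	/* normal exit path: make sure the destructor does not fire */
	task_work_cancel(current, my_exit_dtor);
	return 0;
}
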
@@ -1044,7 +1049,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* has. The type flags are unreliable as the
* underlying chip implementation can override them.
*/
- pr_err("genirq: Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
+ pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
irq);
ret = -EINVAL;
goto out_mask;
@@ -1095,7 +1100,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (nmsk != omsk)
/* hope the handler works with current trigger mode */
- pr_warning("genirq: irq %d uses trigger mode %u; requested %u\n",
+ pr_warning("irq %d uses trigger mode %u; requested %u\n",
irq, nmsk, omsk);
}
@@ -1133,7 +1138,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
mismatch:
if (!(new->flags & IRQF_PROBE_SHARED)) {
- pr_err("genirq: Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
+ pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
dump_stack();
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index c3c8975..ca3f4aa 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
* For correct operation this depends on the caller
* masking the irqs.
*/
- if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
- < nr_cpu_ids)) {
- int ret = chip->irq_set_affinity(&desc->irq_data,
- desc->pending_mask, false);
- switch (ret) {
- case IRQ_SET_MASK_OK:
- cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
- case IRQ_SET_MASK_OK_NOCOPY:
- irq_set_thread_affinity(desc);
- }
- }
+ if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+ irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
cpumask_clear(desc->pending_mask);
}