Diffstat (limited to 'arch/arm/kernel/irq.c')
-rw-r--r--  arch/arm/kernel/irq.c | 124
1 file changed, 44 insertions(+), 80 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 28536e3..83bbad0 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -51,63 +51,18 @@ unsigned long irq_err_count;
 
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, cpu;
-	struct irq_desc *desc;
-	struct irqaction * action;
-	unsigned long flags;
-	int prec, n;
-
-	for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
-		n *= 10;
-
-#ifdef CONFIG_SMP
-	if (prec < 4)
-		prec = 4;
-#endif
-
-	if (i == 0) {
-		char cpuname[12];
-
-		seq_printf(p, "%*s ", prec, "");
-		for_each_present_cpu(cpu) {
-			sprintf(cpuname, "CPU%d", cpu);
-			seq_printf(p, " %10s", cpuname);
-		}
-		seq_putc(p, '\n');
-	}
-
-	if (i < nr_irqs) {
-		desc = irq_to_desc(i);
-		raw_spin_lock_irqsave(&desc->lock, flags);
-		action = desc->action;
-		if (!action)
-			goto unlock;
-
-		seq_printf(p, "%*d: ", prec, i);
-		for_each_present_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-		seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
-		seq_printf(p, "  %s", action->name);
-		for (action = action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-
-		seq_putc(p, '\n');
-unlock:
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	} else if (i == nr_irqs) {
 #ifdef CONFIG_FIQ
-		show_fiq_list(p, prec);
+	show_fiq_list(p, prec);
 #endif
 #ifdef CONFIG_SMP
-		show_ipi_list(p, prec);
+	show_ipi_list(p, prec);
 #endif
 #ifdef CONFIG_LOCAL_TIMERS
-		show_local_irqs(p, prec);
+	show_local_irqs(p, prec);
 #endif
-		seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
-	}
+	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
@@ -144,24 +99,21 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void set_irq_flags(unsigned int irq, unsigned int iflags)
 {
-	struct irq_desc *desc;
-	unsigned long flags;
+	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 
 	if (irq >= nr_irqs) {
 		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
 		return;
 	}
 
-	desc = irq_to_desc(irq);
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	if (iflags & IRQF_VALID)
-		desc->status &= ~IRQ_NOREQUEST;
+		clr |= IRQ_NOREQUEST;
 	if (iflags & IRQF_PROBE)
-		desc->status &= ~IRQ_NOPROBE;
+		clr |= IRQ_NOPROBE;
 	if (!(iflags & IRQF_NOAUTOEN))
-		desc->status &= ~IRQ_NOAUTOEN;
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+		clr |= IRQ_NOAUTOEN;
+	/* Order is clear bits in "clr" then set bits in "set" */
+	irq_modify_status(irq, clr, set & ~clr);
 }
 
 void __init init_IRQ(void)
@@ -179,14 +131,21 @@ int __init arch_probe_nr_irqs(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static bool migrate_one_irq(struct irq_data *d)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
+	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	bool ret = false;
+
+	if (cpu >= nr_cpu_ids) {
+		cpu = cpumask_any(cpu_online_mask);
+		ret = true;
+	}
 
-	raw_spin_lock_irq(&desc->lock);
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
-					      cpumask_of(cpu), false);
-	raw_spin_unlock_irq(&desc->lock);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
+
+	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+
+	return ret;
 }
 
 /*
@@ -198,25 +157,30 @@ void migrate_irqs(void)
 {
 	unsigned int i, cpu = smp_processor_id();
 	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	for_each_irq_desc(i, desc) {
 		struct irq_data *d = &desc->irq_data;
+		bool affinity_broken = false;
+
+		raw_spin_lock(&desc->lock);
+		do {
+			if (desc->action == NULL)
+				break;
+
+			if (d->node != cpu)
+				break;
 
-		if (d->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(d->affinity,
-							      cpu_online_mask);
-			if (newcpu >= nr_cpu_ids) {
-				if (printk_ratelimit())
-					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-					       i, cpu);
-
-				cpumask_setall(d->affinity);
-				newcpu = cpumask_any_and(d->affinity,
-							 cpu_online_mask);
-			}
-
-			route_irq(desc, i, newcpu);
-		}
+			affinity_broken = migrate_one_irq(d);
+		} while (0);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken && printk_ratelimit())
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
 	}
+
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
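
Note on the set_irq_flags() change above: irq_modify_status(irq, clr, set) in the generic IRQ core first clears the bits in "clr" and then sets the bits in "set" on the IRQ's status, taking the descriptor lock internally, which is why the open-coded lock/unlock around desc->status could be dropped. A minimal caller-side sketch of the resulting behaviour (the IRQ number 42 is hypothetical and purely for illustration; the IRQF_*/IRQ_* flag names are the ones used in the patch):

    /* Mark a (hypothetical) IRQ 42 as requestable and probeable, but do
     * not auto-enable it when it is requested.
     */
    set_irq_flags(42, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);

    /* With the patch applied, the call above reduces to one core call:
     * clear IRQ_NOREQUEST and IRQ_NOPROBE, then set IRQ_NOAUTOEN.
     */
    irq_modify_status(42, IRQ_NOREQUEST | IRQ_NOPROBE, IRQ_NOAUTOEN);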