 arch/i386/kernel/io_apic.c |    2 +-
 include/linux/irq.h        |    2 +-
 kernel/irq/manage.c        |    4 ----
 kernel/irq/migration.c     |    8 ++++----
 4 files changed, 6 insertions(+), 10 deletions(-)
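
In short: the standalone pending_irq_cpumask[] array is folded into struct
irq_desc, so each interrupt's pending destination mask sits next to the rest
of that interrupt's state. A condensed before/after sketch of the
data-structure change, assembled from the hunks below (not a buildable
excerpt):

    /* Before: a separate, cacheline-aligned global array indexed by IRQ. */
    cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];

    /* After: the mask becomes a per-descriptor field. */
    struct irq_desc {
            /* ... */
    #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
            cpumask_t    pending_mask;
            unsigned int move_irq;     /* need to re-target IRQ dest */
    #endif
            /* ... */
    };
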
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 4a74b69..afe54f2 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -581,7 +581,7 @@ static int balanced_irq(void *unused)
/* push everything to CPU 0 to give us a starting point. */
for (i = 0 ; i < NR_IRQS ; i++) {
- pending_irq_cpumask[i] = cpumask_of_cpu(0);
+ irq_desc[i].pending_mask = cpumask_of_cpu(0);
set_pending_irq(i, cpumask_of_cpu(0));
}
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1022c5d..81f3d97 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -83,6 +83,7 @@ struct irq_desc {
cpumask_t affinity;
#endif
#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+ cpumask_t pending_mask;
unsigned int move_irq; /* need to re-target IRQ dest */
#endif
#ifdef CONFIG_PROC_FS
@@ -120,7 +121,6 @@ static inline void set_native_irq_info(int irq, cpumask_t mask)
#ifdef CONFIG_SMP
#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
-extern cpumask_t pending_irq_cpumask[NR_IRQS];
void set_pending_irq(unsigned int irq, cpumask_t mask);
void move_native_irq(int irq);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6a6f1d3..ca9b5d3 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,10 +16,6 @@
#ifdef CONFIG_SMP
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-#endif
-
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index a571c3a..a57ebe9 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -8,7 +8,7 @@ void set_pending_irq(unsigned int irq, cpumask_t mask)
spin_lock_irqsave(&desc->lock, flags);
desc->move_irq = 1;
- pending_irq_cpumask[irq] = mask;
+ irq_desc[irq].pending_mask = mask;
spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -30,7 +30,7 @@ void move_native_irq(int irq)
desc->move_irq = 0;
- if (unlikely(cpus_empty(pending_irq_cpumask[irq])))
+ if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
return;
if (!desc->chip->set_affinity)
@@ -38,7 +38,7 @@ void move_native_irq(int irq)
assert_spin_locked(&desc->lock);
- cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+ cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
/*
* If there was a valid mask to work with, please
@@ -58,5 +58,5 @@ void move_native_irq(int irq)
if (likely(!(desc->status & IRQ_DISABLED)))
desc->chip->enable(irq);
}
- cpus_clear(pending_irq_cpumask[irq]);
+ cpus_clear(irq_desc[irq].pending_mask);
}
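
For context, a minimal caller-side sketch of how the relocated mask is meant
to be used, modeled on the balanced_irq() and move_native_irq() hunks above.
retarget_irq() is a hypothetical helper used only for illustration; it is not
part of this patch:

    /* Hypothetical helper, for illustration only. */
    static void retarget_irq(unsigned int irq, int cpu)
    {
            /*
             * Record the request: under desc->lock this sets
             * desc->move_irq = 1 and stores the mask in
             * irq_desc[irq].pending_mask. The hardware is not reprogrammed
             * here.
             */
            set_pending_irq(irq, cpumask_of_cpu(cpu));

            /*
             * Later, from the interrupt path, move_native_irq(irq) ANDs
             * pending_mask with cpu_online_map, hands the result to
             * desc->chip->set_affinity(), and finally clears pending_mask.
             */
    }
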