author    Thomas Gleixner <tglx@linutronix.de>  2010-09-29 17:18:47 +0200
committer Thomas Gleixner <tglx@linutronix.de>  2010-10-12 16:53:44 +0200
commit    78f90d91f395cd0dc1ef3f21e0c5cd6fd50d202c (patch)
tree      e9a6b0a5d8a0b99cd23dbc1c1f376a7e78a93428 /kernel
parent    b7b29338dc7111ed8bd4d6555d84afae13ebe752 (diff)
genirq: Remove the now unused sparse irq leftovers
The move_irq_desc() function was only needed because the allocator did not
free the old descriptors, so the descriptors had to be moved in
create_irq_nr(). That's history. The code would never have been able to move
active interrupt descriptors on affinity settings anyway; that can be done in
a completely different way without all this horror. Remove all of it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
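The "completely different way" is not spelled out in this patch. As a purely
hypothetical sketch of the direction, the node can be treated as an
allocation hint and updated in place under the descriptor lock, with no
reallocation at all (the function name and the sketch are invented for
illustration; only desc->lock and desc->irq_data.node appear in the code
removed below):

#include <linux/irq.h>

/* Hypothetical sketch, not from this patch: update the NUMA hint
 * in place instead of reallocating the whole irq_desc. */
static void irq_desc_set_node_hint(struct irq_desc *desc, int node)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->irq_data.node = node;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}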
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/Kconfig        |   5
-rw-r--r--  kernel/irq/Makefile       |   1
-rw-r--r--  kernel/irq/internals.h    | 102
-rw-r--r--  kernel/irq/irqdesc.c      |  30
-rw-r--r--  kernel/irq/numa_migrate.c | 120
5 files changed, 2 insertions(+), 256 deletions(-)
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index a42c019..31d766b 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -26,11 +26,6 @@ config GENERIC_IRQ_PROBE
config GENERIC_PENDING_IRQ
def_bool n
-if SPARSE_IRQ && NUMA
-config NUMA_IRQ_DESC
- def_bool n
-endif
-
config AUTO_IRQ_AFFINITY
def_bool n
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 1eaab0da..54329cd 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,5 +3,4 @@ obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devr
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
-obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index f444203..4571ae7 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,17 +18,11 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
-extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
-extern raw_spinlock_t sparse_irq_lock;
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
-#ifdef CONFIG_SPARSE_IRQ
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
-#endif
-
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -110,99 +104,3 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
#undef P
-/* Stuff below will be cleaned up after the sparse allocator is done */
-
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc: pointer to irq_desc struct
- * @node: node which will be handling the cpumasks
- * @boot: true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
- bool boot)
-{
- gfp_t gfp = GFP_ATOMIC;
-
- if (boot)
- gfp = GFP_NOWAIT;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
- if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
- return false;
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
- if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
- free_cpumask_var(desc->irq_data.affinity);
- return false;
- }
-#endif
-#endif
- return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
- cpumask_setall(desc->irq_data.affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
- cpumask_clear(desc->pending_mask);
-#endif
-}
-
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc: pointer to old irq_desc struct
- * @new_desc: pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
- struct irq_desc *new_desc)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
- cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
- cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
- struct irq_desc *new_desc)
-{
- free_cpumask_var(old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
- free_cpumask_var(old_desc->pending_mask);
-#endif
-}
-
-#else /* !CONFIG_SMP */
-
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
- bool boot)
-{
- return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-}
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
- struct irq_desc *new_desc)
-{
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
- struct irq_desc *new_desc)
-{
-}
-#endif /* CONFIG_SMP */
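The cpumask helpers deleted above wrap the kernel's cpumask_var_t API. As a
minimal reference sketch of that API (alloc_cpumask_var_node(),
free_cpumask_var() and cpumask_setall() are real kernel interfaces; the
wrapper functions here are illustrative only):

#include <linux/cpumask.h>

/* Illustrative: allocate an affinity mask near @node. With
 * CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is an embedded array,
 * the allocation is a no-op and this always succeeds. */
static bool example_alloc_affinity(cpumask_var_t *mask, int node)
{
	if (!alloc_cpumask_var_node(mask, GFP_KERNEL, node))
		return false;
	cpumask_setall(*mask);	/* default: allow all CPUs */
	return true;
}

static void example_free_affinity(cpumask_var_t mask)
{
	free_cpumask_var(mask);
}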
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index c9d5a1c..4f0b9c9 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -20,7 +20,7 @@
/*
* lockdep: we want to handle all irq_desc locks as a single lock-class:
*/
-struct lock_class_key irq_desc_lock_class;
+static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
@@ -90,28 +90,11 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
-DEFINE_RAW_SPINLOCK(sparse_irq_lock);
+static DEFINE_RAW_SPINLOCK(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
#ifdef CONFIG_SPARSE_IRQ
-void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
-{
- void *ptr;
-
- ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
- GFP_ATOMIC, node);
-
- /*
- * don't overwite if can not get new one
- * init_copy_kstat_irqs() could still use old one
- */
- if (ptr) {
- printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
- desc->kstat_irqs = ptr;
- }
-}
-
static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
@@ -124,15 +107,6 @@ struct irq_desc *irq_to_desc(unsigned int irq)
return radix_tree_lookup(&irq_desc_tree, irq);
}
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
- void **ptr;
-
- ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
- if (ptr)
- radix_tree_replace_slot(ptr, desc);
-}
-
static void delete_irq_desc(unsigned int irq)
{
radix_tree_delete(&irq_desc_tree, irq);
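The lookup path that survives stores descriptors in a radix tree keyed by irq
number, as the hunk above shows. A minimal sketch of the same pattern (the
tree and helper names are illustrative; the radix tree calls match those kept
in irqdesc.c):

#include <linux/radix-tree.h>

static RADIX_TREE(example_desc_tree, GFP_ATOMIC);

static int example_insert(unsigned int irq, struct irq_desc *desc)
{
	/* returns 0 on success, -ENOMEM or -EEXIST on failure */
	return radix_tree_insert(&example_desc_tree, irq, desc);
}

static struct irq_desc *example_lookup(unsigned int irq)
{
	return radix_tree_lookup(&example_desc_tree, irq);
}

static void example_delete(unsigned int irq)
{
	radix_tree_delete(&example_desc_tree, irq);
}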
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
deleted file mode 100644
index e7f1f16..0000000
--- a/kernel/irq/numa_migrate.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * NUMA irq-desc migration code
- *
- * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
- * the new "home node" of the IRQ.
- */
-
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-
-#include "internals.h"
-
-static void init_copy_kstat_irqs(struct irq_desc *old_desc,
- struct irq_desc *desc,
- int node, int nr)
-{
- init_kstat_irqs(desc, node, nr);
-
- if (desc->kstat_irqs != old_desc->kstat_irqs)
- memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
- nr * sizeof(*desc->kstat_irqs));
-}
-
-static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
-{
- if (old_desc->kstat_irqs == desc->kstat_irqs)
- return;
-
- kfree(old_desc->kstat_irqs);
- old_desc->kstat_irqs = NULL;
-}
-
-static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
- struct irq_desc *desc, int node)
-{
- memcpy(desc, old_desc, sizeof(struct irq_desc));
- if (!alloc_desc_masks(desc, node, false)) {
- printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
- "for migration.\n", irq);
- return false;
- }
- raw_spin_lock_init(&desc->lock);
- desc->irq_data.node = node;
- lockdep_set_class(&desc->lock, &irq_desc_lock_class);
- init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
- init_copy_desc_masks(old_desc, desc);
- arch_init_copy_chip_data(old_desc, desc, node);
- return true;
-}
-
-static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
-{
- free_kstat_irqs(old_desc, desc);
- free_desc_masks(old_desc, desc);
- arch_free_chip_data(old_desc, desc);
-}
-
-static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
- int node)
-{
- struct irq_desc *desc;
- unsigned int irq;
- unsigned long flags;
-
- irq = old_desc->irq_data.irq;
-
- raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
- /* We have to check it to avoid races with another CPU */
- desc = irq_to_desc(irq);
-
- if (desc && old_desc != desc)
- goto out_unlock;
-
- desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
- if (!desc) {
- printk(KERN_ERR "irq %d: can not get new irq_desc "
- "for migration.\n", irq);
- /* still use old one */
- desc = old_desc;
- goto out_unlock;
- }
- if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
- /* still use old one */
- kfree(desc);
- desc = old_desc;
- goto out_unlock;
- }
-
- replace_irq_desc(irq, desc);
- raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
- /* free the old one */
- free_one_irq_desc(old_desc, desc);
- kfree(old_desc);
-
- return desc;
-
-out_unlock:
- raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
- return desc;
-}
-
-struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
- /* those static or target node is -1, do not move them */
- if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1)
- return desc;
-
- if (desc->irq_data.node != node)
- desc = __real_move_irq_desc(desc, node);
-
- return desc;
-}
-
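One idiom in the deleted __real_move_irq_desc() is worth noting: after taking
sparse_irq_lock it looks the descriptor up again, because another CPU may
have replaced it between the caller's lookup and the lock acquisition. A
stripped-down sketch of that re-check-under-lock idiom (the function is
invented; sparse_irq_lock and irq_to_desc() are from the code above):

#include <linux/irq.h>

/* Illustrative: revalidate a cached pointer under the lock that
 * protects the lookup structure; prefer the current entry if
 * another CPU won the race. */
static struct irq_desc *revalidate_desc(unsigned int irq,
					struct irq_desc *cached)
{
	struct irq_desc *desc;
	unsigned long flags;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	desc = irq_to_desc(irq);	/* re-lookup under the lock */
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return (desc && desc != cached) ? desc : cached;
}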