author	Paul Mundt <lethal@linux-sh.org>	2009-05-22 10:40:09 +0900
committer	Ingo Molnar <mingo@elte.hu>	2009-05-23 14:55:24 +0200
commit	948cd52906baf1f92aeea2f9b5c515db1b2e592a (patch)
tree	a03482aebb7f8ed0b1706db8d6cd105f79ea53cc	/kernel/irq/handle.c
parent	4c6f18fc81565967da20f2d4a3922cdba33f8e2b (diff)
sparseirq: Allow early irq_desc allocation
Presently non-legacy IRQs have their irq_desc allocated with kzalloc_node(). This assumes that all callers of irq_to_desc_alloc_node() will be sufficiently late in the boot process that kmalloc is available.

While porting sparseirq support to sh this blew up immediately, as at the time we register the CPU's interrupt vector map, only bootmem is available. Check slab_is_available() to work out which allocation path to use.

[ Impact: fix SH early boot crash with sparseirq enabled ]

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
LKML-Reference: <20090522014008.GA2806@linux-sh.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
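A minimal sketch of the allocation pattern the patch introduces; the helper name alloc_desc_mem() is hypothetical and only illustrates the slab_is_available() check, it is not part of the patch itself:

#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>

/*
 * Hypothetical helper: pick the allocator based on how far boot
 * has progressed (pattern only, not in the actual patch).
 */
static void *alloc_desc_mem(size_t size, int node)
{
	if (slab_is_available())
		/* slab is up: normal node-local, zeroed allocation */
		return kzalloc_node(size, GFP_ATOMIC, node);

	/* early boot: only the bootmem allocator is usable */
	return alloc_bootmem_node(NODE_DATA(node), size);
}

The __ref annotations added to init_kstat_irqs() and irq_to_desc_alloc_node() keep the reference to the init-time bootmem allocator from triggering a section mismatch warning.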
Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r--	kernel/irq/handle.c	18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a3c671e..18041a2 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,7 @@
*/
#include <linux/irq.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
@@ -81,11 +82,16 @@ static struct irq_desc irq_desc_init = {
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
-void init_kstat_irqs(struct irq_desc *desc, int node, int nr)
+void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
void *ptr;
- ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
+ if (slab_is_available())
+ ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+ GFP_ATOMIC, node);
+ else
+ ptr = alloc_bootmem_node(NODE_DATA(node),
+ nr * sizeof(*desc->kstat_irqs));
/*
* don't overwite if can not get new one
@@ -186,7 +192,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
return NULL;
}
-struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
struct irq_desc *desc;
unsigned long flags;
@@ -208,7 +214,11 @@ struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
if (desc)
goto out_unlock;
- desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+ if (slab_is_available())
+ desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+ else
+ desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+
printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
if (!desc) {
printk(KERN_ERR "can not alloc irq_desc\n");