author     Kumar Gala <galak@kernel.crashing.org>     2008-04-28 16:21:22 +1000
committer  Paul Mackerras <paulus@samba.org>          2008-04-29 15:57:34 +1000
commit     85218827cc4ca900867807f19345418164ffc108 (patch)
tree       1813b7fadb7c077acd0ef62f57385b7424ca0121 /arch/powerpc
parent     dd18434ff0b7d9b9ad3d596985fc84b329d2f9a8 (diff)
[POWERPC] Add IRQSTACKS support on ppc32
This makes it possible to use separate stacks for hard and soft IRQs on 32-bit powerpc as well as on 64-bit.  The code for 32-bit is just the 32-bit analog of the 64-bit code.

* Added allocation and initialization of the irq stacks.  We limit the stacks to be in lowmem for ppc32.
* Implemented ppc32 versions of call_do_softirq() and call_handle_irq() to switch the stack pointers.
* Reworked how we do stack overflow detection.  We now keep around the limit of the stack in the thread_struct and compare against the limit to see if we've overflowed.  We can now use this on ppc64 if desired.

[ paulus@samba.org: Fixed bug on 6xx where we need to reload r9 with the thread_info pointer. ]

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
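The reworked overflow detection boils down to comparing the stack pointer against the new per-thread ksp_limit field (see the entry_32.S hunk below). A minimal C sketch of that idea, using illustrative names rather than the kernel's own definitions:

/* Sketch only: illustrative struct/function names, not the kernel's. */
struct thread_limits {
	unsigned long ksp;       /* saved kernel stack pointer */
	unsigned long ksp_limit; /* lowest valid address of the kernel stack */
};

/* C equivalent of "lwz r9,KSP_LIMIT(r12); cmplw r1,r9; ble- stack_ovf":
 * the stack grows down, so a stack pointer at or below the limit means
 * the kernel stack has overflowed. */
static inline int stack_overflowed(unsigned long sp,
				   const struct thread_limits *t)
{
	return sp <= t->ksp_limit;
}

When switching to a dedicated IRQ stack, do_IRQ() and do_softirq_onstack() temporarily point ksp_limit just past the thread_info at the bottom of that stack and restore the saved value afterwards, as the irq.c hunk below shows.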
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig.debug           1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c    1
-rw-r--r--  arch/powerpc/kernel/entry_32.S       5
-rw-r--r--  arch/powerpc/kernel/irq.c           10
-rw-r--r--  arch/powerpc/kernel/misc_32.S       25
-rw-r--r--  arch/powerpc/kernel/process.c        2
-rw-r--r--  arch/powerpc/kernel/setup_32.c      21
7 files changed, 60 insertions, 5 deletions
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 807a2dc..a7d24e6 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -118,7 +118,6 @@ config XMON_DISASSEMBLY
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
- depends on PPC64
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6213484..af1d2c8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -67,6 +67,7 @@ int main(void)
#endif /* CONFIG_PPC64 */
DEFINE(KSP, offsetof(struct thread_struct, ksp));
+ DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 84c8686..0c8614d 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -137,11 +137,12 @@ transfer_to_handler:
2: /* if from kernel, check interrupted DOZE/NAP mode and
* check for stack overflow
*/
- lwz r9,THREAD_INFO-THREAD(r12)
- cmplw r1,r9 /* if r1 <= current->thread_info */
+ lwz r9,KSP_LIMIT(r12)
+ cmplw r1,r9 /* if r1 <= ksp_limit */
ble- stack_ovf /* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
+ rlwinm r9,r1,0,0,31-THREAD_SHIFT
tophys(r9,r9) /* check local flags */
lwz r12,TI_LOCAL_FLAGS(r9)
mtcrf 0x01,r12
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 425616f..2f73f70 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -307,6 +307,7 @@ void do_IRQ(struct pt_regs *regs)
if (curtp != irqtp) {
struct irq_desc *desc = irq_desc + irq;
void *handler = desc->handle_irq;
+ unsigned long saved_sp_limit = current->thread.ksp_limit;
if (handler == NULL)
handler = &__do_IRQ;
irqtp->task = curtp->task;
@@ -319,7 +320,10 @@ void do_IRQ(struct pt_regs *regs)
(irqtp->preempt_count & ~SOFTIRQ_MASK) |
(curtp->preempt_count & SOFTIRQ_MASK);
+ current->thread.ksp_limit = (unsigned long)irqtp +
+ _ALIGN_UP(sizeof(struct thread_info), 16);
call_handle_irq(irq, desc, irqtp, handler);
+ current->thread.ksp_limit = saved_sp_limit;
irqtp->task = NULL;
@@ -352,9 +356,7 @@ void __init init_IRQ(void)
{
if (ppc_md.init_IRQ)
ppc_md.init_IRQ();
-#ifdef CONFIG_PPC64
irq_ctx_init();
-#endif
}
@@ -383,11 +385,15 @@ void irq_ctx_init(void)
static inline void do_softirq_onstack(void)
{
struct thread_info *curtp, *irqtp;
+ unsigned long saved_sp_limit = current->thread.ksp_limit;
curtp = current_thread_info();
irqtp = softirq_ctx[smp_processor_id()];
irqtp->task = curtp->task;
+ current->thread.ksp_limit = (unsigned long)irqtp +
+ _ALIGN_UP(sizeof(struct thread_info), 16);
call_do_softirq(irqtp);
+ current->thread.ksp_limit = saved_sp_limit;
irqtp->task = NULL;
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 92ccc6f..89aaaa6 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -32,6 +32,31 @@
.text
+#ifdef CONFIG_IRQSTACKS
+_GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+ stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
+ mr r1,r3
+ bl __do_softirq
+ lwz r1,0(r1)
+ lwz r0,4(r1)
+ mtlr r0
+ blr
+
+_GLOBAL(call_handle_irq)
+ mflr r0
+ stw r0,4(r1)
+ mtctr r6
+ stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
+ mr r1,r5
+ bctrl
+ lwz r1,0(r1)
+ lwz r0,4(r1)
+ mtlr r0
+ blr
+#endif /* CONFIG_IRQSTACKS */
+
/*
* This returns the high 64 bits of the product of two 64-bit numbers.
*/
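In call_do_softirq() and call_handle_irq() above, the stwu stores the old r1 into the back-chain word at the base of the new frame on the IRQ stack, so the "lwz r1,0(r1)" after the call restores the original stack pointer. As a rough userspace analogy only (an assumption for illustration; the kernel does this with a direct r1 switch in assembly, not ucontext), running a function on a separately allocated stack can be sketched with POSIX ucontext:

/* Userspace sketch of "call a function on a separate stack, then return".
 * Analogy only; the stack size and names here are arbitrary choices. */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define ALT_STACK_SIZE (64 * 1024)

static ucontext_t main_ctx, alt_ctx;

static void on_alt_stack(void)
{
	int marker;
	/* A local's address shows we are running on the other stack. */
	printf("handler running near %p\n", (void *)&marker);
	/* Returning from this function resumes main_ctx via uc_link. */
}

int main(void)
{
	void *stack = malloc(ALT_STACK_SIZE);
	if (!stack)
		return 1;

	getcontext(&alt_ctx);
	alt_ctx.uc_stack.ss_sp = stack;
	alt_ctx.uc_stack.ss_size = ALT_STACK_SIZE;
	alt_ctx.uc_link = &main_ctx;      /* resume here when on_alt_stack returns */
	makecontext(&alt_ctx, on_alt_stack, 0);

	swapcontext(&main_ctx, &alt_ctx); /* switch stacks and call the function */
	printf("back on the original stack\n");
	free(stack);
	return 0;
}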
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6caad17..7de41c3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -589,6 +589,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
p->thread.ksp = sp;
+ p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
+ _ALIGN_UP(sizeof(struct thread_info), 16);
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SLB)) {
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 36f6779..5112a4a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -16,6 +16,7 @@
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/console.h>
+#include <linux/lmb.h>
#include <asm/io.h>
#include <asm/prom.h>
@@ -229,6 +230,24 @@ int __init ppc_init(void)
arch_initcall(ppc_init);
+#ifdef CONFIG_IRQSTACKS
+static void __init irqstack_early_init(void)
+{
+ unsigned int i;
+
+ /* interrupt stacks must be in lowmem, we get that for free on ppc32
+ * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+ for_each_possible_cpu(i) {
+ softirq_ctx[i] = (struct thread_info *)
+ __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ hardirq_ctx[i] = (struct thread_info *)
+ __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ }
+}
+#else
+#define irqstack_early_init()
+#endif
+
/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
@@ -286,6 +305,8 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
+ irqstack_early_init();
+
/* set up the bootmem stuff with available memory */
do_init_bootmem();
if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);