author     Haavard Skinnemoen <haavard.skinnemoen@atmel.com>   2009-10-06 17:36:55 +0200
committer  Haavard Skinnemoen <haavard.skinnemoen@atmel.com>   2009-10-06 17:36:55 +0200
commit     d94e5fcbf1420366dcb4102bafe04dbcfc0d0d4b (patch)
tree       a9b7de7df6da5c3132cc68169b9c47ba288ccd42 /arch/sh/lib
parent     d55651168a20078a94597a297d5cdfd807bf07b6 (diff)
parent     374576a8b6f865022c0fd1ca62396889b23d66dd (diff)
Merge commit 'v2.6.32-rc3'
Diffstat (limited to 'arch/sh/lib')
-rw-r--r--  arch/sh/lib/Makefile                                                |   4
-rw-r--r--  arch/sh/lib/__clear_user.S (renamed from arch/sh/lib/clear_page.S)  |  48
-rw-r--r--  arch/sh/lib/copy_page.S                                             |  11
-rw-r--r--  arch/sh/lib/delay.c                                                 |   5
-rw-r--r--  arch/sh/lib/mcount.S                                                | 228
5 files changed, 233 insertions(+), 63 deletions(-)
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index aaea580..a969b47 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -23,8 +23,8 @@ obj-y += io.o
memcpy-y := memcpy.o
memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o
-lib-$(CONFIG_MMU) += copy_page.o clear_page.o
-lib-$(CONFIG_FUNCTION_TRACER) += mcount.o
+lib-$(CONFIG_MMU) += copy_page.o __clear_user.o
+lib-$(CONFIG_MCOUNT) += mcount.o
lib-y += $(memcpy-y) $(udivsi3-y)
EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/__clear_user.S
index 8342bfb..db1dca7 100644
--- a/arch/sh/lib/clear_page.S
+++ b/arch/sh/lib/__clear_user.S
@@ -8,56 +8,10 @@
#include <linux/linkage.h>
#include <asm/page.h>
-/*
- * clear_page
- * @to: P1 address
- *
- * void clear_page(void *to)
- */
-
-/*
- * r0 --- scratch
- * r4 --- to
- * r5 --- to + PAGE_SIZE
- */
-ENTRY(clear_page)
- mov r4,r5
- mov.l .Llimit,r0
- add r0,r5
- mov #0,r0
- !
-1:
-#if defined(CONFIG_CPU_SH4)
- movca.l r0,@r4
- mov r4,r1
-#else
- mov.l r0,@r4
-#endif
- add #32,r4
- mov.l r0,@-r4
- mov.l r0,@-r4
- mov.l r0,@-r4
- mov.l r0,@-r4
- mov.l r0,@-r4
- mov.l r0,@-r4
- mov.l r0,@-r4
-#if defined(CONFIG_CPU_SH4)
- ocbwb @r1
-#endif
- cmp/eq r5,r4
- bf/s 1b
- add #28,r4
- !
- rts
- nop
-
- .balign 4
-.Llimit: .long (PAGE_SIZE-28)
-
ENTRY(__clear_user)
!
mov #0, r0
- mov #0xe0, r1 ! 0xffffffe0
+ mov #0xffffffe0, r1
!
! r4..(r4+31)&~32 -------- not aligned [ Area 0 ]
! (r4+31)&~32..(r4+r5)&~32 -------- aligned [ Area 1 ]
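The two comment lines above describe how __clear_user splits the buffer: an unaligned head (Area 0) cleared byte-wise up to a 32-byte boundary, then an aligned middle (Area 1) cleared a cache line at a time, with the remaining unaligned tail handled at the end of the routine. A rough C sketch of that split (hypothetical, not the kernel's code; the real routine must also fault gracefully on bad user pointers via exception tables):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void clear_user_sketch(unsigned char *to, size_t n)
    {
        unsigned char *end = to + n;

        /* Area 0: unaligned head, byte stores up to a 32-byte boundary. */
        while (((uintptr_t)to & 31) && to < end)
            *to++ = 0;

        /* Area 1: aligned middle, one 32-byte line per iteration. */
        while ((size_t)(end - to) >= 32) {
            memset(to, 0, 32);
            to += 32;
        }

        /* Unaligned tail. */
        while (to < end)
            *to++ = 0;
    }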
diff --git a/arch/sh/lib/copy_page.S b/arch/sh/lib/copy_page.S
index 43de7e8..9d7b8bc 100644
--- a/arch/sh/lib/copy_page.S
+++ b/arch/sh/lib/copy_page.S
@@ -30,7 +30,9 @@ ENTRY(copy_page)
mov r4,r10
mov r5,r11
mov r5,r8
- mov.l .Lpsz,r0
+ mov #(PAGE_SIZE >> 10), r0
+ shll8 r0
+ shll2 r0
add r0,r8
!
1: mov.l @r11+,r0
@@ -43,7 +45,6 @@ ENTRY(copy_page)
mov.l @r11+,r7
#if defined(CONFIG_CPU_SH4)
movca.l r0,@r10
- mov r10,r0
#else
mov.l r0,@r10
#endif
@@ -55,9 +56,6 @@ ENTRY(copy_page)
mov.l r3,@-r10
mov.l r2,@-r10
mov.l r1,@-r10
-#if defined(CONFIG_CPU_SH4)
- ocbwb @r0
-#endif
cmp/eq r11,r8
bf/s 1b
add #28,r10
@@ -68,9 +66,6 @@ ENTRY(copy_page)
rts
nop
- .balign 4
-.Lpsz: .long PAGE_SIZE
-
/*
* __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
* Return the number of bytes NOT copied
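The copy_page hunk above replaces a PC-relative literal pool load (.Lpsz) with a three-instruction sequence that materializes PAGE_SIZE from an 8-bit immediate, which is why the .Lpsz literal and its .balign are dropped at the end of the function. SH's mov #imm takes a sign-extended 8-bit value, so PAGE_SIZE >> 10 must fit in 0..127; that holds for the usual SH page sizes. A worked check in C (assuming a 4KB PAGE_SIZE):

    #include <assert.h>

    int main(void)
    {
        const unsigned long page_size = 4096;   /* assumed PAGE_SIZE */

        unsigned long r0 = page_size >> 10;     /* mov #(PAGE_SIZE >> 10), r0 */
        r0 <<= 8;                               /* shll8 r0 */
        r0 <<= 2;                               /* shll2 r0 */

        assert(r0 == page_size);                /* 4 << 10 == 4096 */
        return 0;
    }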
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index f3ddd21..faa8f86 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -21,13 +21,14 @@ void __delay(unsigned long loops)
inline void __const_udelay(unsigned long xloops)
{
+ xloops *= 4;
__asm__("dmulu.l %0, %2\n\t"
"sts mach, %0"
: "=r" (xloops)
: "0" (xloops),
- "r" (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy)
+ "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))
: "macl", "mach");
- __delay(xloops);
+ __delay(++xloops);
}
void __udelay(unsigned long usecs)
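In __const_udelay, dmulu.l computes a 64-bit product and sts mach keeps only the high 32 bits, so the routine effectively evaluates (xloops * HZ * loops_per_jiffy) >> 32. Pre-multiplying xloops by 4 while dividing HZ by 4 leaves that product unchanged but keeps the loops_per_jiffy * (HZ/4) operand from overflowing 32 bits on fast CPUs, and __delay(++xloops) rounds the truncated result up so a short requested delay can never collapse to zero loops. A hypothetical C model of the fixed path (a sketch under those assumptions, not the kernel source):

    /* C model of the fixed __const_udelay() arithmetic (hypothetical). */
    static unsigned long const_udelay_loops(unsigned long xloops,
                                            unsigned long loops_per_jiffy,
                                            unsigned long hz)
    {
        xloops *= 4;                                        /* xloops *= 4; */
        unsigned long long prod =
            (unsigned long long)xloops *
            (loops_per_jiffy * (hz / 4));                   /* dmulu.l %0, %2 */
        unsigned long loops = (unsigned long)(prod >> 32);  /* sts mach, %0 */
        return loops + 1;                                   /* __delay(++xloops) */
    }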
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 110fbfe..84a5776 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -1,14 +1,16 @@
/*
* arch/sh/lib/mcount.S
*
- * Copyright (C) 2008 Paul Mundt
- * Copyright (C) 2008 Matt Fleming
+ * Copyright (C) 2008, 2009 Paul Mundt
+ * Copyright (C) 2008, 2009 Matt Fleming
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <asm/ftrace.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
#define MCOUNT_ENTER() \
mov.l r4, @-r15; \
@@ -28,6 +30,55 @@
rts; \
mov.l @r15+, r4
+#ifdef CONFIG_STACK_DEBUG
+/*
+ * Perform diagnostic checks on the state of the kernel stack.
+ *
+ * Check for stack overflow. If there is less than 1KB free
+ * then it has overflowed.
+ *
+ * Make sure the stack pointer contains a valid address. Valid
+ * addresses for kernel stacks are anywhere after the bss
+ * (after _ebss) and anywhere in init_thread_union (init_stack).
+ */
+#define STACK_CHECK() \
+ mov #(THREAD_SIZE >> 10), r0; \
+ shll8 r0; \
+ shll2 r0; \
+ \
+ /* r1 = sp & (THREAD_SIZE - 1) */ \
+ mov #-1, r1; \
+ add r0, r1; \
+ and r15, r1; \
+ \
+ mov #TI_SIZE, r3; \
+ mov #(STACK_WARN >> 8), r2; \
+ shll8 r2; \
+ add r3, r2; \
+ \
+ /* Is the stack overflowing? */ \
+ cmp/hi r2, r1; \
+ bf stack_panic; \
+ \
+ /* If sp > _ebss then we're OK. */ \
+ mov.l .L_ebss, r1; \
+ cmp/hi r1, r15; \
+ bt 1f; \
+ \
+ /* If sp < init_stack, we're not OK. */ \
+ mov.l .L_init_thread_union, r1; \
+ cmp/hs r1, r15; \
+ bf stack_panic; \
+ \
+ /* If sp > init_stack && sp < _ebss, not OK. */ \
+ add r0, r1; \
+ cmp/hs r1, r15; \
+ bt stack_panic; \
+1:
+#else
+#define STACK_CHECK()
+#endif /* CONFIG_STACK_DEBUG */
+
.align 2
.globl _mcount
.type _mcount,@function
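The STACK_CHECK() macro introduced above is easier to audit as a C predicate. A hypothetical restatement of the same checks (parameter names are mine; the real macro reads _ebss and init_thread_union from literal pool entries and branches to stack_panic instead of returning):

    #include <stdint.h>

    /* Nonzero when the stack pointer looks sane; 0 means stack_panic. */
    static int stack_ok(uintptr_t sp, uintptr_t ebss, uintptr_t init_stack,
                        uintptr_t thread_size,          /* power of two */
                        uintptr_t ti_size, uintptr_t stack_warn)
    {
        /* Offset of sp within its THREAD_SIZE-aligned stack; at or
         * below thread_info plus the warn margin means overflow. */
        if ((sp & (thread_size - 1)) <= ti_size + stack_warn)
            return 0;

        if (sp > ebss)
            return 1;   /* above bss: an ordinary task stack */
        if (sp < init_stack)
            return 0;   /* below init_stack: not a valid stack */
        if (sp >= init_stack + thread_size)
            return 0;   /* between the end of init_stack and _ebss */
        return 1;       /* inside init_thread_union */
    }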
@@ -35,6 +86,19 @@
.type mcount,@function
_mcount:
mcount:
+ STACK_CHECK()
+
+#ifndef CONFIG_FUNCTION_TRACER
+ rts
+ nop
+#else
+#ifndef CONFIG_DYNAMIC_FTRACE
+ mov.l .Lfunction_trace_stop, r0
+ mov.l @r0, r0
+ tst r0, r0
+ bf ftrace_stub
+#endif
+
MCOUNT_ENTER()
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -52,16 +116,69 @@ mcount_call:
jsr @r6
nop
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ mov.l .Lftrace_graph_return, r6
+ mov.l .Lftrace_stub, r7
+ cmp/eq r6, r7
+ bt 1f
+
+ mov.l .Lftrace_graph_caller, r0
+ jmp @r0
+ nop
+
+1:
+ mov.l .Lftrace_graph_entry, r6
+ mov.l .Lftrace_graph_entry_stub, r7
+ cmp/eq r6, r7
+ bt skip_trace
+
+ mov.l .Lftrace_graph_caller, r0
+ jmp @r0
+ nop
+
+ .align 2
+.Lftrace_graph_return:
+ .long ftrace_graph_return
+.Lftrace_graph_entry:
+ .long ftrace_graph_entry
+.Lftrace_graph_entry_stub:
+ .long ftrace_graph_entry_stub
+.Lftrace_graph_caller:
+ .long ftrace_graph_caller
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+ .globl skip_trace
skip_trace:
MCOUNT_LEAVE()
.align 2
.Lftrace_trace_function:
- .long ftrace_trace_function
+ .long ftrace_trace_function
#ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * NOTE: Do not move either ftrace_graph_call or ftrace_caller
+ * as this will affect the calculation of GRAPH_INSN_OFFSET.
+ */
+ .globl ftrace_graph_call
+ftrace_graph_call:
+ mov.l .Lskip_trace, r0
+ jmp @r0
+ nop
+
+ .align 2
+.Lskip_trace:
+ .long skip_trace
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
.globl ftrace_caller
ftrace_caller:
+ mov.l .Lfunction_trace_stop, r0
+ mov.l @r0, r0
+ tst r0, r0
+ bf ftrace_stub
+
MCOUNT_ENTER()
.globl ftrace_call
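The two compare-and-branch pairs in the hunk above form the usual function-graph gate: fall into ftrace_graph_caller only when either hook has been replaced by a real tracer, otherwise take skip_trace. Flattened into C (a sketch; the kernel's actual hook types are trace_func_graph_ret_t and trace_func_graph_ent_t, simplified here to void pointers):

    /* Hypothetical flattening of the graph-tracer dispatch. */
    extern void *ftrace_graph_return;     /* loaded via .Lftrace_graph_return */
    extern void *ftrace_graph_entry;      /* loaded via .Lftrace_graph_entry */
    extern void ftrace_stub(void);
    extern void ftrace_graph_entry_stub(void);
    extern void ftrace_graph_caller(void);
    extern void do_skip_trace(void);      /* stands in for the skip_trace label */

    static void graph_dispatch(void)
    {
        if (ftrace_graph_return != (void *)ftrace_stub ||
            ftrace_graph_entry  != (void *)ftrace_graph_entry_stub)
            ftrace_graph_caller();
        else
            do_skip_trace();
    }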
@@ -70,9 +187,18 @@ ftrace_call:
jsr @r6
nop
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ bra ftrace_graph_call
+ nop
+#else
MCOUNT_LEAVE()
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */
+ .align 2
+.Lfunction_trace_stop:
+ .long function_trace_stop
+
/*
* NOTE: From here on the locations of the .Lftrace_stub label and
* ftrace_stub itself are fixed. Adding additional data here will skew
@@ -80,7 +206,6 @@ ftrace_call:
* Place new labels either after the ftrace_stub body, or before
* ftrace_caller. You have been warned.
*/
- .align 2
.Lftrace_stub:
.long ftrace_stub
@@ -88,3 +213,98 @@ ftrace_call:
ftrace_stub:
rts
nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_caller
+ftrace_graph_caller:
+ mov.l 2f, r0
+ mov.l @r0, r0
+ tst r0, r0
+ bt 1f
+
+ mov.l 3f, r1
+ jmp @r1
+ nop
+1:
+ /*
+ * MCOUNT_ENTER() pushed 5 registers onto the stack, so
+ * the stack address containing our return address is
+ * r15 + 20.
+ */
+ mov #20, r0
+ add r15, r0
+ mov r0, r4
+
+ mov.l .Lprepare_ftrace_return, r0
+ jsr @r0
+ nop
+
+ MCOUNT_LEAVE()
+
+ .align 2
+2: .long function_trace_stop
+3: .long skip_trace
+.Lprepare_ftrace_return:
+ .long prepare_ftrace_return
+
+ .globl return_to_handler
+return_to_handler:
+ /*
+ * Save the return values.
+ */
+ mov.l r0, @-r15
+ mov.l r1, @-r15
+
+ mov #0, r4
+
+ mov.l .Lftrace_return_to_handler, r0
+ jsr @r0
+ nop
+
+ /*
+ * The return value from ftrace_return_handler has the real
+ * address that we should return to.
+ */
+ lds r0, pr
+ mov.l @r15+, r1
+ rts
+ mov.l @r15+, r0
+
+
+ .align 2
+.Lftrace_return_to_handler:
+ .long ftrace_return_to_handler
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_STACK_DEBUG
+ .globl stack_panic
+stack_panic:
+ mov.l .Ldump_stack, r0
+ jsr @r0
+ nop
+
+ mov.l .Lpanic, r0
+ jsr @r0
+ mov.l .Lpanic_s, r4
+
+ rts
+ nop
+
+ .align 2
+.L_ebss:
+ .long _ebss
+.L_init_thread_union:
+ .long init_thread_union
+.Lpanic:
+ .long panic
+.Lpanic_s:
+ .long .Lpanic_str
+.Ldump_stack:
+ .long dump_stack
+
+ .section .rodata
+ .align 2
+.Lpanic_str:
+ .string "Stack error"
+#endif /* CONFIG_STACK_DEBUG */
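For the return trampoline at the end of the mcount.S diff: ftrace_graph_caller hands prepare_ftrace_return the stack slot holding the caller's return address (r15 + 20, because MCOUNT_ENTER() pushed five registers), and prepare_ftrace_return redirects that slot to return_to_handler. When the traced function returns, return_to_handler preserves the function's return values in r0/r1, asks ftrace_return_to_handler() for the original return address, and jumps there. A hypothetical C model of that last step (the handler in this era takes a frame pointer argument, which SH passes as 0 in r4):

    /* Model of return_to_handler's bookkeeping (hypothetical). */
    extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);

    static unsigned long return_trampoline_model(unsigned long saved_r0,
                                                 unsigned long saved_r1)
    {
        (void)saved_r0;   /* mov.l r0, @-r15: return value preserved */
        (void)saved_r1;   /* mov.l r1, @-r15: return value preserved */

        /* mov #0, r4: SH does not pass a frame pointer here. */
        unsigned long real_ret = ftrace_return_to_handler(0);

        return real_ret;  /* lds r0, pr; rts: jump to the real caller */
    }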