Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile_32         1
-rw-r--r--  arch/sh/kernel/asm-offsets.c       1
-rw-r--r--  arch/sh/kernel/ftrace.c          122
-rw-r--r--  arch/sh/kernel/process_32.c        5
-rw-r--r--  arch/sh/kernel/vmlinux_64.lds.S    0
5 files changed, 127 insertions(+), 2 deletions(-)
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index fee924a..94ed99b 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -30,6 +30,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_GENERIC_GPIO) += gpio.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_DUMP_CODE) += disassemble.o
 obj-$(CONFIG_HIBERNATION) += swsusp.o
 
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index 99aceb28..d218e80 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -26,6 +26,7 @@ int main(void)
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
+	DEFINE(TI_SIZE, sizeof(struct thread_info));
 
 #ifdef CONFIG_HIBERNATION
 	DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 4f62ece..6647dfc 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -16,11 +16,13 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <asm/ftrace.h>
 #include <asm/cacheflush.h>
 #include <asm/unistd.h>
 #include <trace/syscall.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
 
 static unsigned char ftrace_nop[4];
@@ -133,6 +135,126 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+		      unsigned long new_addr)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (old_addr != __raw_readl((unsigned long *)code))
+		return -EINVAL;
+
+	__raw_writel(new_addr, ip);
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&skip_trace);
+	new_addr = (unsigned long)(&ftrace_graph_caller);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&ftrace_graph_caller);
+	new_addr = (unsigned long)(&skip_trace);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	int faulted, err;
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * Protect against fault, even if it shouldn't
+	 * happen. This tool is too much intrusive to
+	 * ignore such a protection.
+	 */
+	__asm__ __volatile__(
+		"1:					\n\t"
+		"mov.l	@%2, %0				\n\t"
+		"2:					\n\t"
+		"mov.l	%3, @%2				\n\t"
+		"mov	#0, %1				\n\t"
+		"3:					\n\t"
+		".section .fixup, \"ax\"		\n\t"
+		"4:					\n\t"
+		"mov.l	5f, %0				\n\t"
+		"jmp	@%0				\n\t"
+		" mov	#1, %1				\n\t"
+		".balign 4				\n\t"
+		"5:	.long 3b			\n\t"
+		".previous				\n\t"
+		".section __ex_table,\"a\"		\n\t"
+		".long 1b, 4b				\n\t"
+		".long 2b, 4b				\n\t"
+		".previous				\n\t"
+		: "=&r" (old), "=r" (faulted)
+		: "r" (parent), "r" (return_hooker)
+	);
+
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		return;
+	}
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+	if (err == -EBUSY) {
+		__raw_writel(old, parent);
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		__raw_writel(old, parent);
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 92d7740..9fee977 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -23,6 +23,7 @@
 #include <linux/tick.h>
 #include <linux/reboot.h>
 #include <linux/fs.h>
+#include <linux/ftrace.h>
 #include <linux/preempt.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -264,8 +265,8 @@ static void ubc_set_tracing(int asid, unsigned long pc)
  * switch_to(x,y) should switch tasks from x to y.
  *
  */
-struct task_struct *__switch_to(struct task_struct *prev,
-				struct task_struct *next)
+__notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev, struct task_struct *next)
 {
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/arch/sh/kernel/vmlinux_64.lds.S
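
For readers unfamiliar with the ret_stack bookkeeping that prepare_ftrace_return() and ftrace_return_to_handler() perform, the standalone C sketch below models the entry/exit flow in userspace. It is illustrative only: push_return_trace(), pop_return_trace(), RET_STACK_DEPTH, and the sample address are hypothetical stand-ins for the kernel's ftrace_push_return_trace() and current->curr_ret_stack seen in the diff, not kernel API.

/*
 * Hypothetical userspace model of the function graph tracer's
 * return-address bookkeeping. Helper names, the fixed-depth array,
 * and the sample address are illustrative stand-ins, not kernel API.
 */
#include <stdio.h>

#define RET_STACK_DEPTH 32

static unsigned long ret_stack[RET_STACK_DEPTH];
static int curr_ret_stack = -1;

/* Entry side (cf. prepare_ftrace_return): stash the real return
 * address before the stack slot is redirected to return_to_handler. */
static int push_return_trace(unsigned long real_ret)
{
	if (curr_ret_stack + 1 >= RET_STACK_DEPTH)
		return -1;	/* the kernel returns -EBUSY and backs out */
	ret_stack[++curr_ret_stack] = real_ret;
	return 0;
}

/* Exit side (cf. ftrace_return_to_handler): recover the stashed
 * address so the traced function returns to its real caller. */
static unsigned long pop_return_trace(void)
{
	return ret_stack[curr_ret_stack--];
}

int main(void)
{
	/* Pretend *parent held this caller address on function entry. */
	unsigned long real_ret = 0x8c001234UL;

	if (push_return_trace(real_ret) == 0)
		printf("entry: saved %#lx, *parent now -> return_to_handler\n",
		       real_ret);

	/* When the traced function returns, the handler jumps here. */
	printf("exit:  restoring return to %#lx\n", pop_return_trace());
	return 0;
}

The kernel version additionally honours tracing_graph_pause and can back the hook out when ftrace_graph_entry() declines to trace; on SH the *parent load and store must go through the exception-table-protected asm block shown in the diff, since they touch another frame's stack slot and a fault there must not crash the kernel.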