author:     Anton Blanchard <anton@samba.org>        2014-09-17 17:07:04 +1000
committer:  Michael Ellerman <mpe@ellerman.id.au>    2014-11-10 09:59:28 +1100
commit:     b3c18725a0eb7ea2458e9ae3b7e5a477f52e361f (patch)
tree:       faecb26b2ba42de9abe84471f02d8230f81a2b06 /arch/powerpc/kernel/ftrace.c
parent:     7d56c65a6ff9065c459fc63c509950d8ea66e00c (diff)
powerpc/ftrace: simplify prepare_ftrace_return
Instead of passing in the stack address of the link register
to be modified, just pass in the old value, return the new
value, and rely on ftrace_graph_caller to do the modification.
This removes the exception handling around the stack update -
it isn't needed and we weren't consistent about it. Later on
we would do an unprotected modification:
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
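
The interface change amounts to the helper returning the address to divert to, instead of storing it through a pointer into the caller's stack frame itself. Below is a minimal user-space sketch of that convention (illustration only, not kernel code: lr_slot, want_to_trace and all addresses are made up), showing why the C side no longer needs a fault-protected store:

/*
 * Illustration only: a user-space sketch of the new calling convention,
 * not kernel code.  The names lr_slot and want_to_trace and the addresses
 * below are invented for the example.
 */
#include <stdio.h>

static unsigned long return_hooker = 0x1000UL;	/* stand-in for return_to_handler */

/* New-style helper: take the old return address, return the one to use. */
static unsigned long prepare_return_sketch(unsigned long parent, unsigned long ip)
{
	int want_to_trace = 1;	/* stands in for the ftrace_graph_entry() checks */

	if (!want_to_trace)
		return parent;	/* leave the return path untouched */

	printf("hooking return of function at %#lx (old LR %#lx)\n", ip, parent);
	return return_hooker;	/* divert the return path */
}

int main(void)
{
	unsigned long lr_slot = 0x2000UL;	/* pretend LR save slot on the stack */

	/*
	 * The caller now owns the store back into its own stack frame, so the
	 * C helper no longer needs the fault-protected inline asm.
	 */
	lr_slot = prepare_return_sketch(lr_slot, 0x3000UL);
	printf("LR slot now holds %#lx\n", lr_slot);
	return 0;
}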
Diffstat (limited to 'arch/powerpc/kernel/ftrace.c')
-rw-r--r--   arch/powerpc/kernel/ftrace.c   59
1 file changed, 13 insertions, 46 deletions
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index abf7921..d795031 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -512,67 +512,34 @@ int ftrace_disable_ftrace_graph_caller(void)
 
 /*
  * Hook the return address and push it in the stack of return addrs
- * in current thread info.
+ * in current thread info. Return the address we want to divert to.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
 {
-	unsigned long old;
-	int faulted;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker;
 
 	if (unlikely(ftrace_graph_is_dead()))
-		return;
+		goto out;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
-		return;
+		goto out;
 
 	return_hooker = ppc_function_entry(return_to_handler);
 
-	/*
-	 * Protect against fault, even if it shouldn't
-	 * happen. This tool is too much intrusive to
-	 * ignore such a protection.
-	 */
-	asm volatile(
-		"1: " PPC_LL "%[old], 0(%[parent])\n"
-		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
-		"   li %[faulted], 0\n"
-		"3:\n"
-
-		".section .fixup, \"ax\"\n"
-		"4: li %[faulted], 1\n"
-		"   b 3b\n"
-		".previous\n"
-
-		".section __ex_table,\"a\"\n"
-			PPC_LONG_ALIGN "\n"
-			PPC_LONG "1b,4b\n"
-			PPC_LONG "2b,4b\n"
-		".previous"
-
-		: [old] "=&r" (old), [faulted] "=r" (faulted)
-		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
-		: "memory"
-	);
-
-	if (unlikely(faulted)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		return;
-	}
-
-	trace.func = self_addr;
+	trace.func = ip;
 	trace.depth = current->curr_ret_stack + 1;
 
 	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
-		*parent = old;
-		return;
-	}
+	if (!ftrace_graph_entry(&trace))
+		goto out;
+
+	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
+		goto out;
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
-		*parent = old;
+	parent = return_hooker;
+out:
+	return parent;
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */