author    Frederic Weisbecker <fweisbec@gmail.com>    2009-12-17 05:40:34 +0100
committer Ingo Molnar <mingo@elte.hu>                 2009-12-17 10:42:52 +0100
commit    06d65bda75341485d32f33da474b0664819ad497 (patch)
tree      3b5edc58b9c5a6a9b5cff4b5886f54929b12863a
parent    61c1917f47f73c968e92d04d15370b1dc3ec4592 (diff)
perf events, x86/stacktrace: Fix performance/softlockup by providing a special frame pointer-only stack walker
It's just wasteful for stacktrace users like perf to walk through every
entry on the stack when they only accept reliable ones, i.e. those that
the frame pointer validates. Since perf requires purely reliable
stacktraces, give it a stack walker based on frame pointers only, to
optimize stacktrace processing. This might solve some near-lockup
scenarios that can be triggered by call-graph tracing timer events.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1261024834-5336-2-git-send-regression-fweisbec@gmail.com>
[ v2: fix for modular builds and small detail tidyup ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
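For context, a minimal sketch (not part of this commit) of the frame-pointer
chain that such a walker follows when the kernel is built with
CONFIG_FRAME_POINTER; walk_bp_chain() and record() are hypothetical,
illustration-only names, while the struct matches the stack_frame layout
used in the diff below:

	/*
	 * On x86, each function prologue pushes the caller's frame
	 * pointer, so %rbp/%ebp points at a (saved frame pointer,
	 * return address) pair: the struct stack_frame that
	 * print_context_stack_bp() follows in the diff below.
	 */
	struct stack_frame {
		struct stack_frame *next_frame;  /* caller's saved frame pointer */
		unsigned long return_address;    /* return address into the caller */
	};

	static void walk_bp_chain(unsigned long bp)  /* hypothetical helper */
	{
		struct stack_frame *frame = (struct stack_frame *)bp;

		while (frame) {  /* the real walker bounds-checks via valid_stack_ptr() */
			record(frame->return_address);  /* hypothetical consumer */
			frame = frame->next_frame;
		}
	}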
-rw-r--r--  arch/x86/include/asm/stacktrace.h |  6 ++++++
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  |  2 +-
-rw-r--r--  arch/x86/kernel/dumpstack.c       | 28 ++++++++++++++++++++++++++--
3 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 6c75151..35e8912 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -22,6 +22,12 @@ print_context_stack(struct thread_info *tinfo,
                     const struct stacktrace_ops *ops, void *data,
                     unsigned long *end, int *graph);
 
+extern unsigned long
+print_context_stack_bp(struct thread_info *tinfo,
+                       unsigned long *stack, unsigned long bp,
+                       const struct stacktrace_ops *ops, void *data,
+                       unsigned long *end, int *graph);
+
 /* Generic stack tracer with callbacks */
 
 struct stacktrace_ops {
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index d3802ee..c223b7e 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2336,7 +2336,7 @@ static const struct stacktrace_ops backtrace_ops = {
         .warning_symbol         = backtrace_warning_symbol,
         .stack                  = backtrace_stack,
         .address                = backtrace_address,
-        .walk_stack             = print_context_stack,
+        .walk_stack             = print_context_stack_bp,
 };
 
 #include "../dumpstack.h"
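How this one-line switch takes effect: dump_trace() invokes whichever walker
the stacktrace_ops supply, so perf's callchain path now runs the
frame-pointer-only walker instead of scanning every stack word. Roughly the
call site, simplified from dump_trace() in dumpstack_32.c (a sketch; the
64-bit variant passes a per-exception-stack end pointer instead of NULL):

	bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);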
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 8aaa119..c56bc28 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -109,6 +109,30 @@ print_context_stack(struct thread_info *tinfo,
         }
         return bp;
 }
+EXPORT_SYMBOL_GPL(print_context_stack);
+
+unsigned long
+print_context_stack_bp(struct thread_info *tinfo,
+                       unsigned long *stack, unsigned long bp,
+                       const struct stacktrace_ops *ops, void *data,
+                       unsigned long *end, int *graph)
+{
+        struct stack_frame *frame = (struct stack_frame *)bp;
+        unsigned long *ret_addr = &frame->return_address;
+
+        while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+                unsigned long addr = *ret_addr;
+
+                if (__kernel_text_address(addr)) {
+                        ops->address(data, addr, 1);
+                        frame = frame->next_frame;
+                        ret_addr = &frame->return_address;
+                        print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+                }
+        }
+        return (unsigned long)frame;
+}
+EXPORT_SYMBOL_GPL(print_context_stack_bp);
 
 static void
@@ -143,8 +167,8 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
 
 static const struct stacktrace_ops print_trace_ops = {
         .warning                = print_trace_warning,
         .warning_symbol         = print_trace_warning_symbol,
-        .stack          = print_trace_stack,
-        .address        = print_trace_address,
+        .stack                  = print_trace_stack,
+        .address                = print_trace_address,
         .walk_stack             = print_context_stack,
 };
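A note on the constant 1 in ops->address(data, addr, 1) above: the third
parameter is the "reliable" flag (compare print_trace_address() in the last
hunk), and the bp walker can always pass 1 because every address it reports
came off the validated frame-pointer chain. A consumer callback would look
roughly like this sketch; example_address() and record_entry() are
hypothetical names, not from this commit:

	static void example_address(void *data, unsigned long addr, int reliable)
	{
		/* a frame-pointer-only walker reports reliable == 1 for all entries */
		if (reliable)
			record_entry(data, addr);  /* hypothetical consumer */
	}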