author	Steven Rostedt <srostedt@redhat.com>	2009-04-17 16:13:55 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-04-17 16:21:35 -0400
commit	3189cdb31622f4e40688ce5a6fc5d940b42bc805 (patch)
tree	89f370c0079281c55f37230c17951514c1b508c9 /kernel
parent	261842b7c9099f56de2eb969c8ad65402d68e00e (diff)
tracing: protect trace_printk from recursion
trace_printk can be called from any context, including NMIs. If this
happens, then we must test for recursion before grabbing any spinlocks.

This patch prevents trace_printk from being called recursively.

[ Impact: prevent hard lockup in lockdep event tracer ]

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
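The guard works by atomically incrementing the per-CPU disabled counter on
entry: only the caller that takes the counter from 0 to 1 proceeds, while a
nested caller (for example an NMI that interrupts the first call) sees a
value greater than 1 and bails out before touching any ring-buffer
spinlocks. Below is a minimal userspace sketch of the same pattern, assuming
C11 atomics in place of the kernel's atomic_t helpers and a single global
counter instead of the kernel's per-CPU one; guarded_printk() and do_emit()
are hypothetical names, not part of this patch.

#include <stdatomic.h>
#include <stdio.h>

/* Stands in for data->disabled; per-CPU in the kernel, global here. */
static atomic_int disabled;

/* Stands in for the spinlock-protected ring-buffer write. */
static void do_emit(const char *msg)
{
	puts(msg);
}

static void guarded_printk(const char *msg)
{
	/*
	 * atomic_fetch_add() returns the old value, so old + 1 mirrors the
	 * kernel's atomic_inc_return().  Only the caller that raises the
	 * counter to exactly 1 may proceed; a recursive entry sees > 1.
	 */
	if (atomic_fetch_add(&disabled, 1) + 1 != 1)
		goto out;

	do_emit(msg);
out:
	/* Always undo the increment, whether or not we emitted. */
	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	guarded_printk("hello");
	return 0;
}

The key design point is that the test and the mark happen in one atomic
step: reading the increment's return value leaves no window between checking
for recursion and claiming the counter, which is what makes the guard safe
even against NMIs, and decrementing on the shared out: label restores the
counter on every exit path.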
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace.c	| 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 183d788..b9a3adc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1259,6 +1259,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
 	unsigned long flags;
+	int disable;
 	int resched;
 	int cpu, len = 0, size, pc;
 
@@ -1273,7 +1274,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (unlikely(atomic_read(&data->disabled)))
+	disable = atomic_inc_return(&data->disabled);
+	if (unlikely(disable != 1))
 		goto out;
 
 	/* Lockdep uses trace_printk for lock tracing */
@@ -1301,6 +1303,7 @@ out_unlock:
 	local_irq_restore(flags);
 
 out:
+	atomic_dec_return(&data->disabled);
 	ftrace_preempt_enable(resched);
 	unpause_graph_tracing();
 
@@ -1320,6 +1323,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	int cpu, len = 0, size, pc;
 	struct print_entry *entry;
 	unsigned long irq_flags;
+	int disable;
 
 	if (tracing_disabled || tracing_selftest_running)
 		return 0;
@@ -1329,7 +1333,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (unlikely(atomic_read(&data->disabled)))
+	disable = atomic_inc_return(&data->disabled);
+	if (unlikely(disable != 1))
 		goto out;
 
 	pause_graph_tracing();
@@ -1357,6 +1362,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
+	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
 
 	return len;