author		Steven Rostedt <rostedt@goodmis.org>	2008-10-04 02:01:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-14 10:39:20 +0200
commit		3ea2e6d71aafe35b8aaf89ed711a283815acfae6
tree		e4bae61f9bbe5ff7ccf6eac95416b98ebd4974a4 /kernel/trace
parent		bf41a158cacba6ca5fc6407a54e7ad8ce1567e2e
ftrace: make some tracers reentrant
Now that the ring buffer is reentrant, some of the ftrace tracers
(sched_switch, debugging traces) can also be reentrant.
Note: Never make the function tracer reentrant; that can cause
recursion problems all over the kernel. The function tracer
must disable reentrancy.
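
To make the pattern concrete, here is a minimal userspace C sketch; it
is not kernel code, the names (guarded_trace, reentrant_trace,
write_event) are invented for illustration, and C11 atomics stand in
for the kernel's atomic_t helpers. It contrasts the inc/dec guard the
function tracer must keep (a recursive entry sees a count above 1 and
bails out) with the plain read that is now sufficient for tracers
backed by the reentrant ring buffer:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the per-cpu trace_array_cpu->disabled counter. */
static atomic_long disabled;

static void write_event(const char *what)
{
	printf("trace: %s\n", what);
}

/*
 * Old pattern, kept by the function tracer: bump the counter on entry,
 * trace only if we are the sole holder, drop it on exit.  A call that
 * recurses back into the tracer sees the counter at 2 and is dropped,
 * which is what stops runaway recursion.
 */
static void guarded_trace(const char *what)
{
	long d = atomic_fetch_add(&disabled, 1) + 1; /* ~ atomic_inc_return() */

	if (d == 1)				/* outermost entry only */
		write_event(what);

	atomic_fetch_sub(&disabled, 1);		/* ~ atomic_dec() */
}

/*
 * New pattern from this commit: with a reentrant ring buffer the tracer
 * only honors an externally set "disabled" flag, so a plain read is
 * enough and nested trace calls are accepted.
 */
static void reentrant_trace(const char *what)
{
	if (atomic_load(&disabled) == 0) /* ~ !atomic_read(&data->disabled) */
		write_event(what);
}

int main(void)
{
	guarded_trace("function tracer style");
	reentrant_trace("sched_switch style");
	return 0;
}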
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace.c	10
-rw-r--r--	kernel/trace/trace_sched_switch.c	10
2 files changed, 4 insertions, 16 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1cd2e81..caa4051 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -839,7 +839,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -850,12 +849,10 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 }
 
@@ -2961,7 +2958,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	struct trace_array_cpu *data;
 	struct print_entry *entry;
 	unsigned long flags, irq_flags;
-	long disabled;
 	int cpu, len = 0, size, pc;
 
 	if (!tr->ctrl || tracing_disabled)
@@ -2971,9 +2967,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (unlikely(disabled != 1))
+	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
 	spin_lock_irqsave(&trace_buf_lock, flags);
@@ -2999,7 +2994,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	spin_unlock_irqrestore(&trace_buf_lock, flags);
 
  out:
-	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 
 	return len;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index c7fa08a..b8f56be 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -24,7 +24,6 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -41,12 +40,10 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
@@ -55,7 +52,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu, pc;
 
 	if (!likely(tracer_enabled))
@@ -67,13 +63,11 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
 					   flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }