author	Steven Rostedt <rostedt@goodmis.org>	2008-10-01 13:14:09 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-14 10:39:09 +0200
commit	38697053fa006411224a1790e2adb8216440ab0f (patch)
tree	30daab3a6ba93f1c8c922397ffe8a0d6a220e8b1 /kernel/trace/trace_sched_switch.c
parent	e4c2ce82ca2710e17cb4df8eb2b249fa2eb5af30 (diff)
ftrace: preempt disable over interrupt disable
With the new ring buffer infrastructure in ftrace, I'm trying to make
ftrace a little more lightweight.

This patch converts a lot of the local_irq_save/restore into
preempt_disable/enable. The original preempt count in a lot of cases
has to be sent in as a parameter so that it can be recorded correctly.
Some places were recording it incorrectly before anyway.

This is also laying the ground work to make ftrace a little bit more
reentrant, and remove all locking. The function tracers must still
protect from reentrancy.

Note: All the function tracers must be careful when using
preempt_disable. It must do the following:

  resched = need_resched();
  preempt_disable_notrace();
  [...]
  if (resched)
        preempt_enable_no_resched_notrace();
  else
        preempt_enable_notrace();

The reason is that if this function traces schedule() itself, the
preempt_enable_notrace() will cause a schedule, which will lead us into
a recursive failure.

If we needed to reschedule before calling preempt_disable, we should
have already scheduled. Since we did not, it is most likely that we
should not, and we are probably inside a schedule function.

If resched was not set, we still need to catch the need resched flag
being set while preemption was off, and the if case at the end will
catch that for us.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
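As a rough illustration of the note above, here is a minimal sketch of how a function-tracer callback might apply that pattern. The callback name my_trace_call and the "record the entry" step are placeholders for illustration only; they are not code from this patch.

	/*
	 * Hypothetical function-tracer callback following the pattern
	 * described in the commit message.
	 */
	static void my_trace_call(unsigned long ip, unsigned long parent_ip)
	{
		int resched;

		/* Remember whether a reschedule was already pending. */
		resched = need_resched();
		preempt_disable_notrace();

		/* ... record the trace entry for ip/parent_ip here ... */

		/*
		 * If the need-resched flag was set before we disabled
		 * preemption, we are most likely inside schedule() itself,
		 * so re-enable preemption without rescheduling to avoid
		 * recursing back into the tracer.
		 */
		if (resched)
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}

If the flag was only set while preemption was disabled, the plain preempt_enable_notrace() in the else branch still performs the deferred reschedule check, which is the case the commit message calls out at the end.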
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r--	kernel/trace/trace_sched_switch.c	9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index e0b06db..c7fa08a 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -26,6 +26,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	unsigned long flags;
 	long disabled;
 	int cpu;
+	int pc;
 
 	if (!atomic_read(&sched_ref))
 		return;
@@ -36,13 +37,14 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	if (!tracer_enabled)
 		return;
 
+	pc = preempt_count();
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);
+		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -54,11 +56,12 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
-	int cpu;
+	int cpu, pc;
 
 	if (!likely(tracer_enabled))
 		return;
 
+	pc = preempt_count();
 	tracing_record_cmdline(current);
 
 	local_irq_save(flags);
@@ -68,7 +71,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 
 	if (likely(disabled == 1))
 		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
-					   flags);
+					   flags, pc);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
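Both hunks above share one shape: the preempt count is sampled before the probe does any disabling of its own, then passed down so the recorded value reflects the traced context rather than the tracer. A condensed sketch of that ordering, where record_ctx_switch() is a hypothetical stand-in for the real tracing_sched_switch_trace() helper shown in the diff:

	/*
	 * Hypothetical probe body illustrating the ordering used in this
	 * patch. record_ctx_switch() is a placeholder, not a kernel API.
	 */
	static void probe_example(struct rq *rq, struct task_struct *prev,
				  struct task_struct *next)
	{
		unsigned long flags;
		int pc;

		pc = preempt_count();	/* caller's count, sampled first */
		local_irq_save(flags);	/* tracer-side protection afterwards */

		record_ctx_switch(prev, next, flags, pc);

		local_irq_restore(flags);
	}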