author:    Steven Rostedt <rostedt@goodmis.org>  2013-12-03 12:41:20 -0500
committer: Steven Rostedt <rostedt@goodmis.org>  2013-12-05 14:22:30 -0500
commit:    3ccb01239201af06a07482ec686b14cd148102a5 (patch)
tree:      5ad078061e71dcf23cd1ee3f4ca353060c185324 /kernel/trace/trace_syscalls.c
parent:    dc1ccc48159d63eca5089e507c82c7d22ef60839 (diff)
tracing: Only run synchronize_sched() at instance deletion time
It has been reported that boot up with FTRACE_SELFTEST enabled can take a
very long time. There can be stalls of over a minute. This was tracked down
to the synchronize_sched() called when a system call event is disabled. As
the self tests enable and disable thousands of events, this makes
synchronize_sched() get called thousands of times.

The synchronize_sched() was added with d562aff93bfb53 "tracing: Add support
for SOFT_DISABLE to syscall events", which caused this regression (added in
3.13-rc1).

The synchronize_sched() is there to protect against the events being
accessed while a tracer instance is being deleted. When an instance is
deleted, all the events associated with it are unregistered. The
synchronize_sched() makes sure that no users are still running when it
finishes.

Instead of calling synchronize_sched() for every syscall event, we only need
to call it once, after the events are unregistered and before the instance
is deleted. The event_mutex is held during this action to prevent new users
from enabling events.

Link: http://lkml.kernel.org/r/20131203124120.427b9661@gandalf.local.home

Reported-by: Petr Mladek <pmladek@suse.cz>
Acked-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Acked-by: Petr Mladek <pmladek@suse.cz>
Tested-by: Petr Mladek <pmladek@suse.cz>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--  kernel/trace/trace_syscalls.c | 10 ----------
1 file changed, 0 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11..ea90eb5 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)
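
Note: the diffstat above is limited to trace_syscalls.c, so only the removal side of the change is visible here. As a rough illustration of the pattern the commit message describes, the sketch below shows a single synchronize_sched() placed in the instance-deletion path, after all of the instance's events have been unregistered and while event_mutex is held. The function name event_trace_del_tracer() and the surrounding structure are assumptions drawn from the 3.13-era tracing code, not taken from this diff.

/*
 * Sketch only -- names and structure are assumptions, since the
 * counterpart change (in trace_events.c) is not shown in this diffstat.
 */
int event_trace_del_tracer(struct trace_array *tr)
{
	/* Holding event_mutex keeps new users from enabling events. */
	mutex_lock(&event_mutex);

	/* ... unregister/disable every event belonging to this instance ... */

	/*
	 * One grace period for the whole instance: event handlers run
	 * under rcu_read_lock_sched(), so this single call replaces the
	 * per-event synchronize_sched() calls removed above from
	 * unreg_event_syscall_enter()/unreg_event_syscall_exit().
	 */
	synchronize_sched();

	/* ... now it is safe to tear down the instance's event files ... */

	mutex_unlock(&event_mutex);

	return 0;
}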