author    Linus Torvalds <torvalds@linux-foundation.org>    2018-04-10 11:27:30 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-04-10 11:27:30 -0700
commit    2a56bb596b2c1fb612f9988afda9655c8c872a6e (patch)
tree      8f76cd7a0d4f5a46e00d45e5605e161d4e16b81e /init
parent    9f3a0941fb5efaa4d27911e251dc595034d58baa (diff)
parent    b0dc52f15e7fe2b973ecfe4f3706f1b35ce3943a (diff)
Merge tag 'trace-v4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "New features:

  - Tom Zanussi's extended histogram work.

    This adds the synthetic events to have histograms from multiple
    event data.

    Adds triggers "onmatch" and "onmax" to call the synthetic events.

    Several updates to the histogram code from this.

  - Allow way to nest ring buffer calls in the same context

  - Allow absolute time stamps in ring buffer

  - Rewrite of filter code parsing based on Al Viro's suggestions

  - Setting of trace_clock to global if TSC is unstable (on boot)

  - Better OOM handling when allocating large ring buffers

  - Added initcall tracepoints (consolidated initcall_debug code with
    them)

  And other various fixes and clean ups"

* tag 'trace-v4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (68 commits)
  init: Have initcall_debug still work without CONFIG_TRACEPOINTS
  init, tracing: Have printk come through the trace events for initcall_debug
  init, tracing: instrument security and console initcall trace events
  init, tracing: Add initcall trace events
  tracing: Add rcu dereference annotation for test func that touches filter->prog
  tracing: Add rcu dereference annotation for filter->prog
  tracing: Fixup logic inversion on setting trace_global_clock defaults
  tracing: Hide global trace clock from lockdep
  ring-buffer: Add set/clear_current_oom_origin() during allocations
  ring-buffer: Check if memory is available before allocation
  lockdep: Add print_irqtrace_events() to __warn
  vsprintf: Do not preprocess non-dereferenced pointers for bprintf (%px and %pK)
  tracing: Uninitialized variable in create_tracing_map_fields()
  tracing: Make sure variable string fields are NULL-terminated
  tracing: Add action comparisons when testing matching hist triggers
  tracing: Don't add flag strings when displaying variable references
  tracing: Fix display of hist trigger expressions containing timestamps
  ftrace: Drop a VLA in module_exists()
  tracing: Mention trace_clock=global when warning about unstable clocks
  tracing: Default to using trace_global_clock if sched_clock is unstable
  ...
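The init/main.c part of this merge replaces the old do_one_initcall_debug() wrapper with a "start" callback that records a timestamp and a "finish" callback that reports the elapsed time, both hung off the new initcall tracepoints. The following is a minimal userspace sketch of that timing pattern, assuming nothing beyond standard C; the names here (run_one, start_cb, finish_cb, demo_init) are illustrative only and are not the kernel's API, which registers trace_initcall_start_cb()/trace_initcall_finish_cb() through register_trace_initcall_start()/register_trace_initcall_finish() as shown in the diff below.

/* Hypothetical userspace sketch of the start/finish timing pattern. */
#include <stdio.h>
#include <time.h>

typedef int (*initcall_t)(void);

static struct timespec calltime;

/* Mirrors trace_initcall_start_cb(): log the call and stamp the start time. */
static void start_cb(initcall_t fn)
{
	printf("calling %p\n", (void *)fn);
	clock_gettime(CLOCK_MONOTONIC, &calltime);
}

/* Mirrors trace_initcall_finish_cb(): report return value and duration. */
static void finish_cb(initcall_t fn, int ret)
{
	struct timespec rettime;
	long long usecs;

	clock_gettime(CLOCK_MONOTONIC, &rettime);
	usecs = (rettime.tv_sec - calltime.tv_sec) * 1000000LL +
		(rettime.tv_nsec - calltime.tv_nsec) / 1000;
	printf("initcall %p returned %d after %lld usecs\n",
	       (void *)fn, ret, usecs);
}

/* Mirrors the reworked do_one_initcall(): hooks wrap the call itself. */
static int run_one(initcall_t fn)
{
	int ret;

	start_cb(fn);		/* do_trace_initcall_start() in the kernel */
	ret = fn();
	finish_cb(fn, ret);	/* do_trace_initcall_finish() in the kernel */
	return ret;
}

static int demo_init(void)
{
	return 0;
}

int main(void)
{
	return run_one(demo_init);
}

The point of the split, visible in the hunks below, is that the timing code no longer needs a special-cased call path: do_one_initcall() always calls the hooks, and they either fire as tracepoint callbacks (when TRACEPOINTS_ENABLED) or fall back to direct calls gated on initcall_debug.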
Diffstat (limited to 'init')
-rw-r--r--  init/main.c  84
1 file changed, 67 insertions, 17 deletions
diff --git a/init/main.c b/init/main.c
index e4a3160..d499f4a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -97,6 +97,9 @@
#include <asm/sections.h>
#include <asm/cacheflush.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/initcall.h>
+
static int kernel_init(void *);
extern void init_IRQ(void);
@@ -491,6 +494,17 @@ void __init __weak thread_stack_cache_init(void)
void __init __weak mem_encrypt_init(void) { }
+bool initcall_debug;
+core_param(initcall_debug, initcall_debug, bool, 0644);
+
+#ifdef TRACEPOINTS_ENABLED
+static void __init initcall_debug_enable(void);
+#else
+static inline void initcall_debug_enable(void)
+{
+}
+#endif
+
/*
* Set up kernel memory allocators
*/
@@ -612,6 +626,9 @@ asmlinkage __visible void __init start_kernel(void)
/* Trace events are available after this */
trace_init();
+ if (initcall_debug)
+ initcall_debug_enable();
+
context_tracking_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
@@ -728,9 +745,6 @@ static void __init do_ctors(void)
#endif
}
-bool initcall_debug;
-core_param(initcall_debug, initcall_debug, bool, 0644);
-
#ifdef CONFIG_KALLSYMS
struct blacklist_entry {
struct list_head next;
@@ -800,37 +814,71 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
#endif
__setup("initcall_blacklist=", initcall_blacklist);
-static int __init_or_module do_one_initcall_debug(initcall_t fn)
+static __init_or_module void
+trace_initcall_start_cb(void *data, initcall_t fn)
{
- ktime_t calltime, delta, rettime;
- unsigned long long duration;
- int ret;
+ ktime_t *calltime = (ktime_t *)data;
printk(KERN_DEBUG "calling %pF @ %i\n", fn, task_pid_nr(current));
- calltime = ktime_get();
- ret = fn();
+ *calltime = ktime_get();
+}
+
+static __init_or_module void
+trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
+{
+ ktime_t *calltime = (ktime_t *)data;
+ ktime_t delta, rettime;
+ unsigned long long duration;
+
rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
+ delta = ktime_sub(rettime, *calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n",
fn, ret, duration);
+}
- return ret;
+static ktime_t initcall_calltime;
+
+#ifdef TRACEPOINTS_ENABLED
+static void __init initcall_debug_enable(void)
+{
+ int ret;
+
+ ret = register_trace_initcall_start(trace_initcall_start_cb,
+ &initcall_calltime);
+ ret |= register_trace_initcall_finish(trace_initcall_finish_cb,
+ &initcall_calltime);
+ WARN(ret, "Failed to register initcall tracepoints\n");
}
+# define do_trace_initcall_start trace_initcall_start
+# define do_trace_initcall_finish trace_initcall_finish
+#else
+static inline void do_trace_initcall_start(initcall_t fn)
+{
+ if (!initcall_debug)
+ return;
+ trace_initcall_start_cb(&initcall_calltime, fn);
+}
+static inline void do_trace_initcall_finish(initcall_t fn, int ret)
+{
+ if (!initcall_debug)
+ return;
+ trace_initcall_finish_cb(&initcall_calltime, fn, ret);
+}
+#endif /* !TRACEPOINTS_ENABLED */
int __init_or_module do_one_initcall(initcall_t fn)
{
int count = preempt_count();
- int ret;
char msgbuf[64];
+ int ret;
if (initcall_blacklisted(fn))
return -EPERM;
- if (initcall_debug)
- ret = do_one_initcall_debug(fn);
- else
- ret = fn();
+ do_trace_initcall_start(fn);
+ ret = fn();
+ do_trace_initcall_finish(fn, ret);
msgbuf[0] = 0;
@@ -874,7 +922,7 @@ static initcall_t *initcall_levels[] __initdata = {
/* Keep these in sync with initcalls in include/linux/init.h */
static char *initcall_level_names[] __initdata = {
- "early",
+ "pure",
"core",
"postcore",
"arch",
@@ -895,6 +943,7 @@ static void __init do_initcall_level(int level)
level, level,
NULL, &repair_env_string);
+ trace_initcall_level(initcall_level_names[level]);
for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
do_one_initcall(*fn);
}
@@ -929,6 +978,7 @@ static void __init do_pre_smp_initcalls(void)
{
initcall_t *fn;
+ trace_initcall_level("early");
for (fn = __initcall_start; fn < __initcall0_start; fn++)
do_one_initcall(*fn);
}