-rw-r--r--  kernel/trace/trace.c                  35
-rw-r--r--  kernel/trace/trace.h                   3
-rw-r--r--  kernel/trace/trace_branch.c            2
-rw-r--r--  kernel/trace/trace_functions_graph.c   4
4 files changed, 33 insertions, 11 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b90a827..88111b0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -78,6 +78,13 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
}
/*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
* of the tracer is successful. But that is the only place that sets
@@ -1135,6 +1142,11 @@ void tracing_record_cmdline(struct task_struct *tsk)
!tracing_is_on())
return;
+ if (!__this_cpu_read(trace_cmdline_save))
+ return;
+
+ __this_cpu_write(trace_cmdline_save, false);
+
trace_save_cmdline(tsk);
}
@@ -1178,13 +1190,20 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
return event;
}
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+ __this_cpu_write(trace_cmdline_save, true);
+ ring_buffer_unlock_commit(buffer, event);
+}
+
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
int wake)
{
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
ftrace_trace_userstack(buffer, flags, pc);
@@ -1232,7 +1251,7 @@ void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
unsigned long flags, int pc,
struct pt_regs *regs)
{
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
@@ -1269,7 +1288,7 @@ trace_function(struct trace_array *tr,
entry->parent_ip = parent_ip;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
}
void
@@ -1362,7 +1381,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
entry->size = trace.nr_entries;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out:
/* Again, don't let gcc optimize things here */
@@ -1458,7 +1477,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
save_stack_trace_user(&trace);
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out_drop_count:
__this_cpu_dec(user_stack_count);
@@ -1653,7 +1672,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!filter_check_discard(call, entry, buffer, event)) {
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
@@ -1724,7 +1743,7 @@ int trace_array_vprintk(struct trace_array *tr,
memcpy(&entry->buf, tbuffer, len);
entry->buf[len] = '\0';
if (!filter_check_discard(call, entry, buffer, event)) {
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
out:
@@ -3993,7 +4012,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
} else
entry->buf[cnt] = '\0';
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
written = cnt;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7824a55..839ae00 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -359,6 +359,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts);
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+
int trace_empty(struct trace_iterator *iter);
void *trace_find_next_entry_inc(struct trace_iterator *iter);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index bd3e0ee..95e9684 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->correct = val == expect;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a84b558..4edb4b7 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->graph_ent = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
return 1;
}
@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->ret = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
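Note: the patch above gates comm caching on a per-CPU "an event was committed" flag. __buffer_unlock_commit() sets trace_cmdline_save whenever it commits an event to the ring buffer, and tracing_record_cmdline() only updates the saved-comm cache (and clears the flag) when that flag is set, so the cache is no longer overwritten while nothing is being traced. The following is a minimal, self-contained userspace sketch of that same dirty-flag pattern; the names (commit_event, record_cmdline, saved_comm, event_committed) are hypothetical stand-ins used only for illustration and are not part of the patch or of the kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the kernel's per-CPU trace_cmdline_save flag and comm cache. */
static bool event_committed;
static char saved_comm[16];

/* Analogue of __buffer_unlock_commit(): mark that an event was written. */
static void commit_event(const char *payload)
{
	printf("event: %s\n", payload);
	event_committed = true;
}

/* Analogue of tracing_record_cmdline(): only save the comm after a commit. */
static void record_cmdline(const char *comm)
{
	if (!event_committed)
		return;                 /* nothing traced since the last save */

	event_committed = false;
	strncpy(saved_comm, comm, sizeof(saved_comm) - 1);
}

int main(void)
{
	record_cmdline("idle");         /* skipped: no event committed yet */
	commit_event("sched_switch");
	record_cmdline("bash");         /* saved: an event was just committed */
	printf("saved comm: %s\n", saved_comm);
	return 0;
}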