Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--   kernel/trace/ftrace.c   215
1 file changed, 91 insertions(+), 124 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index bb60732..8c804e2 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx)
pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
again:
- rec++;
+ if (idx != 0)
+ rec++;
+
if ((void *)rec >= (void *)&pg->records[pg->index]) {
pg = pg->next;
if (!pg)
@@ -766,7 +768,7 @@ static struct tracer_stat function_stats __initdata = {
.stat_show = function_stat_show
};
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
struct ftrace_profile_stat *stat;
struct dentry *entry;
@@ -784,7 +786,6 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
* The files created are permanent, if something happens
* we still do not free memory.
*/
- kfree(stat);
WARN(1,
"Could not allocate stat file for cpu %d\n",
cpu);
@@ -811,7 +812,7 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
}
#else /* CONFIG_FUNCTION_PROFILER */
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
@@ -1015,71 +1016,35 @@ static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
unsigned long ftrace_addr;
- unsigned long ip, fl;
+ unsigned long flag = 0UL;
ftrace_addr = (unsigned long)FTRACE_ADDR;
- ip = rec->ip;
-
/*
- * If this record is not to be traced and
- * it is not enabled then do nothing.
+ * If this record is not to be traced or we want to disable it,
+ * then disable it.
*
- * If this record is not to be traced and
- * it is enabled then disable it.
+ * If we want to enable it and filtering is off, then enable it.
*
+ * If we want to enable it and filtering is on, enable it only if
+ * it's filtered
*/
- if (rec->flags & FTRACE_FL_NOTRACE) {
- if (rec->flags & FTRACE_FL_ENABLED)
- rec->flags &= ~FTRACE_FL_ENABLED;
- else
- return 0;
-
- } else if (ftrace_filtered && enable) {
- /*
- * Filtering is on:
- */
-
- fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
-
- /* Record is filtered and enabled, do nothing */
- if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
- return 0;
-
- /* Record is not filtered or enabled, do nothing */
- if (!fl)
- return 0;
-
- /* Record is not filtered but enabled, disable it */
- if (fl == FTRACE_FL_ENABLED)
- rec->flags &= ~FTRACE_FL_ENABLED;
- else
- /* Otherwise record is filtered but not enabled, enable it */
- rec->flags |= FTRACE_FL_ENABLED;
- } else {
- /* Disable or not filtered */
-
- if (enable) {
- /* if record is enabled, do nothing */
- if (rec->flags & FTRACE_FL_ENABLED)
- return 0;
-
- rec->flags |= FTRACE_FL_ENABLED;
-
- } else {
+ if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
+ if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
+ flag = FTRACE_FL_ENABLED;
+ }
- /* if record is not enabled, do nothing */
- if (!(rec->flags & FTRACE_FL_ENABLED))
- return 0;
+ /* If the state of this record hasn't changed, then do nothing */
+ if ((rec->flags & FTRACE_FL_ENABLED) == flag)
+ return 0;
- rec->flags &= ~FTRACE_FL_ENABLED;
- }
+ if (flag) {
+ rec->flags |= FTRACE_FL_ENABLED;
+ return ftrace_make_call(rec, ftrace_addr);
}
- if (rec->flags & FTRACE_FL_ENABLED)
- return ftrace_make_call(rec, ftrace_addr);
- else
- return ftrace_make_nop(NULL, rec, ftrace_addr);
+ rec->flags &= ~FTRACE_FL_ENABLED;
+ return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
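Editorial note (not part of the patch): the rewritten __ftrace_replace_code() collapses the old branch tree into "compute the desired flag, then compare it with the record's current state". The standalone sketch below illustrates that decision; the FL_* constants and the desired_flag()/main() harness are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

#define FL_ENABLED	0x1
#define FL_FILTER	0x2
#define FL_NOTRACE	0x4

/* Desired state: enable only if tracing is wanted, the record is not
 * blacklisted, and it passes the filter (or no filter is active). */
static int desired_flag(unsigned long flags, int enable, int filtered)
{
	if (enable && !(flags & FL_NOTRACE) &&
	    (!filtered || (flags & FL_FILTER)))
		return FL_ENABLED;
	return 0;
}

int main(void)
{
	unsigned long rec_flags = FL_FILTER;	/* filtered, currently disabled */
	int flag = desired_flag(rec_flags, 1, 1);

	if ((rec_flags & FL_ENABLED) == flag)
		printf("no change needed\n");
	else
		printf("state changes: %s\n",
		       flag ? "patch call site to call" : "patch call site to nop");
	return 0;
}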
@@ -1224,6 +1189,13 @@ static void ftrace_shutdown(int command)
return;
ftrace_start_up--;
+ /*
+ * Just warn in case of an imbalance; no need to kill ftrace, it's not
+ * critical, but the ftrace_call callers may never be nopped again after
+ * further ftrace uses.
+ */
+ WARN_ON_ONCE(ftrace_start_up < 0);
+
if (!ftrace_start_up)
command |= FTRACE_DISABLE_CALLS;
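Editorial note (not part of the patch): the WARN_ON_ONCE() added above is the usual "warn once on refcount underflow, but keep going" guard for a start/stop counter. A tiny standalone sketch of the same idea, using made-up demo_* names rather than kernel code:

#include <stdio.h>

static int start_up;	/* stand-in for ftrace_start_up */
static int warned;

static void demo_startup(void)
{
	start_up++;
}

static void demo_shutdown(void)
{
	start_up--;
	/* An unbalanced shutdown is a caller bug, but it is not fatal. */
	if (start_up < 0 && !warned) {
		warned = 1;
		fprintf(stderr, "WARNING: unbalanced shutdown (%d)\n", start_up);
	}
	if (!start_up)
		printf("disabling calls\n");
}

int main(void)
{
	demo_startup();
	demo_shutdown();	/* balanced: counter hits 0, calls are disabled */
	demo_shutdown();	/* unbalanced: warns once, counter goes negative */
	return 0;
}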
@@ -1367,7 +1339,6 @@ struct ftrace_iterator {
unsigned flags;
unsigned char buffer[FTRACE_BUFF_MAX+1];
unsigned buffer_idx;
- unsigned filtered;
};
static void *
@@ -1410,28 +1381,33 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
+
+ if (!(iter->flags & FTRACE_ITER_HASH))
+ *pos = 0;
iter->flags |= FTRACE_ITER_HASH;
- return t_hash_next(m, p, pos);
+ iter->hidx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_hash_next(m, p, &l);
+ if (!p)
+ break;
+ }
+ return p;
}
static int t_hash_show(struct seq_file *m, void *v)
{
struct ftrace_func_probe *rec;
struct hlist_node *hnd = v;
- char str[KSYM_SYMBOL_LEN];
rec = hlist_entry(hnd, struct ftrace_func_probe, node);
if (rec->ops->print)
return rec->ops->print(m, rec->ip, rec->ops, rec->data);
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
- seq_printf(m, "%s:", str);
-
- kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
- seq_printf(m, "%s", str);
+ seq_printf(m, "%pf:%pf", (void *)rec->ip, (void *)rec->ops->func);
if (rec->data)
seq_printf(m, ":%p", rec->data);
@@ -1460,8 +1436,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
iter->pg = iter->pg->next;
iter->idx = 0;
goto retry;
- } else {
- iter->idx = -1;
}
} else {
rec = &iter->pg->records[iter->idx++];
@@ -1490,6 +1464,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
mutex_lock(&ftrace_lock);
/*
@@ -1501,23 +1476,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
- (*pos)++;
return iter;
}
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_start(m, pos);
- if (*pos > 0) {
- if (iter->idx < 0)
- return p;
- (*pos)--;
- iter->idx--;
+ iter->pg = ftrace_pages_start;
+ iter->idx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_next(m, p, &l);
+ if (!p)
+ break;
}
- p = t_next(m, p, pos);
-
- if (!p)
+ if (!p && iter->flags & FTRACE_ITER_FILTER)
return t_hash_start(m, pos);
return p;
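Editorial note (not part of the patch): the new t_start()/t_hash_start() follow the common seq_file idiom of rewinding the iterator and replaying ->next() until the requested *pos is reached, instead of trusting an index cached across lseek(). Below is a minimal userspace sketch of that idiom; the demo_* names and the items[] table are hypothetical, not kernel API.

#include <stdio.h>

static const char *items[] = { "func_a", "func_b", "func_c" };
#define NITEMS	(sizeof(items) / sizeof(items[0]))

static size_t idx;	/* iterator cursor, reset by demo_start() */

static const void *demo_next(const void *v, long long *pos)
{
	(void)v;
	(*pos)++;
	if (idx >= NITEMS)
		return NULL;
	return &items[idx++];
}

static const void *demo_start(long long *pos)
{
	const void *p = NULL;
	long long l;

	idx = 0;	/* rewind, then replay ->next() up to *pos */
	for (l = 0; l <= *pos; ) {
		p = demo_next(p, &l);
		if (!p)
			break;
	}
	return p;
}

int main(void)
{
	long long pos = 2;	/* as if the reader had seeked to entry 2 */
	const void *p = demo_start(&pos);

	printf("%s\n", p ? *(const char *const *)p : "(end)");	/* prints func_c */
	return 0;
}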
@@ -1532,7 +1505,6 @@ static int t_show(struct seq_file *m, void *v)
{
struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = v;
- char str[KSYM_SYMBOL_LEN];
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_show(m, v);
@@ -1545,9 +1517,7 @@ static int t_show(struct seq_file *m, void *v)
if (!rec)
return 0;
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-
- seq_printf(m, "%s\n", str);
+ seq_printf(m, "%pf\n", (void *)rec->ip);
return 0;
}
@@ -1586,17 +1556,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
return ret;
}
-int ftrace_avail_release(struct inode *inode, struct file *file)
-{
- struct seq_file *m = (struct seq_file *)file->private_data;
- struct ftrace_iterator *iter = m->private;
-
- seq_release(inode, file);
- kfree(iter);
-
- return 0;
-}
-
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
@@ -1647,7 +1606,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
mutex_lock(&ftrace_regex_lock);
if ((file->f_mode & FMODE_WRITE) &&
- !(file->f_flags & O_APPEND))
+ (file->f_flags & O_TRUNC))
ftrace_filter_reset(enable);
if (file->f_mode & FMODE_READ) {
@@ -2263,7 +2222,11 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
read++;
cnt--;
- if (!(iter->flags & ~FTRACE_ITER_CONT)) {
+ /*
+ * If the parser hasn't finished with the last write,
+ * continue reading the user input without skipping spaces.
+ */
+ if (!(iter->flags & FTRACE_ITER_CONT)) {
/* skip white space */
while (cnt && isspace(ch)) {
ret = get_user(ch, ubuf++);
@@ -2273,8 +2236,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
cnt--;
}
+ /* only spaces were written */
if (isspace(ch)) {
- file->f_pos += read;
+ *ppos += read;
ret = read;
goto out;
}
@@ -2297,19 +2261,18 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
}
if (isspace(ch)) {
- iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
ret = ftrace_process_regex(iter->buffer,
iter->buffer_idx, enable);
if (ret)
goto out;
iter->buffer_idx = 0;
- } else
+ } else {
iter->flags |= FTRACE_ITER_CONT;
+ iter->buffer[iter->buffer_idx++] = ch;
+ }
-
- file->f_pos += read;
-
+ *ppos += read;
ret = read;
out:
mutex_unlock(&ftrace_regex_lock);
@@ -2428,7 +2391,6 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
iter = file->private_data;
if (iter->buffer_idx) {
- iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
}
@@ -2459,14 +2421,14 @@ static const struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = ftrace_avail_release,
+ .release = seq_release_private,
};
static const struct file_operations ftrace_failures_fops = {
.open = ftrace_failures_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = ftrace_avail_release,
+ .release = seq_release_private,
};
static const struct file_operations ftrace_filter_fops = {
@@ -2493,32 +2455,31 @@ int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
{
unsigned long *array = m->private;
- int index = *pos;
-
- (*pos)++;
- if (index >= ftrace_graph_count)
+ if (*pos >= ftrace_graph_count)
return NULL;
+ return &array[*pos];
+}
- return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return __g_next(m, pos);
}
static void *g_start(struct seq_file *m, loff_t *pos)
{
- void *p = NULL;
-
mutex_lock(&graph_lock);
/* Nothing, tell g_show to print all functions are enabled */
if (!ftrace_graph_count && !*pos)
return (void *)1;
- p = g_next(m, p, pos);
-
- return p;
+ return __g_next(m, pos);
}
static void g_stop(struct seq_file *m, void *p)
@@ -2529,7 +2490,6 @@ static void g_stop(struct seq_file *m, void *p)
static int g_show(struct seq_file *m, void *v)
{
unsigned long *ptr = v;
- char str[KSYM_SYMBOL_LEN];
if (!ptr)
return 0;
@@ -2539,9 +2499,7 @@ static int g_show(struct seq_file *m, void *v)
return 0;
}
- kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
-
- seq_printf(m, "%s\n", str);
+ seq_printf(m, "%pf\n", v);
return 0;
}
@@ -2563,7 +2521,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
mutex_lock(&graph_lock);
if ((file->f_mode & FMODE_WRITE) &&
- !(file->f_flags & O_APPEND)) {
+ (file->f_flags & O_TRUNC)) {
ftrace_graph_count = 0;
memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
}
@@ -2582,6 +2540,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
}
static int
+ftrace_graph_release(struct inode *inode, struct file *file)
+{
+ if (file->f_mode & FMODE_READ)
+ seq_release(inode, file);
+ return 0;
+}
+
+static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
struct dyn_ftrace *rec;
@@ -2710,9 +2676,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
}
static const struct file_operations ftrace_graph_fops = {
- .open = ftrace_graph_open,
- .read = seq_read,
- .write = ftrace_graph_write,
+ .open = ftrace_graph_open,
+ .read = seq_read,
+ .write = ftrace_graph_write,
+ .release = ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -3145,10 +3112,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
- if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+ if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
goto out;
- last_ftrace_enabled = ftrace_enabled;
+ last_ftrace_enabled = !!ftrace_enabled;
if (ftrace_enabled) {