author    Steven Rostedt <rostedt@goodmis.org>    2008-05-12 21:20:59 +0200
committer Thomas Gleixner <tglx@linutronix.de>   2008-05-23 21:49:54 +0200
commit    a98a3c3fde3ae7614f19758a043691b6f59dac53
tree      7a04e3e7c1748ee5b258e5176e10e1fe13468001
parent    05bd68c514579e007b46e4fa0461b78416a3f4c2
ftrace: trace_entries to dynamically change trace buffer size
This patch adds /debug/tracing/trace_entries, which lets users see as well as modify the number of entries the trace buffers hold.

The buffer size changes only in increments of ENTRIES_PER_PAGE, which is derived from the entry size as the number of entries that fit in one page. The user does not need to request an exact size: the value written is rounded up to the nearest increment. Trying to set the number of entries to 0 returns -EINVAL.

To avoid race conditions, the buffer size can only be modified when tracing is completely disabled (current_tracer == none). An info message is printed if a user tries to modify the buffer size while a tracer is active.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
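(Illustration, not part of the patch.) The rounding described above can be modeled in isolation. In the sketch below the page and entry sizes are assumed values, and round_entries() mirrors the net effect of the grow/shrink loops in tracing_entries_write():

#include <stdio.h>

#define MODEL_PAGE_SIZE  4096
#define MODEL_ENTRY_SIZE 64    /* assumed sizeof(struct trace_entry) */
#define ENTRIES_PER_PAGE (MODEL_PAGE_SIZE / MODEL_ENTRY_SIZE)

/* Round a requested entry count up to a whole number of pages. */
static unsigned long round_entries(unsigned long requested)
{
        unsigned long pages =
                (requested + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

        return pages * ENTRIES_PER_PAGE;
}

int main(void)
{
        /* 1000 is not a multiple of 64, so it rounds up to 1024 */
        printf("%lu -> %lu\n", 1000UL, round_entries(1000));
        /* 65536 is already page-aligned and stays unchanged */
        printf("%lu -> %lu\n", 65536UL, round_entries(65536));
        return 0;
}
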
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace.c | 145
1 file changed, 137 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3b4eaf3..4723e01 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -35,6 +35,15 @@
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly =
+{
+        .name = "none",
+};
+
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
static int tracing_disabled = 1;
long
@@ -2364,6 +2373,70 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        return read;
}
+static ssize_t
+tracing_entries_read(struct file *filp, char __user *ubuf,
+                     size_t cnt, loff_t *ppos)
+{
+        struct trace_array *tr = filp->private_data;
+        char buf[64];
+        int r;
+
+        r = sprintf(buf, "%lu\n", tr->entries);
+        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+tracing_entries_write(struct file *filp, const char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+        unsigned long val;
+        char buf[64];
+
+        if (cnt > 63)
+                cnt = 63;
+
+        if (copy_from_user(&buf, ubuf, cnt))
+                return -EFAULT;
+
+        buf[cnt] = 0;
+
+        val = simple_strtoul(buf, NULL, 10);
+
+        /* must have at least 1 entry */
+        if (!val)
+                return -EINVAL;
+
+        mutex_lock(&trace_types_lock);
+
+        if (current_trace != &no_tracer) {
+                cnt = -EBUSY;
+                pr_info("ftrace: set current_tracer to none"
+                        " before modifying buffer size\n");
+                goto out;
+        }
+
+        if (val > global_trace.entries) {
+                while (global_trace.entries < val) {
+                        if (trace_alloc_page()) {
+                                cnt = -ENOMEM;
+                                goto out;
+                        }
+                }
+        } else {
+                /* shrink, but keep at least val entries: stop once freeing
+                   another page would drop below val */
+                while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
+                        trace_free_page();
+        }
+
+        filp->f_pos += cnt;
+
+ out:
+        max_tr.entries = global_trace.entries;
+        mutex_unlock(&trace_types_lock);
+
+        return cnt;
+}
+
static struct file_operations tracing_max_lat_fops = {
        .open = tracing_open_generic,
        .read = tracing_max_lat_read,
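(Illustration, not part of the patch.) The handlers added above expose a plain-text file to user space. A minimal sketch of driving it, assuming debugfs is mounted at /debug as in the changelog, with error handling abbreviated:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        const char *req = "65536";
        char buf[64];
        ssize_t r;
        int fd;

        fd = open("/debug/tracing/trace_entries", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* read the current number of entries */
        r = read(fd, buf, sizeof(buf) - 1);
        if (r > 0) {
                buf[r] = '\0';
                printf("current: %s", buf);
        }

        /* request a new size; the kernel rounds it up to whole pages
           and refuses with -EBUSY unless current_tracer is "none" */
        if (write(fd, req, strlen(req)) < 0)
                perror("write");

        close(fd);
        return 0;
}
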
@@ -2389,6 +2462,12 @@ static struct file_operations tracing_pipe_fops = {
        .release = tracing_release_pipe,
};
+static struct file_operations tracing_entries_fops = {
+        .open = tracing_open_generic,
+        .read = tracing_entries_read,
+        .write = tracing_entries_write,
+};
+
#ifdef CONFIG_DYNAMIC_FTRACE
static ssize_t
@@ -2500,6 +2579,12 @@ static __init void tracer_init_debugfs(void)
pr_warning("Could not create debugfs "
"'tracing_threash' entry\n");
+        entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+                                    &global_trace, &tracing_entries_fops);
+        if (!entry)
+                pr_warning("Could not create debugfs "
+                           "'trace_entries' entry\n");
+
#ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
@@ -2510,12 +2595,6 @@ static __init void tracer_init_debugfs(void)
#endif
}
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
-        .name = "none",
-};
-
static int trace_alloc_page(void)
{
        struct trace_array_cpu *data;
@@ -2552,7 +2631,6 @@ static int trace_alloc_page(void)
        /* Now that we successfully allocate a page per CPU, add them */
        for_each_possible_cpu(i) {
                data = global_trace.data[i];
-                data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                page = list_entry(pages.next, struct page, lru);
                list_del_init(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);
@@ -2560,7 +2638,6 @@ static int trace_alloc_page(void)
#ifdef CONFIG_TRACER_MAX_TRACE
                data = max_tr.data[i];
-                data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                page = list_entry(pages.next, struct page, lru);
                list_del_init(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);
@@ -2579,6 +2656,55 @@ static int trace_alloc_page(void)
        return -ENOMEM;
}
+static int trace_free_page(void)
+{
+        struct trace_array_cpu *data;
+        struct page *page;
+        struct list_head *p;
+        int i;
+        int ret = 0;
+
+        /* free one page from each buffer */
+        for_each_possible_cpu(i) {
+                data = global_trace.data[i];
+                p = data->trace_pages.next;
+                if (p == &data->trace_pages) {
+                        /* should never happen */
+                        WARN_ON(1);
+                        tracing_disabled = 1;
+                        ret = -1;
+                        break;
+                }
+                page = list_entry(p, struct page, lru);
+                ClearPageLRU(page);
+                list_del(&page->lru);
+                __free_page(page);
+
+                tracing_reset(data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+                data = max_tr.data[i];
+                p = data->trace_pages.next;
+                if (p == &data->trace_pages) {
+                        /* should never happen */
+                        WARN_ON(1);
+                        tracing_disabled = 1;
+                        ret = -1;
+                        break;
+                }
+                page = list_entry(p, struct page, lru);
+                ClearPageLRU(page);
+                list_del(&page->lru);
+                __free_page(page);
+
+                tracing_reset(data);
+#endif
+        }
+        global_trace.entries -= ENTRIES_PER_PAGE;
+
+        return ret;
+}
+
__init static int tracer_alloc_buffers(void)
{
        struct trace_array_cpu *data;
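(Illustration, not part of the patch.) trace_free_page() above pops the head page off each per-CPU page list and treats an empty list as a hard error. A stripped-down user-space model of that pop-with-guard, using a hand-rolled sentinel list in place of the kernel's list_head API:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *prev, *next;
};

/* circular list with a sentinel head, as with the kernel's list_head */
static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static void list_add_tail(struct node *n, struct node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

/* mirror of the pop-with-guard in trace_free_page() */
static int free_one(struct node *trace_pages)
{
        struct node *p = trace_pages->next;

        if (p == trace_pages) {
                /* empty list: the "should never happen" branch */
                fprintf(stderr, "underflow: no page to free\n");
                return -1;
        }
        list_del(p);
        free(p);
        return 0;
}

int main(void)
{
        struct node head;
        int i, freed = 0;

        list_init(&head);
        for (i = 0; i < 4; i++)     /* allocation checks omitted */
                list_add_tail(malloc(sizeof(struct node)), &head);

        while (free_one(&head) == 0)
                freed++;
        printf("freed %d pages before the guard tripped\n", freed);
        return 0;
}
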
@@ -2609,6 +2735,9 @@ __init static int tracer_alloc_buffers(void)
                /* use the LRU flag to differentiate the two buffers */
                ClearPageLRU(page);
+                data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+                max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
                array = (void *)__get_free_page(GFP_KERNEL);