author	Steven Rostedt <srostedt@redhat.com>	2010-05-21 13:32:26 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-05-25 11:57:26 -0400
commit	2711ca237a084286ea1c2dcf82ab2aadab23a00d (patch)
tree	a795fa0e31e8557befd803153a33827b1d0c8764 /kernel/trace
parent	b3230c8b44da5838cf396942d5c1ab19f8e8f720 (diff)
ring-buffer: Move zeroing out excess in page to ring buffer code
Currently the trace splice code zeros out the excess bytes in the page before sending it off to userspace.

This is to make sure userspace is not getting anything it should not be when reading the pages, because the excess data was never initialized to zero before writing (for performance reasons).

But the splice code has no business doing this work; it should be done by the ring buffer. With the latest changes for recording lost events, the splice code gets it wrong anyway.

Move the zeroing out of excess bytes into the ring buffer code.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
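The idea behind the move is straightforward: whoever owns the page knows how many bytes were actually committed to it, and can clear everything past that offset before the page is handed to userspace. The stand-alone sketch below is illustrative only, not the kernel code (zero_excess, page and page_size are made-up names); it shows the same pattern the ring buffer now applies itself using bpage->commit and BUF_PAGE_SIZE:

#include <stddef.h>
#include <string.h>

/*
 * Illustrative sketch only: clear the uninitialized tail of a buffer
 * page so a reader never sees stale data. "commit" is the number of
 * bytes actually written to the page.
 */
static void zero_excess(char *page, size_t commit, size_t page_size)
{
	if (commit < page_size)
		memset(page + commit, 0, page_size - commit);
}

Note that when the lost-events count is stored at the end of the page, the diff below first advances commit past it (the "commit += sizeof(missed_events)" line), so the zeroing does not wipe out the stored count.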
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ring_buffer.c	11
-rw-r--r--	kernel/trace/trace.c	6
2 files changed, 9 insertions, 8 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b0702ff..1da7b6e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3902,12 +3902,12 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	ret = read;
 
 	cpu_buffer->lost_events = 0;
+
+	commit = local_read(&bpage->commit);
 	/*
 	 * Set a flag in the commit field if we lost events
 	 */
 	if (missed_events) {
-		commit = local_read(&bpage->commit);
-
 		/* If there is room at the end of the page to save the
 		 * missed events, then record it there.
 		 */
@@ -3915,10 +3915,17 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 			memcpy(&bpage->data[commit], &missed_events,
 			       sizeof(missed_events));
 			local_add(RB_MISSED_STORED, &bpage->commit);
+			commit += sizeof(missed_events);
 		}
 		local_add(RB_MISSED_EVENTS, &bpage->commit);
 	}
 
+	/*
+	 * This page may be off to user land. Zero it out here.
+	 */
+	if (commit < BUF_PAGE_SIZE)
+		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+
  out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ba0ec81..95d0b1a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3661,7 +3661,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	struct ftrace_buffer_info *info = filp->private_data;
-	unsigned int pos;
 	ssize_t ret;
 	size_t size;
@@ -3688,11 +3687,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (ret < 0)
 		return 0;
 
-	pos = ring_buffer_page_len(info->spare);
-
-	if (pos < PAGE_SIZE)
-		memset(info->spare + pos, 0, PAGE_SIZE - pos);
-
  read:
 	size = PAGE_SIZE - info->read;
 	if (size > count)