author     Steven Rostedt <srostedt@redhat.com>  2010-10-20 10:58:02 -0400
committer  Steven Rostedt <rostedt@goodmis.org>  2010-10-20 10:58:02 -0400
commit     e8bc43e84fada397af1b677b07dbf26e6ac78fcc (patch)
tree       60f6fe1acbd15fcd9fdc051660479f300c164ab6 /kernel/trace
parent     747e94ae3d1b4c9bf5380e569f614eb9040b79e7 (diff)
ring-buffer: Pass timestamp by value and not by reference
The original ring buffer code had locations that modified the timestamp, and that change was used by the callers. Now the timestamp is no longer reused by the callers, so there is no reason to pass it by reference. Changing the calls to pass by value lets gcc optimize the code a bit more: it can keep the timestamp in a register and not worry about updating the referenced value.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
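A rough userspace sketch of the rationale (illustrative only; stamp_event_by_ref and stamp_event_by_val are made-up names, not ring-buffer functions): with a u64 * parameter every update has to be stored back through the pointer, while a plain u64 parameter is just a local copy the compiler is free to keep in a register.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Hypothetical by-reference variant: updates go through *ts,
     * so the value must be written back to memory for the caller. */
    static u64 stamp_event_by_ref(u64 *ts, u64 delta)
    {
            *ts += delta;
            return *ts;
    }

    /* Hypothetical by-value variant: ts is a local copy and can stay
     * in a register; the caller's variable is never touched. */
    static u64 stamp_event_by_val(u64 ts, u64 delta)
    {
            ts += delta;
            return ts;
    }

    int main(void)
    {
            u64 ts = 1000;

            /* By reference, the caller observes the update ... */
            stamp_event_by_ref(&ts, 10);
            printf("by ref:   ts = %llu\n", (unsigned long long)ts);

            /* ... by value it does not, which is fine once no caller
             * reuses the timestamp after the call. */
            u64 ret = stamp_event_by_val(ts, 10);
            printf("by value: ts = %llu, return = %llu\n",
                   (unsigned long long)ts, (unsigned long long)ret);
            return 0;
    }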
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ring_buffer.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0b88df8..c8ce6bd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1829,7 +1829,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long length, unsigned long tail,
- struct buffer_page *tail_page, u64 *ts)
+ struct buffer_page *tail_page, u64 ts)
{
struct buffer_page *commit_page = cpu_buffer->commit_page;
struct ring_buffer *buffer = cpu_buffer->buffer;
@@ -1912,8 +1912,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* Nested commits always have zero deltas, so
* just reread the time stamp
*/
- *ts = rb_time_stamp(buffer);
- next_page->page->time_stamp = *ts;
+ ts = rb_time_stamp(buffer);
+ next_page->page->time_stamp = ts;
}
out_again:
@@ -1932,7 +1932,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
- unsigned type, unsigned long length, u64 *ts)
+ unsigned type, unsigned long length, u64 ts)
{
struct buffer_page *tail_page;
struct ring_buffer_event *event;
@@ -1965,7 +1965,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* its timestamp.
*/
if (!tail)
- tail_page->page->time_stamp = *ts;
+ tail_page->page->time_stamp = ts;
return event;
}
@@ -2008,7 +2008,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
- u64 *ts, u64 *delta)
+ u64 ts, u64 *delta)
{
struct ring_buffer_event *event;
int ret;
@@ -2016,7 +2016,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
WARN_ONCE(*delta > (1ULL << 59),
KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
(unsigned long long)*delta,
- (unsigned long long)*ts,
+ (unsigned long long)ts,
(unsigned long long)cpu_buffer->write_stamp);
/*
@@ -2051,7 +2051,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
event->array[0] = 0;
}
}
- cpu_buffer->write_stamp = *ts;
+ cpu_buffer->write_stamp = ts;
/* let the caller know this was the commit */
ret = 1;
} else {
@@ -2175,7 +2175,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
delta = diff;
if (unlikely(test_time_stamp(delta))) {
- commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
+ commit = rb_add_time_stamp(cpu_buffer, ts, &delta);
if (commit == -EBUSY)
goto out_fail;
@@ -2187,7 +2187,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
}
get_event:
- event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
+ event = __rb_reserve_next(cpu_buffer, 0, length, ts);
if (unlikely(PTR_ERR(event) == -EAGAIN))
goto again;