author		Steven Rostedt <srostedt@redhat.com>	2009-05-11 22:11:05 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-05-11 23:33:06 -0400
commit		168b6b1d0594c7866caa73b12f3b8d91075695f2 (patch)
tree		2dd8d0e8b6ffc87360180c878a87b3632625b3f0 /kernel/trace/ring_buffer.c
parent		88eb0125362f2ab272cbaf84252cf101ddc2dec9 (diff)
ring-buffer: move code around to remove some branches
This is a bit of micro-optimization. But since the ring buffer is used
when tracing every function call, it is an extremely hot path. Every
nanosecond counts.

This change shows an improvement of over 5% in the ring-buffer-benchmark.
[ Impact: more efficient code ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
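
The technique, reduced to a minimal standalone sketch: rather than zeroing
delta in an else branch on the unlikely path, initialize it at the
declaration and goto past the fast-path computation when the timestamp ran
backwards. The names here (write_stamp, reserve_delta) are illustrative
stand-ins, not the kernel's own code; this compiles and runs as plain C:

#include <stdio.h>

/* Hypothetical stand-in for the ring buffer's last-written timestamp. */
static unsigned long long write_stamp = 100;

static unsigned long long reserve_delta(unsigned long long ts)
{
	unsigned long long delta = 0;	/* default moved to the declaration */

	/* Unlikely case: the clock appears to have run backwards.
	 * Jump straight past the fast-path computation; delta stays 0,
	 * so no separate else branch is needed to zero it. */
	if (ts < write_stamp)
		goto get_event;

	delta = ts - write_stamp;	/* common (fast) path */

 get_event:
	return delta;
}

int main(void)
{
	printf("delta = %llu\n", reserve_delta(150));	/* prints 50 */
	printf("delta = %llu\n", reserve_delta(50));	/* prints 0 */
	return 0;
}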
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a9e645a..16b24d4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1400,7 +1400,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 		      unsigned long length)
 {
 	struct ring_buffer_event *event;
-	u64 ts, delta;
+	u64 ts, delta = 0;
 	int commit = 0;
 	int nr_loops = 0;
 
@@ -1431,20 +1431,21 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
 		   rb_page_write(cpu_buffer->tail_page) ==
 		   rb_commit_index(cpu_buffer))) {
+		u64 diff;
 
-		delta = ts - cpu_buffer->write_stamp;
+		diff = ts - cpu_buffer->write_stamp;
 
-		/* make sure this delta is calculated here */
+		/* make sure this diff is calculated here */
 		barrier();
 
 		/* Did the write stamp get updated already? */
 		if (unlikely(ts < cpu_buffer->write_stamp))
-			delta = 0;
+			goto get_event;
 
-		else if (unlikely(test_time_stamp(delta))) {
+		delta = diff;
+		if (unlikely(test_time_stamp(delta))) {
 
 			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
-
 			if (commit == -EBUSY)
 				return NULL;
 
@@ -1453,12 +1454,11 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 			RB_WARN_ON(cpu_buffer, commit < 0);
 		}
-	} else
-		/* Non commits have zero deltas */
-		delta = 0;
+	}
 
+ get_event:
 	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
-	if (PTR_ERR(event) == -EAGAIN)
+	if (unlikely(PTR_ERR(event) == -EAGAIN))
 		goto again;
 
 	if (!event) {
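
In short, the diff makes three changes: delta now starts at zero at its
declaration, which makes the old "non commits have zero deltas" else branch
unnecessary; the unlikely backwards-timestamp case jumps straight to the new
get_event label instead of zeroing delta and falling through an else-if; and
the -EAGAIN retry check gains an unlikely() hint. Each change removes a
conditional from, or better predicts a branch on, the common path.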