author:    Steven Rostedt <srostedt@redhat.com>  2009-04-29 13:43:37 -0400
committer: Steven Rostedt <rostedt@goodmis.org>  2009-05-05 13:51:02 -0400
commit:    f0d2c681ac0a85142fc8abe65fc33fcad35cb9b7 (patch)
tree:      eafd2b655fc7ad0695fc5dbc343bcc1eb17e2e32 /kernel
parent:    d6ce96dabe2c4409fd009ec14250a1fdbab4b133 (diff)
ring-buffer: add counters for commit overrun and nmi dropped entries
The WARN_ON in the ring buffer, triggered when a commit is preempted and the buffer is filled by the preempting writes, can fire in normal operation. The WARN_ON makes this look like a bug, and worse, since it does not stop tracing and it calls printk, which can itself recurse into the tracer, it is prone to deadlock (the WARN_ON fires from a context that cannot handle that recursion).

This patch removes the WARN_ON and replaces it with a counter that can be retrieved by a tracer. This counter is called commit_overrun.

While at it, I added an nmi_dropped counter to count any time an NMI entry is dropped because the NMI could not take the spinlock.

[ Impact: prevent deadlock by not warning on a normal-operation case ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
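For context on the new nmi_dropped path: an NMI can fire on a CPU while the interrupted code already holds the per-CPU buffer spinlock, so the NMI side must never spin on that lock; it can only try it and, on failure, drop the entry and count the drop. Below is a minimal, self-contained user-space analogy of that try-lock-or-drop-and-count pattern (the pthread spinlock, the reserve_from_*() helpers and the plain counters are illustrative stand-ins, not the kernel code in this patch):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t buf_lock;
static unsigned long nmi_dropped;	/* analogue of cpu_buffer->nmi_dropped */
static unsigned long entries;		/* analogue of cpu_buffer->entries */

/* Analogue of the NMI-context path: never spin, only try the lock. */
static void reserve_from_nmi_context(void)
{
	if (pthread_spin_trylock(&buf_lock) != 0) {
		/* lock held by the code we interrupted: drop and account */
		nmi_dropped++;
		return;
	}
	entries++;			/* "write" the entry */
	pthread_spin_unlock(&buf_lock);
}

/* Analogue of the normal path, where spinning is safe. */
static void reserve_from_normal_context(void)
{
	pthread_spin_lock(&buf_lock);
	entries++;
	pthread_spin_unlock(&buf_lock);
}

int main(void)
{
	pthread_spin_init(&buf_lock, PTHREAD_PROCESS_PRIVATE);
	reserve_from_normal_context();
	reserve_from_nmi_context();
	printf("entries=%lu nmi_dropped=%lu\n", entries, nmi_dropped);
	pthread_spin_destroy(&buf_lock);
	return 0;
}

Build with gcc -pthread; with no contention in this toy program both calls succeed, but the structure mirrors why the kernel path counts a drop instead of blocking.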
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/ring_buffer.c	52
1 file changed, 49 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3e86da9..26e1359 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -402,6 +402,8 @@ struct ring_buffer_per_cpu {
struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
+ unsigned long nmi_dropped;
+ unsigned long commit_overrun;
unsigned long overrun;
unsigned long entries;
u64 write_stamp;
@@ -1216,8 +1218,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* simply fail.
*/
if (unlikely(in_nmi())) {
- if (!__raw_spin_trylock(&cpu_buffer->lock))
+ if (!__raw_spin_trylock(&cpu_buffer->lock)) {
+ cpu_buffer->nmi_dropped++;
goto out_reset;
+ }
} else
__raw_spin_lock(&cpu_buffer->lock);
@@ -1238,8 +1242,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* about it.
*/
if (unlikely(next_page == commit_page)) {
- /* This can easily happen on small ring buffers */
- WARN_ON_ONCE(buffer->pages > 2);
+ cpu_buffer->commit_overrun++;
goto out_reset;
}
@@ -1926,6 +1929,47 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
+ * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of dropped NMI entries from
+ */
+unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+ ret = cpu_buffer->nmi_dropped;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
+
+/**
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+ ret = cpu_buffer->commit_overrun;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
+
+/**
* ring_buffer_entries - get the number of entries in a buffer
* @buffer: The ring buffer
*
@@ -2595,6 +2639,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
+ cpu_buffer->nmi_dropped = 0;
+ cpu_buffer->commit_overrun = 0;
cpu_buffer->overrun = 0;
cpu_buffer->entries = 0;
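Both new accessors are exported with EXPORT_SYMBOL_GPL, so an in-kernel consumer (for example, code that prints per-buffer statistics) can report the counters. The sketch below is hypothetical: only ring_buffer_nmi_dropped_cpu() and ring_buffer_commit_overrun_cpu() come from this patch, while the report_ring_buffer_drops() helper and the pr_info() output format are assumptions for illustration.

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

/* Hypothetical consumer: sum the per-CPU drop counters and log them. */
static void report_ring_buffer_drops(struct ring_buffer *buffer)
{
	unsigned long nmi_dropped = 0, commit_overrun = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		/* both helpers return 0 for CPUs not in buffer->cpumask */
		nmi_dropped    += ring_buffer_nmi_dropped_cpu(buffer, cpu);
		commit_overrun += ring_buffer_commit_overrun_cpu(buffer, cpu);
	}

	pr_info("ring buffer: nmi dropped: %lu, commit overruns: %lu\n",
		nmi_dropped, commit_overrun);
}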