author		Tejun Heo <tj@kernel.org>	2010-04-05 11:37:28 +0900
committer	Tejun Heo <tj@kernel.org>	2010-04-05 11:37:28 +0900
commit		336f5899d287f06d8329e208fc14ce50f7ec9698 (patch)
tree		9b762d450d5eb248a6ff8317badb7e223d93ed58 /kernel/trace
parent		a4ab2773205e8b94c18625455f85e3b6bb9d7ad6 (diff)
parent		db217dece3003df0841bacf9556b5c06aa097dae (diff)
Merge branch 'master' into export-slabh
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ring_buffer.c	| 8
-rw-r--r--	kernel/trace/trace_clock.c	| 4
-rw-r--r--	kernel/trace/trace_event_perf.c	| 11
3 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2c839ca..41ca394 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1210,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
@@ -1238,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
@@ -1247,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
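
Both ring_buffer.c hunks above are the same fix: rb_remove_pages() and rb_insert_pages() hold cpu_buffer->reader_lock (released by the spin_unlock_irq() at the bottom of each function), so bailing out with a plain return on an RB_WARN_ON() failure left the lock held. Routing those error paths to an out: label that falls through to the unlock keeps every exit balanced. Below is a minimal userspace sketch of the same single-exit unlock pattern; the names (buf_lock, items, remove_items) are hypothetical and a pthread mutex stands in for the kernel spinlock.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static int items = 3;

/* Remove up to nr items; every exit path must drop buf_lock. */
static void remove_items(int nr)
{
	int i;

	pthread_mutex_lock(&buf_lock);

	for (i = 0; i < nr; i++) {
		if (items == 0)
			goto out;	/* a bare "return" here would leak the lock */
		items--;
	}

out:
	pthread_mutex_unlock(&buf_lock);
}

int main(void)
{
	remove_items(5);
	printf("items left: %d\n", items);
	return 0;
}
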
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 6fbfb8f..9d589d8 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
 	int this_cpu;
 	u64 now;
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	this_cpu = raw_smp_processor_id();
 	now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
 	arch_spin_unlock(&trace_clock_struct.lock);
 out:
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 	return now;
 }
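
The trace_clock.c hunks swap the raw_ irq helpers for the regular ones inside trace_clock_global(). The functional difference, as a general point about the kernel API: local_irq_save()/local_irq_restore() also invoke the irq-flags tracing hooks (trace_hardirqs_off()/trace_hardirqs_on()) that lockdep and the irqsoff tracer depend on, whereas the raw_ variants disable interrupts without informing those subsystems. A small kernel-style sketch of the pairing, with a hypothetical my_read_clock() standing in for the real function:

#include <linux/irqflags.h>
#include <linux/types.h>

/* Hypothetical reader: interrupts are disabled around the critical
 * section; local_irq_save() (unlike raw_local_irq_save()) also runs
 * the trace_hardirqs_off()/on() hooks, so lockdep and the irqsoff
 * tracer see this irq-disabled region. */
static u64 my_read_clock(u64 *shared_clock)
{
	unsigned long flags;
	u64 now;

	local_irq_save(flags);
	now = *shared_clock;
	local_irq_restore(flags);

	return now;
}
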
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691e..0565bb4 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned accesses
+ * suprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+	perf_trace_t;
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	char *trace_buf, *raw_data;
 	int pc, cpu;
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
 	pc = preempt_count();
 	/* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	raw_data = per_cpu_ptr(trace_buf, cpu);
 	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 	entry = (struct trace_entry *)raw_data;
 	tracing_generic_entry_update(entry, *irq_flags, pc);
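
The trace_event_perf.c hunks work together: perf_trace_t becomes an array of unsigned long rather than char, so a buffer of that type is naturally aligned for word-sized stores; the BUILD_BUG_ON() makes the size assumption explicit at compile time; and the padding bytes are now cleared with memset() instead of a 64-bit store that may not be aligned on every architecture. A self-contained userspace sketch of the same idiom, with hypothetical names (MY_MAX_TRACE_SIZE, my_trace_buf_t) and C11 _Static_assert standing in for the kernel's BUILD_BUG_ON():

#include <stdio.h>
#include <string.h>

#define MY_MAX_TRACE_SIZE 2048	/* stand-in for PERF_MAX_TRACE_SIZE */

/* Declaring the buffer as an unsigned long array keeps it aligned for
 * word-sized accesses even though callers treat it as raw bytes. */
typedef unsigned long my_trace_buf_t[MY_MAX_TRACE_SIZE / sizeof(unsigned long)];

/* Compile-time check that the division above loses nothing
 * (the kernel uses BUILD_BUG_ON() for the same purpose). */
_Static_assert(MY_MAX_TRACE_SIZE % sizeof(unsigned long) == 0,
	       "MY_MAX_TRACE_SIZE must be a multiple of sizeof(unsigned long)");

int main(void)
{
	my_trace_buf_t buf;
	char *raw_data = (char *)buf;
	size_t size = 104;	/* record size, already rounded up to a multiple of 8 */

	/* Zero the padding introduced by the round-up so stack/heap garbage is
	 * never handed to user space; memset() does not require the address to
	 * be 8-byte aligned the way a direct u64 store might. */
	memset(&raw_data[size - sizeof(unsigned long long)], 0,
	       sizeof(unsigned long long));

	printf("prepared a %zu-byte record in a %zu-byte buffer\n",
	       size, sizeof(buf));
	return 0;
}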