author	Frederic Weisbecker <fweisbec@gmail.com>	2009-08-08 04:26:37 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-08-09 12:54:45 +0200
commit	3a43ce68ae1758fa6a839386025ef45acb6baa22 (patch)
tree	5431e80f427ac6312dc123ecfdb101ea71b3d364
parent	10b8e3066066708f304e0fc5cfe658e05abf943d (diff)
perf_counter: Fix tracepoint sampling to be part of generic sampling
Based on Peter's comments, make tracepoint sampling generic just like all
the other sampling bits are. This is a rename with no code changes:

- PERF_SAMPLE_TP_RECORD to PERF_SAMPLE_RAW
- struct perf_tracepoint_record to struct perf_raw_record

We want the system that transports raw tracepoint sample events into the
perf ring buffer to be generalized and usable by any type of counter.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1249698400-5441-4-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/perf_counter.h	10
-rw-r--r--	kernel/perf_counter.c	20
2 files changed, 15 insertions, 15 deletions
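
For context, a minimal sketch (not part of the patch) of what the rename enables: once the raw record is generic, any event source, not only a tracepoint, can attach an opaque payload to its samples by filling struct perf_raw_record and pointing perf_sample_data.raw at it, provided the counter asked for PERF_SAMPLE_RAW. The helper name my_event_overflow and its payload arguments are illustrative only; struct perf_raw_record, struct perf_sample_data and perf_counter_overflow() are the pieces touched or declared by this patch.

/*
 * Hypothetical caller, assuming the post-patch definitions in
 * include/linux/perf_counter.h.  Illustrative only.
 */
#include <linux/perf_counter.h>

static void my_event_overflow(struct perf_counter *counter, int nmi,
			      struct pt_regs *regs, u64 addr,
			      void *payload, u32 payload_size)
{
	/* Opaque blob copied verbatim into the ring buffer when the
	 * counter's sample_type includes PERF_SAMPLE_RAW. */
	struct perf_raw_record raw = {
		.size	= payload_size,
		.data	= payload,
	};
	struct perf_sample_data data = {
		.regs	= regs,
		.addr	= addr,
		.raw	= &raw,
	};

	perf_counter_overflow(counter, nmi, &data);
}
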
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index a67dd5c..2aabe43 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -121,7 +121,7 @@ enum perf_counter_sample_format {
PERF_SAMPLE_CPU = 1U << 7,
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
- PERF_SAMPLE_TP_RECORD = 1U << 10,
+ PERF_SAMPLE_RAW = 1U << 10,
PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
};
@@ -414,9 +414,9 @@ struct perf_callchain_entry {
__u64 ip[PERF_MAX_STACK_DEPTH];
};
-struct perf_tracepoint_record {
- int size;
- char *record;
+struct perf_raw_record {
+ u32 size;
+ void *data;
};
struct task_struct;
@@ -687,7 +687,7 @@ struct perf_sample_data {
struct pt_regs *regs;
u64 addr;
u64 period;
- void *private;
+ struct perf_raw_record *raw;
};
extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 117622c..0023105 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2646,7 +2646,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
u64 counter;
} group_entry;
struct perf_callchain_entry *callchain = NULL;
- struct perf_tracepoint_record *tp = NULL;
+ struct perf_raw_record *raw = NULL;
int callchain_size = 0;
u64 time;
struct {
@@ -2715,10 +2715,10 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
header.size += sizeof(u64);
}
- if (sample_type & PERF_SAMPLE_TP_RECORD) {
- tp = data->private;
- if (tp)
- header.size += tp->size;
+ if (sample_type & PERF_SAMPLE_RAW) {
+ raw = data->raw;
+ if (raw)
+ header.size += raw->size;
}
ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2784,8 +2784,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
}
}
- if ((sample_type & PERF_SAMPLE_TP_RECORD) && tp)
- perf_output_copy(&handle, tp->record, tp->size);
+ if ((sample_type & PERF_SAMPLE_RAW) && raw)
+ perf_output_copy(&handle, raw->data, raw->size);
perf_output_end(&handle);
}
@@ -3740,15 +3740,15 @@ static const struct pmu perf_ops_task_clock = {
void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
int entry_size)
{
- struct perf_tracepoint_record tp = {
+ struct perf_raw_record raw = {
.size = entry_size,
- .record = record,
+ .data = record,
};
struct perf_sample_data data = {
.regs = get_irq_regs(),
.addr = addr,
- .private = &tp,
+ .raw = &raw,
};
if (!data.regs)