author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-04 16:53:44 +0200
committer	Ingo Molnar <mingo@elte.hu>			2009-06-04 17:51:38 +0200
commit		60313ebed739b331e8e61079da27a11ee3b73a30 (patch)
tree		8476fbd05be709595876360765776dd9ca20608d /kernel
parent		20c84e959ec11b1803d2b2832eef703d5fbe7f7b (diff)
perf_counter: Add fork event
Create a fork event so that we can easily clone the comm and dso maps
without having to generate all those events.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
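What lands in the ring buffer is exactly the anonymous event member of the new struct perf_fork_event added below: a perf_event_header followed by the child and parent pids. Here is a minimal tool-side sketch of that wire format, with a fabricated record standing in for one pulled out of the counter's mmap ring buffer; the PERF_EVENT_FORK value mirrors the era's linux/perf_counter.h and should be checked against it.

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct perf_event_header from linux/perf_counter.h. */
struct perf_event_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

/* Wire format of the new fork record: header, then child/parent pid. */
struct fork_record {
	struct perf_event_header header;
	uint32_t pid;
	uint32_t ppid;
};

#define PERF_EVENT_FORK 7	/* assumed enum perf_event_type value */

/* Hypothetical dispatcher, called once per record from the ring buffer. */
static void handle_record(const struct perf_event_header *hdr)
{
	if (hdr->type == PERF_EVENT_FORK) {
		const struct fork_record *f = (const struct fork_record *)hdr;
		printf("fork: parent %u -> child %u\n", f->ppid, f->pid);
	}
}

int main(void)
{
	/* Fabricated record, laid out as the kernel would emit it. */
	struct fork_record rec = {
		.header = { .type = PERF_EVENT_FORK, .misc = 0,
			    .size = sizeof(rec) },
		.pid  = 1001,
		.ppid = 1000,
	};
	handle_record(&rec.header);
	return 0;
}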
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c		4
-rw-r--r--	kernel/perf_counter.c	131
2 files changed, 116 insertions(+), 19 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index b7d7a9f..f4466ca 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1412,12 +1412,12 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
-		} else {
+		} else if (!(clone_flags & CLONE_VM)) {
 			/*
 			 * vfork will do an exec which will call
 			 * set_task_comm()
 			 */
-			perf_counter_comm(p);
+			perf_counter_fork(p);
 		}
 
 		audit_finish_fork(p);
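The reworked condition narrows when the event fires: a vfork() child is still skipped because the exec that follows will emit a comm event (per the comment above), and a CLONE_VM child (a thread) is now skipped because it shares its parent's address space, leaving no maps to clone. A userspace sketch of the same decision, mirroring the do_fork() logic above (an illustration only, not kernel code):

#define _GNU_SOURCE
#include <sched.h>	/* CLONE_VM, CLONE_VFORK */
#include <stdio.h>

/*
 * Userspace mirror of the do_fork() logic above: a fork event is
 * emitted only when the child gets its own address space and is
 * not a vfork() child (whose exec will emit a comm event instead).
 */
static int emits_fork_event(unsigned long clone_flags)
{
	if (clone_flags & CLONE_VFORK)
		return 0;
	return !(clone_flags & CLONE_VM);
}

int main(void)
{
	printf("fork():           %d\n", emits_fork_event(0));
	printf("vfork():          %d\n",
	       emits_fork_event(CLONE_VM | CLONE_VFORK));
	printf("pthread_create(): %d\n", emits_fork_event(CLONE_VM));
	return 0;
}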
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0bb03f1..78c5862 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -40,9 +40,9 @@ static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
 static atomic_t nr_counters __read_mostly;
-static atomic_t nr_mmap_tracking __read_mostly;
-static atomic_t nr_munmap_tracking __read_mostly;
-static atomic_t nr_comm_tracking __read_mostly;
+static atomic_t nr_mmap_counters __read_mostly;
+static atomic_t nr_munmap_counters __read_mostly;
+static atomic_t nr_comm_counters __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
@@ -1447,11 +1447,11 @@ static void free_counter(struct perf_counter *counter)
 	atomic_dec(&nr_counters);
 
 	if (counter->attr.mmap)
-		atomic_dec(&nr_mmap_tracking);
+		atomic_dec(&nr_mmap_counters);
 	if (counter->attr.munmap)
-		atomic_dec(&nr_munmap_tracking);
+		atomic_dec(&nr_munmap_counters);
 	if (counter->attr.comm)
-		atomic_dec(&nr_comm_tracking);
+		atomic_dec(&nr_comm_counters);
 
 	if (counter->destroy)
 		counter->destroy(counter);
@@ -2476,6 +2476,105 @@ static void perf_counter_output(struct perf_counter *counter,
 }
 
 /*
+ * fork tracking
+ */
+
+struct perf_fork_event {
+	struct task_struct	*task;
+
+	struct {
+		struct perf_event_header	header;
+
+		u32				pid;
+		u32				ppid;
+	} event;
+};
+
+static void perf_counter_fork_output(struct perf_counter *counter,
+				     struct perf_fork_event *fork_event)
+{
+	struct perf_output_handle handle;
+	int size = fork_event->event.header.size;
+	struct task_struct *task = fork_event->task;
+	int ret = perf_output_begin(&handle, counter, size, 0, 0);
+
+	if (ret)
+		return;
+
+	fork_event->event.pid = perf_counter_pid(counter, task);
+	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+
+	perf_output_put(&handle, fork_event->event);
+	perf_output_end(&handle);
+}
+
+static int perf_counter_fork_match(struct perf_counter *counter)
+{
+	if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap)
+		return 1;
+
+	return 0;
+}
+
+static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
+				  struct perf_fork_event *fork_event)
+{
+	struct perf_counter *counter;
+
+	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+		return;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+		if (perf_counter_fork_match(counter))
+			perf_counter_fork_output(counter, fork_event);
+	}
+	rcu_read_unlock();
+}
+
+static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_counter_context *ctx;
+
+	cpuctx = &get_cpu_var(perf_cpu_context);
+	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+	put_cpu_var(perf_cpu_context);
+
+	rcu_read_lock();
+	/*
+	 * doesn't really matter which of the child contexts the
+	 * event ends up in.
+	 */
+	ctx = rcu_dereference(current->perf_counter_ctxp);
+	if (ctx)
+		perf_counter_fork_ctx(ctx, fork_event);
+	rcu_read_unlock();
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+	struct perf_fork_event fork_event;
+
+	if (!atomic_read(&nr_comm_counters) &&
+	    !atomic_read(&nr_mmap_counters) &&
+	    !atomic_read(&nr_munmap_counters))
+		return;
+
+	fork_event = (struct perf_fork_event){
+		.task	= task,
+		.event	= {
+			.header = {
+				.type = PERF_EVENT_FORK,
+				.size = sizeof(fork_event.event),
+			},
+		},
+	};
+
+	perf_counter_fork_event(&fork_event);
+}
+
+/*
  * comm tracking
  */
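Note that perf_counter_fork_match() keys the fork event off the existing comm, mmap and munmap attribute bits rather than a new one: any counter tracking those maps implicitly needs fork events, so its tool can clone per-task state instead of replaying every inherited mapping. A hypothetical tool-side illustration of that cloning, with a fixed-size comm table standing in for real comm and dso map structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PID 1024

/* Stand-in for a tool's per-task comm/dso state, indexed by pid. */
static char comm_of[MAX_PID][16];

/* On a fork record, inherit the parent's state wholesale. */
static void tool_handle_fork(uint32_t pid, uint32_t ppid)
{
	if (pid < MAX_PID && ppid < MAX_PID)
		memcpy(comm_of[pid], comm_of[ppid], sizeof(comm_of[pid]));
}

int main(void)
{
	strcpy(comm_of[1000], "bash");
	tool_handle_fork(1001, 1000);	/* child 1001 forked from 1000 */
	printf("comm of 1001: %s\n", comm_of[1001]);
	return 0;
}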
@@ -2511,11 +2610,9 @@ static void perf_counter_comm_output(struct perf_counter *counter,
 	perf_output_end(&handle);
 }
 
-static int perf_counter_comm_match(struct perf_counter *counter,
-				   struct perf_comm_event *comm_event)
+static int perf_counter_comm_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm &&
-	    comm_event->event.header.type == PERF_EVENT_COMM)
+	if (counter->attr.comm)
 		return 1;
 
 	return 0;
@@ -2531,7 +2628,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_counter_comm_match(counter, comm_event))
+		if (perf_counter_comm_match(counter))
 			perf_counter_comm_output(counter, comm_event);
 	}
 	rcu_read_unlock();
@@ -2570,7 +2667,7 @@ void perf_counter_comm(struct task_struct *task)
 {
 	struct perf_comm_event comm_event;
 
-	if (!atomic_read(&nr_comm_tracking))
+	if (!atomic_read(&nr_comm_counters))
 		return;
 
 	comm_event = (struct perf_comm_event){
@@ -2708,7 +2805,7 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
 {
 	struct perf_mmap_event mmap_event;
 
-	if (!atomic_read(&nr_mmap_tracking))
+	if (!atomic_read(&nr_mmap_counters))
 		return;
 
 	mmap_event = (struct perf_mmap_event){
@@ -2729,7 +2826,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 {
 	struct perf_mmap_event mmap_event;
 
-	if (!atomic_read(&nr_munmap_tracking))
+	if (!atomic_read(&nr_munmap_counters))
 		return;
 
 	mmap_event = (struct perf_mmap_event){
@@ -3427,11 +3524,11 @@ done:
 	atomic_inc(&nr_counters);
 
 	if (counter->attr.mmap)
-		atomic_inc(&nr_mmap_tracking);
+		atomic_inc(&nr_mmap_counters);
 	if (counter->attr.munmap)
-		atomic_inc(&nr_munmap_tracking);
+		atomic_inc(&nr_munmap_counters);
 	if (counter->attr.comm)
-		atomic_inc(&nr_comm_tracking);
+		atomic_inc(&nr_comm_counters);
 
 	return counter;
 }
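For readers who want to try this from userspace on a current kernel: the perf_counter subsystem was later renamed perf_event, and PERF_EVENT_FORK lives on as PERF_RECORD_FORK. The following is a minimal sketch against today's perf_event_open(2) interface, not the syscall as it existed when this patch was merged; modern kernels gate fork/exit records primarily on the attr.task bit, which did not exist yet here.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;	/* side-band records only */
	attr.comm   = 1;	/* comm tracking, as keyed on in this patch */
	attr.mmap   = 1;	/* mmap tracking, as keyed on in this patch */
	attr.task   = 1;	/* the modern switch for fork/exit records  */

	/* Monitor the calling process on any CPU. */
	fd = (int)syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * PERF_RECORD_FORK records (the renamed PERF_EVENT_FORK) now
	 * arrive via this fd's mmap ring buffer; ring-buffer setup is
	 * omitted here for brevity.
	 */
	printf("counter fd = %d\n", fd);
	close(fd);
	return 0;
}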