author    Ingo Molnar <mingo@elte.hu>    2008-12-14 12:34:15 +0100
committer Ingo Molnar <mingo@elte.hu>    2008-12-14 20:31:26 +0100
commit    6c594c21fcb02c662f11c97be4d7d2b73060a205 (patch)
tree      dbd56d57fbc4576e18002a5cc08b1f4327a2248f /kernel
parent    5d6a27d8a096868ae313f71f563b06074a7e34fe (diff)
perfcounters: add task migrations counter
Impact: add new feature, new sw counter

Add a counter that counts the number of cross-CPU migrations a task
is suffering.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
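The new software counter follows the same read-and-accumulate pattern as the
existing context-switch counter: sample a monotonically increasing per-task
value, diff it against the previously saved sample, and add the delta to the
counter's total. Below is a minimal standalone sketch of that pattern in
userspace C; the names sw_counter, sw_counter_update and fake_nr_migrations
are made up for illustration and merely stand in for the kernel's
se.nr_migrations and the atomic64 hw.prev_count/count fields touched by the
patch.

/*
 * Hypothetical userspace sketch of the delta-accumulation scheme used by
 * cpu_migrations_perf_counter_update(); not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct sw_counter {
	uint64_t prev_count;	/* last sampled source value */
	uint64_t count;		/* accumulated event count */
};

/* Stand-in for the per-task migration count the kernel maintains. */
static uint64_t fake_nr_migrations;

static void sw_counter_update(struct sw_counter *c, uint64_t now)
{
	int64_t delta = (int64_t)(now - c->prev_count);

	c->prev_count = now;
	if (delta < 0)		/* mirrors the WARN_ON_ONCE() guard in the patch */
		delta = 0;
	c->count += delta;
}

int main(void)
{
	struct sw_counter c = { 0, 0 };

	fake_nr_migrations = 3;		/* task migrated three times */
	sw_counter_update(&c, fake_nr_migrations);

	fake_nr_migrations = 5;		/* two more migrations */
	sw_counter_update(&c, fake_nr_migrations);

	/* prints "migrations counted: 5" */
	printf("migrations counted: %llu\n", (unsigned long long)c.count);
	return 0;
}

Reading the counter (the new cpu_migrations_perf_counter_read) and disabling
it both funnel into this same update step, so the accumulated count stays
correct no matter how often it is sampled.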
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c  | 49
-rw-r--r--  kernel/sched.c         |  7
2 files changed, 54 insertions, 2 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0928709..fb11e35 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -936,6 +936,52 @@ static const struct hw_perf_counter_ops perf_ops_context_switches = {
.hw_perf_counter_read = context_switches_perf_counter_read,
};
+static inline u64 get_cpu_migrations(void)
+{
+ return current->se.nr_migrations;
+}
+
+static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
+{
+ u64 prev, now;
+ s64 delta;
+
+ prev = atomic64_read(&counter->hw.prev_count);
+ now = get_cpu_migrations();
+
+ atomic64_set(&counter->hw.prev_count, now);
+
+ delta = now - prev;
+ if (WARN_ON_ONCE(delta < 0))
+ delta = 0;
+
+ atomic64_add(delta, &counter->count);
+}
+
+static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
+{
+ cpu_migrations_perf_counter_update(counter);
+}
+
+static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
+{
+ /*
+ * se.nr_migrations is a per-task value already,
+ * so we don't have to clear it on switch-in.
+ */
+}
+
+static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
+{
+ cpu_migrations_perf_counter_update(counter);
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
+ .hw_perf_counter_enable = cpu_migrations_perf_counter_enable,
+ .hw_perf_counter_disable = cpu_migrations_perf_counter_disable,
+ .hw_perf_counter_read = cpu_migrations_perf_counter_read,
+};
+
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
@@ -951,6 +997,9 @@ sw_perf_counter_init(struct perf_counter *counter)
case PERF_COUNT_CONTEXT_SWITCHES:
hw_ops = &perf_ops_context_switches;
break;
+ case PERF_COUNT_CPU_MIGRATIONS:
+ hw_ops = &perf_ops_cpu_migrations;
+ break;
default:
break;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 5c3f410..382cfdb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1852,12 +1852,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
p->se.sleep_start -= clock_offset;
if (p->se.block_start)
p->se.block_start -= clock_offset;
+#endif
if (old_cpu != new_cpu) {
- schedstat_inc(p, se.nr_migrations);
+ p->se.nr_migrations++;
+#ifdef CONFIG_SCHEDSTATS
if (task_hot(p, old_rq->clock, NULL))
schedstat_inc(p, se.nr_forced2_migrations);
- }
#endif
+ }
p->se.vruntime -= old_cfsrq->min_vruntime -
new_cfsrq->min_vruntime;
@@ -2375,6 +2377,7 @@ static void __sched_fork(struct task_struct *p)
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
+ p->se.nr_migrations = 0;
p->se.last_wakeup = 0;
p->se.avg_overlap = 0;