Diffstat (limited to 'include/linux'):

 -rw-r--r--  include/linux/cgroup.h         |  4
 -rw-r--r--  include/linux/cgroup_subsys.h  |  4
 -rw-r--r--  include/linux/ftrace.h         |  2
 -rw-r--r--  include/linux/ftrace_event.h   |  2
 -rw-r--r--  include/linux/perf_event.h     | 50
 -rw-r--r--  include/linux/ring_buffer.h    |  2
 -rw-r--r--  include/linux/sched.h          |  7
 -rw-r--r--  include/linux/syscalls.h       | 10

 8 files changed, 62 insertions(+), 19 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ce104e3..e654fa2 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -474,7 +474,8 @@ struct cgroup_subsys {
struct cgroup *old_cgrp, struct task_struct *tsk,
bool threadgroup);
void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
- void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
+ void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup *old_cgrp, struct task_struct *task);
int (*populate)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
@@ -626,6 +627,7 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg,
/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
unsigned short css_depth(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
#else /* !CONFIG_CGROUPS */
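Note: the exit() callback gains the cgroup the task sits in and the one it came from, so subsystems no longer have to look these up themselves. A minimal sketch of a subsystem adopting the new signature (the subsystem name and body are hypothetical; only the prototype follows the hunk above):

static void example_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
			 struct cgroup *old_cgrp, struct task_struct *task)
{
	/* Both cgroups are handed in, so per-cgroup state can be
	 * reconciled on task exit without extra lookups. */
}

struct cgroup_subsys example_subsys = {
	.name = "example",
	.exit = example_exit,
};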
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index ccefff0..cdbfcb8 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -65,4 +65,8 @@ SUBSYS(net_cls)
SUBSYS(blkio)
#endif
+#ifdef CONFIG_CGROUP_PERF
+SUBSYS(perf)
+#endif
+
/* */
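Note: cgroup_subsys.h is an x-macro file; each consumer defines SUBSYS() before including it, so the new SUBSYS(perf) entry automatically shows up in every generated table when CONFIG_CGROUP_PERF is set. A simplified illustration of how cgroup.h consumes the file:

/* Each SUBSYS(x) line expands to an enum constant here, giving the
 * new perf controller an id without any further registration code. */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS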
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dcd6a7c..ca29e03 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void);
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
static inline int task_curr_ret_stack(struct task_struct *t)
{
@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void)
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
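Note: idle tasks are never forked, so they miss the ftrace_graph_init_task() path; the new per-cpu variant initializes their return stacks explicitly, and the !CONFIG_FUNCTION_GRAPH_TRACER stub keeps callers unconditional. A hedged sketch of a call site during idle-task setup (the surrounding function is hypothetical; the likely real caller is the scheduler's idle initialization):

/* Hypothetical bring-up path: give the per-cpu idle task a graph
 * return stack so the tracer can follow the idle loop. Compiles to
 * nothing when the graph tracer is configured out. */
static void example_init_idle(struct task_struct *idle, int cpu)
{
	ftrace_graph_init_idle_task(idle, cpu);
}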
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 47e3997..22b32af 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -37,7 +37,6 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
- int lock_depth;
};
#define FTRACE_MAX_EVENT \
@@ -208,7 +207,6 @@ struct ftrace_event_call {
#define PERF_MAX_TRACE_SIZE 2048
-#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
extern void destroy_preds(struct ftrace_event_call *call);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index dda5b0a..614615b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -225,8 +225,14 @@ struct perf_event_attr {
};
__u32 bp_type;
- __u64 bp_addr;
- __u64 bp_len;
+ union {
+ __u64 bp_addr;
+ __u64 config1; /* extension of config */
+ };
+ union {
+ __u64 bp_len;
+ __u64 config2; /* extension of config1 */
+ };
};
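Note: wrapping bp_addr/bp_len in anonymous unions lets the same two slots double as generic config1/config2 extensions without changing the ABI: both names address identical bytes. A standalone demonstration of the aliasing (plain userspace C, with the struct reduced to just these two unions):

#include <assert.h>
#include <stdint.h>

/* Reduced stand-in for the tail of perf_event_attr: each union names
 * one 64-bit slot two ways, so layout and size are unchanged. */
struct attr_tail {
	union { uint64_t bp_addr; uint64_t config1; };
	union { uint64_t bp_len;  uint64_t config2; };
};

int main(void)
{
	struct attr_tail t = { .config1 = 0x1234 };

	assert(t.bp_addr == 0x1234);               /* same storage, two names */
	assert(sizeof(t) == 2 * sizeof(uint64_t)); /* no ABI growth */
	return 0;
}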
/*
@@ -464,6 +470,7 @@ enum perf_callchain_context {
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)
+#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
#ifdef __KERNEL__
/*
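Note: with PERF_FLAG_PID_CGROUP, the pid argument of perf_event_open() is reinterpreted as a file descriptor of an open cgroup directory, and the event must be per-cpu (cpu >= 0), as the comment above says. A hedged userspace sketch (the cgroup mount path is an assumption about the local setup):

#include <fcntl.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Count cycles on CPU 0, restricted to tasks in one perf cgroup. */
int open_cgroup_event(void)
{
	struct perf_event_attr attr;
	int cgrp_fd = open("/sys/fs/cgroup/perf/mygroup", O_RDONLY);

	if (cgrp_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid carries the cgroup fd here; cpu must be >= 0. */
	return syscall(__NR_perf_event_open, &attr, cgrp_fd, 0, -1,
		       PERF_FLAG_PID_CGROUP);
}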
@@ -471,6 +478,7 @@ enum perf_callchain_context {
*/
#ifdef CONFIG_PERF_EVENTS
+# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
@@ -539,6 +547,9 @@ struct hw_perf_event {
unsigned long event_base;
int idx;
int last_cpu;
+ unsigned int extra_reg;
+ u64 extra_config;
+ int extra_alloc;
};
struct { /* software */
struct hrtimer hrtimer;
@@ -716,6 +727,22 @@ struct swevent_hlist {
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
+#ifdef CONFIG_CGROUP_PERF
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
+};
+
+struct perf_cgroup {
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info *info; /* timing info, one per cpu */
+};
+#endif
+
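Note: since perf_cgroup_info is allocated per cpu, reading a cgroup's accumulated time means indexing the current CPU's slot. A hedged sketch (the helper name is hypothetical, and it assumes info came from alloc_percpu()):

#include <linux/percpu.h>

/* Hypothetical accessor: this CPU's timing slot for a cgroup. */
static inline struct perf_cgroup_info *
example_cgroup_info(struct perf_cgroup *cgrp)
{
	return this_cpu_ptr(cgrp->info);
}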
/**
* struct perf_event - performance event kernel representation:
*/
@@ -832,6 +859,11 @@ struct perf_event {
struct event_filter *filter;
#endif
+#ifdef CONFIG_CGROUP_PERF
+ struct perf_cgroup *cgrp; /* cgroup the event is attached to */
+ int cgrp_defer_enabled;
+#endif
+
#endif /* CONFIG_PERF_EVENTS */
};
@@ -886,6 +918,7 @@ struct perf_event_context {
u64 generation;
int pin_count;
struct rcu_head rcu_head;
+ int nr_cgroups; /* cgroup events present */
};
/*
@@ -905,6 +938,9 @@ struct perf_cpu_context {
struct list_head rotation_list;
int jiffies_interval;
struct pmu *active_pmu;
+#ifdef CONFIG_CGROUP_PERF
+ struct perf_cgroup *cgrp;
+#endif
};
struct perf_output_handle {
@@ -1040,11 +1076,11 @@ have_event:
__perf_sw_event(event_id, nr, nmi, regs, addr);
}
-extern atomic_t perf_task_events;
+extern atomic_t perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *task)
{
- COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+ COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
}
static inline
@@ -1052,7 +1088,7 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
- COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+ COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
}
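Note: the rename from perf_task_events to perf_sched_events reflects that the counter now gates all scheduler hooks, including the cgroup-switch work, not just per-task events. COND_STMT keeps the context-switch hot path cheap by skipping the call while the counter is zero; a sketch of that guard pattern (the macro body is an assumption about this kernel version):

#include <linux/atomic.h>

/* Assumed shape of the guard used above: only take the slow path
 * while at least one event has bumped the counter. */
#define EXAMPLE_COND_STMT(ptr, stmt)			\
do {							\
	if (unlikely(atomic_read(ptr)))			\
		stmt;					\
} while (0)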
extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1083,6 +1119,10 @@ extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
static inline bool perf_paranoid_tracepoint_raw(void)
{
return sysctl_perf_event_paranoid > -1;
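Note: exporting perf_proc_update_handler lets perf's sysctls run a custom write hook, so derived state can be recomputed as soon as an admin changes a value. A hedged sketch of wiring it into a ctl_table entry (the table itself and its placement are assumptions; the real table lives elsewhere in the kernel):

#include <linux/perf_event.h>
#include <linux/sysctl.h>

/* Hypothetical table entry: route writes through the handler so perf
 * reacts to sample-rate changes immediately rather than lazily. */
static struct ctl_table example_perf_table[] = {
	{
		.procname	= "perf_event_max_sample_rate",
		.data		= &sysctl_perf_event_sample_rate,
		.maxlen		= sizeof(sysctl_perf_event_sample_rate),
		.mode		= 0644,
		.proc_handler	= perf_proc_update_handler,
	},
	{ }
};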
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 8d3a248..ab38ac8 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -100,6 +100,8 @@ void ring_buffer_free(struct ring_buffer *buffer);
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
+void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+
struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
unsigned long length);
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
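Note: ring_buffer_change_overwrite() lets a tracer flip a live buffer between overwrite mode (a full buffer discards the oldest events, flight-recorder style) and producer-drop mode (a full buffer rejects new writes). A hedged sketch of a caller (function name illustrative):

#include <linux/ring_buffer.h>

/* Toggle what happens when the buffer fills: nonzero val enables
 * overwrite of old events, zero makes new writes fail instead. */
static void example_set_flight_recorder(struct ring_buffer *buffer, bool on)
{
	ring_buffer_change_overwrite(buffer, on ? 1 : 0);
}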
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 777d8a5..c57e527 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2578,13 +2578,6 @@ static inline void inc_syscw(struct task_struct *tsk)
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
-/*
- * Call the function if the target task is executing on a CPU right now:
- */
-extern void task_oncpu_function_call(struct task_struct *p,
- void (*func) (void *info), void *info);
-
-
#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
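Note: task_oncpu_function_call() disappears from the public scheduler API. Callers needing the same effect can aim a cross-call at the task's CPU and recheck inside, since the IPI can race with a context switch; a hedged sketch of that replacement pattern (names illustrative, not the scheduler's actual helper):

#include <linux/sched.h>
#include <linux/smp.h>

/* Run func on the CPU where p is running. func itself must verify
 * that p is still current there, because p may migrate or switch out
 * between the check and the IPI. */
static void example_oncpu_call(struct task_struct *p,
			       void (*func)(void *info), void *info)
{
	if (task_curr(p))
		smp_call_function_single(task_cpu(p), func, info, 1);
}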
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2d9b79c0..4acc6b6 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -133,11 +133,11 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.class = &event_class_syscall_enter, \
.event.funcs = &enter_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
+ .flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \
- *__event_enter_##sname = &event_enter_##sname; \
- __TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)
+ *__event_enter_##sname = &event_enter_##sname;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
@@ -147,11 +147,11 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.class = &event_class_syscall_exit, \
.event.funcs = &exit_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
+ .flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \
- *__event_exit_##sname = &event_exit_##sname; \
- __TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)
+ *__event_exit_##sname = &event_exit_##sname;
#define SYSCALL_METADATA(sname, nb) \
SYSCALL_TRACE_ENTER_EVENT(sname); \
@@ -159,6 +159,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
static struct syscall_metadata __used \
__syscall_meta_##sname = { \
.name = "sys"#sname, \
+ .syscall_nr = -1, /* Filled in at boot */ \
.nb_args = nb, \
.types = types_##sname, \
.args = args_##sname, \
@@ -176,6 +177,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
static struct syscall_metadata __used \
__syscall_meta__##sname = { \
.name = "sys_"#sname, \
+ .syscall_nr = -1, /* Filled in at boot */ \
.nb_args = 0, \
.enter_event = &event_enter__##sname, \
.exit_event = &event_exit__##sname, \
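Note: initializing syscall_nr to -1 makes "unresolved" an explicit state; boot code then fills in the real number by matching each metadata entry's recorded name against the syscall table, and entries left at -1 simply cannot be traced. A hedged sketch of that resolution step (both example_* names are hypothetical stand-ins):

#include <linux/string.h>

extern const char *example_syscall_name(int nr);	/* hypothetical */

/* Boot-time fixup: find the metadata's slot by name. */
static void __init example_resolve_nr(struct syscall_metadata *meta)
{
	int nr;

	for (nr = 0; nr < NR_syscalls; nr++) {
		if (!strcmp(meta->name, example_syscall_name(nr))) {
			meta->syscall_nr = nr;	/* replaces the -1 default */
			return;
		}
	}
	/* still -1: this syscall stays untraceable */
}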