Diffstat (limited to 'kernel')
-rw-r--r--  kernel/async.c                        94
-rw-r--r--  kernel/fork.c                          1
-rw-r--r--  kernel/irq/numa_migrate.c              7
-rw-r--r--  kernel/power/main.c                   26
-rw-r--r--  kernel/sched.c                         4
-rw-r--r--  kernel/sys.c                          16
-rw-r--r--  kernel/trace/kmemtrace.c              23
-rw-r--r--  kernel/trace/ring_buffer.c             9
-rw-r--r--  kernel/trace/trace.c                 207
-rw-r--r--  kernel/trace/trace.h                  16
-rw-r--r--  kernel/trace/trace_boot.c             24
-rw-r--r--  kernel/trace/trace_branch.c           33
-rw-r--r--  kernel/trace/trace_functions.c        25
-rw-r--r--  kernel/trace/trace_functions_graph.c   1
-rw-r--r--  kernel/trace/trace_hw_branches.c       8
-rw-r--r--  kernel/trace/trace_irqsoff.c          10
-rw-r--r--  kernel/trace/trace_mmiotrace.c        24
-rw-r--r--  kernel/trace/trace_nop.c               1
-rw-r--r--  kernel/trace/trace_output.c           13
-rw-r--r--  kernel/trace/trace_power.c            24
-rw-r--r--  kernel/trace/trace_sched_switch.c     12
-rw-r--r--  kernel/trace/trace_sched_wakeup.c     12
-rw-r--r--  kernel/trace/trace_selftest.c         18
-rw-r--r--  kernel/trace/trace_sysprof.c          14
-rw-r--r--  kernel/wait.c                         59
25 files changed, 346 insertions, 335 deletions
diff --git a/kernel/async.c b/kernel/async.c
index 608b32b..f565891 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -54,6 +54,7 @@ asynchronous and synchronous parts of the kernel.
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
#include <asm/atomic.h>
static async_cookie_t next_cookie = 1;
@@ -132,21 +133,23 @@ static void run_one_entry(void)
entry = list_first_entry(&async_pending, struct async_entry, list);
/* 2) move it to the running queue */
- list_del(&entry->list);
- list_add_tail(&entry->list, &async_running);
+ list_move_tail(&entry->list, entry->running);
spin_unlock_irqrestore(&async_lock, flags);
/* 3) run it (and print duration)*/
if (initcall_debug && system_state == SYSTEM_BOOTING) {
- printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current));
+ printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+ entry->func, task_pid_nr(current));
calltime = ktime_get();
}
entry->func(entry->data, entry->cookie);
if (initcall_debug && system_state == SYSTEM_BOOTING) {
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie,
- entry->func, ktime_to_ns(delta) >> 10);
+ printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+ (long long)entry->cookie,
+ entry->func,
+ (long long)ktime_to_ns(delta) >> 10);
}
/* 4) remove it from the running queue */
@@ -205,18 +208,44 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
return newcookie;
}
+/**
+ * async_schedule - schedule a function for asynchronous execution
+ * @ptr: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
- return __async_schedule(ptr, data, &async_pending);
+ return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);
-async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
+/**
+ * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
+ * @ptr: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ * @running: running list for the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @running may be used in the async_synchronize_*_domain() functions
+ * to wait within a certain synchronization domain rather than globally.
+ * The synchronization domain is specified by the running queue @running.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
+ struct list_head *running)
{
return __async_schedule(ptr, data, running);
}
-EXPORT_SYMBOL_GPL(async_schedule_special);
+EXPORT_SYMBOL_GPL(async_schedule_domain);
+/**
+ * async_synchronize_full - synchronize all asynchronous function calls
+ *
+ * This function waits until all asynchronous function calls have been done.
+ */
void async_synchronize_full(void)
{
do {
@@ -225,13 +254,30 @@ void async_synchronize_full(void)
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
-void async_synchronize_full_special(struct list_head *list)
+/**
+ * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
+ * @list: running list to synchronize on
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @list have been done.
+ */
+void async_synchronize_full_domain(struct list_head *list)
{
- async_synchronize_cookie_special(next_cookie, list);
+ async_synchronize_cookie_domain(next_cookie, list);
}
-EXPORT_SYMBOL_GPL(async_synchronize_full_special);
+EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
-void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
+/**
+ * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+ * @running: running list to synchronize on
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @running submitted
+ * prior to @cookie have been done.
+ */
+void async_synchronize_cookie_domain(async_cookie_t cookie,
+ struct list_head *running)
{
ktime_t starttime, delta, endtime;
@@ -247,14 +293,22 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
delta = ktime_sub(endtime, starttime);
printk("async_continuing @ %i after %lli usec\n",
- task_pid_nr(current), ktime_to_ns(delta) >> 10);
+ task_pid_nr(current),
+ (long long)ktime_to_ns(delta) >> 10);
}
}
-EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);
+EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
+/**
+ * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+ *
+ * This function waits until all asynchronous function calls prior to @cookie
+ * have been done.
+ */
void async_synchronize_cookie(async_cookie_t cookie)
{
- async_synchronize_cookie_special(cookie, &async_running);
+ async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
@@ -315,7 +369,11 @@ static int async_manager_thread(void *unused)
ec = atomic_read(&entry_count);
while (tc < ec && tc < MAX_THREADS) {
- kthread_run(async_thread, NULL, "async/%i", tc);
+ if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
+ tc))) {
+ msleep(100);
+ continue;
+ }
atomic_inc(&thread_count);
tc++;
}
@@ -330,7 +388,9 @@ static int async_manager_thread(void *unused)
static int __init async_init(void)
{
if (async_enabled)
- kthread_run(async_manager_thread, NULL, "async/mgr");
+ if (IS_ERR(kthread_run(async_manager_thread, NULL,
+ "async/mgr")))
+ async_enabled = 0;
return 0;
}
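
The _special -> _domain rename above turns the running list into a per-caller synchronization domain. A minimal usage sketch, assuming a hypothetical driver (all mydrv_* names invented) that wants to wait only for its own async work instead of synchronizing globally:

	#include <linux/async.h>

	static LIST_HEAD(mydrv_async_domain);	/* this domain's running list */

	static void mydrv_hw_init(void *data, async_cookie_t cookie)
	{
		struct mydrv_device *dev = data;	/* hypothetical type */
		/* slow hardware bring-up, moved off the probe path */
	}

	static int mydrv_probe(struct mydrv_device *dev)
	{
		async_schedule_domain(mydrv_hw_init, dev, &mydrv_async_domain);
		return 0;
	}

	static void mydrv_remove(struct mydrv_device *dev)
	{
		/* waits only for entries queued in mydrv_async_domain */
		async_synchronize_full_domain(&mydrv_async_domain);
	}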
diff --git a/kernel/fork.c b/kernel/fork.c
index 242a706..6d5dbb7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1005,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
+ retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
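
The added retval assignment matters because copy_process() funnels failures through shared cleanup labels, so each new check must load its own error code first. Simplified shape of the pattern (labels abbreviated, not verbatim):

	retval = -ENOMEM;
	if (!some_earlier_allocation())		/* illustrative step */
		goto bad_fork_cleanup;
	retval = -EAGAIN;			/* without this line, hitting the
						   thread limit would leak the
						   previous step's error code */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;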
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c..acd8835 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -71,7 +71,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
desc = irq_desc_ptrs[irq];
if (desc && old_desc != desc)
- goto out_unlock;
+ goto out_unlock;
node = cpu_to_node(cpu);
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
@@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
init_copy_one_irq_desc(irq, old_desc, desc, cpu);
irq_desc_ptrs[irq] = desc;
+ spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
free_one_irq_desc(old_desc, desc);
+ spin_unlock(&old_desc->lock);
kfree(old_desc);
+ spin_lock(&desc->lock);
+
+ return desc;
out_unlock:
spin_unlock_irqrestore(&sparse_irq_lock, flags);
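
The reworked exit path is a lock handoff: sparse_irq_lock is dropped before any freeing, and the function now returns holding the new desc->lock in place of old_desc->lock (the caller appears to enter and leave with a descriptor lock held). Condensed ordering of the hunk above:

	spin_unlock_irqrestore(&sparse_irq_lock, flags);  /* no free under it */
	free_one_irq_desc(old_desc, desc);
	spin_unlock(&old_desc->lock);	/* release the old descriptor... */
	kfree(old_desc);
	spin_lock(&desc->lock);		/* ...return holding the new one */
	return desc;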
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 2399888..b4d2190 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -57,16 +57,6 @@ int pm_notifier_call_chain(unsigned long val)
#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;
-static int suspend_test(int level)
-{
- if (pm_test_level == level) {
- printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
- mdelay(5000);
- return 1;
- }
- return 0;
-}
-
static const char * const pm_tests[__TEST_AFTER_LAST] = {
[TEST_NONE] = "none",
[TEST_CORE] = "core",
@@ -125,14 +115,24 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
}
power_attr(pm_test);
-#else /* !CONFIG_PM_DEBUG */
-static inline int suspend_test(int level) { return 0; }
-#endif /* !CONFIG_PM_DEBUG */
+#endif /* CONFIG_PM_DEBUG */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
+static int suspend_test(int level)
+{
+#ifdef CONFIG_PM_DEBUG
+ if (pm_test_level == level) {
+ printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+ mdelay(5000);
+ return 1;
+ }
+#endif /* CONFIG_PM_DEBUG */
+ return 0;
+}
+
#ifdef CONFIG_PM_TEST_SUSPEND
/*
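
Moving suspend_test() under CONFIG_SUSPEND keeps the call sites unconditional while the 5-second debug delay compiles down to "return 0" when PM debugging is off. The callers gate each suspend phase in this shape (simplified sketch of suspend_devices_and_enter(), not verbatim):

	if (suspend_test(TEST_DEVICES))		/* pm_test == "devices" */
		goto Recover_platform;		/* pause 5s, then back out */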
diff --git a/kernel/sched.c b/kernel/sched.c
index 566c8c9..1ffb895 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4697,8 +4697,8 @@ EXPORT_SYMBOL(default_wake_function);
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+ int nr_exclusive, int sync, void *key)
{
wait_queue_t *curr, *next;
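
__wake_up_common() loses its static qualifier so kernel/wait.c can call it with q->lock already held (see abort_exclusive_wait() in the wait.c hunk below). The locked-caller shape is:

	spin_lock_irqsave(&q->lock, flags);
	if (waitqueue_active(q))
		__wake_up_common(q, mode, 1, 0, key);	/* lock already held */
	spin_unlock_irqrestore(&q->lock, flags);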
diff --git a/kernel/sys.c b/kernel/sys.c
index e7dc0e1..f145c41 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1525,22 +1525,14 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
return -EINVAL;
if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
+ if (new_rlim.rlim_cur > new_rlim.rlim_max)
+ return -EINVAL;
old_rlim = current->signal->rlim + resource;
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!capable(CAP_SYS_RESOURCE))
return -EPERM;
-
- if (resource == RLIMIT_NOFILE) {
- if (new_rlim.rlim_max == RLIM_INFINITY)
- new_rlim.rlim_max = sysctl_nr_open;
- if (new_rlim.rlim_cur == RLIM_INFINITY)
- new_rlim.rlim_cur = sysctl_nr_open;
- if (new_rlim.rlim_max > sysctl_nr_open)
- return -EPERM;
- }
-
- if (new_rlim.rlim_cur > new_rlim.rlim_max)
- return -EINVAL;
+ if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
+ return -EPERM;
retval = security_task_setrlimit(resource, &new_rlim);
if (retval)
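
The reordering also changes RLIMIT_NOFILE semantics: an infinite hard limit is no longer silently clamped to sysctl_nr_open, it now fails outright. From userspace (illustrative, error values read off the new check order):

	#include <stdio.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl = { .rlim_cur = 4096, .rlim_max = RLIM_INFINITY };

		/* rlim_cur > rlim_max            -> EINVAL (checked first now)
		 * RLIMIT_NOFILE max > fs.nr_open -> EPERM, RLIM_INFINITY
		 *                                   included; no more clamping */
		if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
			perror("setrlimit");
		return 0;
	}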
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index f04c062..ae201b3 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -272,19 +272,16 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
struct ring_buffer_event *event;
struct kmemtrace_alloc_entry *entry;
struct trace_array *tr = kmemtrace_array;
- unsigned long irq_flags;
if (!kmem_tracing_enabled)
return;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
+ sizeof(*entry), 0, 0);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_KMEM_ALLOC;
entry->call_site = call_site;
entry->ptr = ptr;
entry->bytes_req = bytes_req;
@@ -292,9 +289,7 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
entry->gfp_flags = gfp_flags;
entry->node = node;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, 0);
}
EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
@@ -305,26 +300,20 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
struct ring_buffer_event *event;
struct kmemtrace_free_entry *entry;
struct trace_array *tr = kmemtrace_array;
- unsigned long irq_flags;
if (!kmem_tracing_enabled)
return;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
+ sizeof(*entry), 0, 0);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
-
- entry->ent.type = TRACE_KMEM_FREE;
entry->type_id = type_id;
entry->call_site = call_site;
entry->ptr = ptr;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, 0);
}
EXPORT_SYMBOL(kmemtrace_mark_free);
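
This is the conversion pattern repeated through the rest of the patch: trace_buffer_lock_reserve() stamps the common header (type, flags, pc) at reserve time, and trace_buffer_unlock_commit() folds in the stack traces and trace_wake_up() that callers used to open-code. Generic shape (TRACE_FOO and the payload fields are placeholders):

	event = trace_buffer_lock_reserve(tr, TRACE_FOO, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);	/* header already filled in */
	entry->field = value;			/* payload only */
	trace_buffer_unlock_commit(tr, event, flags, pc);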
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5ee3444..53ba3a6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1284,7 +1284,6 @@ static DEFINE_PER_CPU(int, rb_need_resched);
* ring_buffer_lock_reserve - reserve a part of the buffer
* @buffer: the ring buffer to reserve from
* @length: the length of the data to reserve (excluding event header)
- * @flags: a pointer to save the interrupt flags
*
* Returns a reserved event on the ring buffer to copy directly to.
* The user of this interface will need to get the body to write into
@@ -1297,9 +1296,7 @@ static DEFINE_PER_CPU(int, rb_need_resched);
* If NULL is returned, then nothing has been allocated or locked.
*/
struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
- unsigned long length,
- unsigned long *flags)
+ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
@@ -1366,15 +1363,13 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
* ring_buffer_unlock_commit - commit a reserved event
* @buffer: The buffer to commit to
* @event: The event pointer to commit.
- * @flags: the interrupt flags received from ring_buffer_lock_reserve.
*
* This commits the data to the ring buffer, and releases any locks held.
*
* Must be paired with ring_buffer_lock_reserve.
*/
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags)
+ struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu = raw_smp_processor_id();
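
With the flags parameter gone, a reserve/commit pair no longer threads interrupt state through the caller. Paired usage after this change (struct my_entry is a hypothetical payload):

	struct ring_buffer_event *event;
	struct my_entry *e;

	event = ring_buffer_lock_reserve(buffer, sizeof(*e));
	if (event) {
		e = ring_buffer_event_data(event);
		e->val = 42;
		ring_buffer_unlock_commit(buffer, event);
	}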
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bd4d9f8..03fbd4c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -522,23 +522,24 @@ int register_tracer(struct tracer *type)
tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
- if (!ret && default_bootup_tracer) {
- if (!strncmp(default_bootup_tracer, type->name,
- BOOTUP_TRACER_SIZE)) {
- printk(KERN_INFO "Starting tracer '%s'\n",
- type->name);
- /* Do we want this tracer to start on bootup? */
- tracing_set_tracer(type->name);
- default_bootup_tracer = NULL;
- /* disable other selftests, since this will break it. */
- tracing_selftest_disabled = 1;
+ if (ret || !default_bootup_tracer)
+ goto out_unlock;
+
+ if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+ goto out_unlock;
+
+ printk(KERN_INFO "Starting tracer '%s'\n", type->name);
+ /* Do we want this tracer to start on bootup? */
+ tracing_set_tracer(type->name);
+ default_bootup_tracer = NULL;
+ /* disable other selftests, since this will break it. */
+ tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
- printk(KERN_INFO "Disabling FTRACE selftests due"
- " to running tracer '%s'\n", type->name);
+ printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
+ type->name);
#endif
- }
- }
+ out_unlock:
lock_kernel();
return ret;
}
@@ -775,78 +776,100 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+ unsigned char type,
+ unsigned long len,
+ unsigned long flags, int pc)
+{
+ struct ring_buffer_event *event;
+
+ event = ring_buffer_lock_reserve(tr->buffer, len);
+ if (event != NULL) {
+ struct trace_entry *ent = ring_buffer_event_data(event);
+
+ tracing_generic_entry_update(ent, flags, pc);
+ ent->type = type;
+ }
+
+ return event;
+}
+static void ftrace_trace_stack(struct trace_array *tr,
+ unsigned long flags, int skip, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+ unsigned long flags, int pc);
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc)
+{
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ ftrace_trace_stack(tr, flags, 6, pc);
+ ftrace_trace_userstack(tr, flags, pc);
+ trace_wake_up();
+}
+
void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_entry *entry;
- unsigned long irq_flags;
/* If we are reading the ring buffer, don't trace */
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+ flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_FN;
entry->ip = ip;
entry->parent_ip = parent_ip;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void __trace_graph_entry(struct trace_array *tr,
- struct trace_array_cpu *data,
struct ftrace_graph_ent *trace,
unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_graph_ent_entry *entry;
- unsigned long irq_flags;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_GRAPH_ENT;
entry->graph_ent = *trace;
- ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+ ring_buffer_unlock_commit(global_trace.buffer, event);
}
static void __trace_graph_return(struct trace_array *tr,
- struct trace_array_cpu *data,
struct ftrace_graph_ret *trace,
unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_graph_ret_entry *entry;
- unsigned long irq_flags;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_GRAPH_RET;
entry->ret = *trace;
- ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+ ring_buffer_unlock_commit(global_trace.buffer, event);
}
#endif
@@ -856,11 +879,10 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
int pc)
{
if (likely(!atomic_read(&data->disabled)))
- trace_function(tr, data, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, flags, pc);
}
static void __ftrace_trace_stack(struct trace_array *tr,
- struct trace_array_cpu *data,
unsigned long flags,
int skip, int pc)
{
@@ -868,16 +890,12 @@ static void __ftrace_trace_stack(struct trace_array *tr,
struct ring_buffer_event *event;
struct stack_entry *entry;
struct stack_trace trace;
- unsigned long irq_flags;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_STACK;
-
memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0;
@@ -886,49 +904,43 @@ static void __ftrace_trace_stack(struct trace_array *tr,
trace.entries = entry->caller;
save_stack_trace(&trace);
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
#endif
}
static void ftrace_trace_stack(struct trace_array *tr,
- struct trace_array_cpu *data,
unsigned long flags,
int skip, int pc)
{
if (!(trace_flags & TRACE_ITER_STACKTRACE))
return;
- __ftrace_trace_stack(tr, data, flags, skip, pc);
+ __ftrace_trace_stack(tr, flags, skip, pc);
}
void __trace_stack(struct trace_array *tr,
- struct trace_array_cpu *data,
unsigned long flags,
int skip, int pc)
{
- __ftrace_trace_stack(tr, data, flags, skip, pc);
+ __ftrace_trace_stack(tr, flags, skip, pc);
}
static void ftrace_trace_userstack(struct trace_array *tr,
- struct trace_array_cpu *data,
- unsigned long flags, int pc)
+ unsigned long flags, int pc)
{
#ifdef CONFIG_STACKTRACE
struct ring_buffer_event *event;
struct userstack_entry *entry;
struct stack_trace trace;
- unsigned long irq_flags;
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_USER_STACK;
memset(&entry->caller, 0, sizeof(entry->caller));
@@ -938,70 +950,56 @@ static void ftrace_trace_userstack(struct trace_array *tr,
trace.entries = entry->caller;
save_stack_trace_user(&trace);
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
#endif
}
-void __trace_userstack(struct trace_array *tr,
- struct trace_array_cpu *data,
- unsigned long flags)
+void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
- ftrace_trace_userstack(tr, data, flags, preempt_count());
+ ftrace_trace_userstack(tr, flags, preempt_count());
}
static void
-ftrace_trace_special(void *__tr, void *__data,
+ftrace_trace_special(void *__tr,
unsigned long arg1, unsigned long arg2, unsigned long arg3,
int pc)
{
struct ring_buffer_event *event;
- struct trace_array_cpu *data = __data;
struct trace_array *tr = __tr;
struct special_entry *entry;
- unsigned long irq_flags;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+ sizeof(*entry), 0, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, pc);
- entry->ent.type = TRACE_SPECIAL;
entry->arg1 = arg1;
entry->arg2 = arg2;
entry->arg3 = arg3;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
- ftrace_trace_stack(tr, data, irq_flags, 4, pc);
- ftrace_trace_userstack(tr, data, irq_flags, pc);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void
__trace_special(void *__tr, void *__data,
unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
- ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+ ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
}
void
tracing_sched_switch_trace(struct trace_array *tr,
- struct trace_array_cpu *data,
struct task_struct *prev,
struct task_struct *next,
unsigned long flags, int pc)
{
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- unsigned long irq_flags;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_CTX;
entry->prev_pid = prev->pid;
entry->prev_prio = prev->prio;
entry->prev_state = prev->state;
@@ -1009,29 +1007,23 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry->next_prio = next->prio;
entry->next_state = next->state;
entry->next_cpu = task_cpu(next);
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
- ftrace_trace_stack(tr, data, flags, 5, pc);
- ftrace_trace_userstack(tr, data, flags, pc);
+ trace_buffer_unlock_commit(tr, event, flags, pc);
}
void
tracing_sched_wakeup_trace(struct trace_array *tr,
- struct trace_array_cpu *data,
struct task_struct *wakee,
struct task_struct *curr,
unsigned long flags, int pc)
{
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- unsigned long irq_flags;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_WAKE;
entry->prev_pid = curr->pid;
entry->prev_prio = curr->prio;
entry->prev_state = curr->state;
@@ -1039,11 +1031,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_prio = wakee->prio;
entry->next_state = wakee->state;
entry->next_cpu = task_cpu(wakee);
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
- ftrace_trace_stack(tr, data, flags, 6, pc);
- ftrace_trace_userstack(tr, data, flags, pc);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, flags, pc);
}
void
@@ -1064,7 +1052,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
data = tr->data[cpu];
if (likely(atomic_inc_return(&data->disabled) == 1))
- ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
+ ftrace_trace_special(tr, arg1, arg2, arg3, pc);
atomic_dec(&data->disabled);
local_irq_restore(flags);
@@ -1092,7 +1080,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
- __trace_graph_entry(tr, data, trace, flags, pc);
+ __trace_graph_entry(tr, trace, flags, pc);
}
/* Only do the atomic if it is not already set */
if (!test_tsk_trace_graph(current))
@@ -1118,7 +1106,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
- __trace_graph_return(tr, data, trace, flags, pc);
+ __trace_graph_return(tr, trace, flags, pc);
}
if (!trace->depth)
clear_tsk_trace_graph(current);
@@ -1412,7 +1400,7 @@ static enum print_line_t print_lat_fmt(struct trace_iterator *iter)
goto partial;
}
- if (event && event->latency_trace)
+ if (event)
return event->latency_trace(iter, sym_flags);
if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
@@ -1441,7 +1429,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
goto partial;
}
- if (event && event->trace)
+ if (event)
return event->trace(iter, sym_flags);
if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
@@ -1467,7 +1455,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
}
event = ftrace_find_event(entry->type);
- if (event && event->raw)
+ if (event)
return event->raw(iter, 0);
if (!trace_seq_printf(s, "%d ?\n", entry->type))
@@ -1494,7 +1482,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
}
event = ftrace_find_event(entry->type);
- if (event && event->hex) {
+ if (event) {
enum print_line_t ret = event->hex(iter, 0);
if (ret != TRACE_TYPE_HANDLED)
return ret;
@@ -1536,10 +1524,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
}
event = ftrace_find_event(entry->type);
- if (event && event->binary)
- return event->binary(iter, 0);
-
- return TRACE_TYPE_HANDLED;
+ return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}
static int trace_empty(struct trace_iterator *iter)
@@ -2186,6 +2171,12 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
+int tracer_init(struct tracer *t, struct trace_array *tr)
+{
+ tracing_reset_online_cpus(tr);
+ return t->init(tr);
+}
+
static int tracing_set_tracer(const char *buf)
{
struct trace_array *tr = &global_trace;
@@ -2210,7 +2201,7 @@ static int tracing_set_tracer(const char *buf)
current_trace = t;
if (t->init) {
- ret = t->init(tr);
+ ret = tracer_init(t, tr);
if (ret)
goto out;
}
@@ -2854,18 +2845,16 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
trace_buf[len] = 0;
size = sizeof(*entry) + len + 1;
- event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
if (!event)
goto out_unlock;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, irq_flags, pc);
- entry->ent.type = TRACE_PRINT;
entry->ip = ip;
entry->depth = depth;
memcpy(&entry->buf, trace_buf, len);
entry->buf[len] = 0;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
out_unlock:
spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
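
Because tracer_init() now resets the buffers centrally before calling ->init(), the per-tracer hunks below simply delete their tracing_reset_online_cpus() calls. A minimal ->init() after this change reduces to (foo_* names hypothetical):

	static struct trace_array *foo_trace;

	static int foo_trace_init(struct trace_array *tr)
	{
		foo_trace = tr;		/* buffers already reset by tracer_init() */
		tracing_start_cmdline_record();
		return 0;
	}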
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f92aba5..b9838f4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -394,6 +394,7 @@ struct trace_iterator {
cpumask_var_t started;
};
+int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
@@ -402,6 +403,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
+struct ring_buffer_event;
+
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+ unsigned char type,
+ unsigned long len,
+ unsigned long flags,
+ int pc);
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -418,14 +430,12 @@ void ftrace(struct trace_array *tr,
unsigned long parent_ip,
unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
- struct trace_array_cpu *data,
struct task_struct *prev,
struct task_struct *next,
unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);
void tracing_sched_wakeup_trace(struct trace_array *tr,
- struct trace_array_cpu *data,
struct task_struct *wakee,
struct task_struct *cur,
unsigned long flags, int pc);
@@ -435,7 +445,6 @@ void trace_special(struct trace_array *tr,
unsigned long arg2,
unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
- struct trace_array_cpu *data,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
@@ -461,7 +470,6 @@ void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
void __trace_stack(struct trace_array *tr,
- struct trace_array_cpu *data,
unsigned long flags,
int skip, int pc);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 1f07895..7a30fc4 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -132,7 +132,6 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
struct ring_buffer_event *event;
struct trace_boot_call *entry;
- unsigned long irq_flags;
struct trace_array *tr = boot_trace;
if (!tr || !pre_initcalls_finished)
@@ -144,18 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
+ sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_BOOT_CALL;
entry->boot_call = *bt;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
-
+ trace_buffer_unlock_commit(tr, event, 0, 0);
out:
preempt_enable();
}
@@ -164,7 +158,6 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
struct ring_buffer_event *event;
struct trace_boot_ret *entry;
- unsigned long irq_flags;
struct trace_array *tr = boot_trace;
if (!tr || !pre_initcalls_finished)
@@ -173,18 +166,13 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
+ sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_BOOT_RET;
entry->boot_ret = *bt;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
-
+ trace_buffer_unlock_commit(tr, event, 0, 0);
out:
preempt_enable();
}
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7ac72a4..f8ae2c5 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -33,7 +33,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
struct trace_array *tr = branch_tracer;
struct ring_buffer_event *event;
struct trace_branch *entry;
- unsigned long flags, irq_flags;
+ unsigned long flags;
int cpu, pc;
const char *p;
@@ -52,15 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ pc = preempt_count();
+ event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+ sizeof(*entry), flags, pc);
if (!event)
goto out;
- pc = preempt_count();
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_BRANCH;
/* Strip off the path, only save the file */
p = f->file + strlen(f->file);
@@ -75,7 +73,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->line = f->line;
entry->correct = val == expect;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
@@ -133,7 +131,6 @@ static void stop_branch_trace(struct trace_array *tr)
static int branch_trace_init(struct trace_array *tr)
{
- tracing_reset_online_cpus(tr);
start_branch_trace(tr);
return 0;
}
@@ -143,23 +140,6 @@ static void branch_trace_reset(struct trace_array *tr)
stop_branch_trace(tr);
}
-static int
-trace_print_print(struct trace_seq *s, struct trace_entry *entry, int flags)
-{
- struct print_entry *field;
-
- trace_assign_type(field, entry);
-
- if (seq_print_ip_sym(s, field->ip, flags))
- goto partial;
-
- if (trace_seq_printf(s, ": %s", field->buf))
- goto partial;
-
- partial:
- return TRACE_TYPE_PARTIAL_LINE;
-}
-
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
int flags)
{
@@ -182,9 +162,6 @@ static struct trace_event trace_branch_event = {
.type = TRACE_BRANCH,
.trace = trace_branch_print,
.latency_trace = trace_branch_print,
- .raw = trace_nop_print,
- .hex = trace_nop_print,
- .binary = trace_nop_print,
};
static struct tracer branch_trace __read_mostly =
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index b3a320f..36bf956 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -24,32 +24,21 @@ static struct trace_array *func_trace;
static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
-static void start_function_trace(struct trace_array *tr)
+static int function_trace_init(struct trace_array *tr)
{
func_trace = tr;
tr->cpu = get_cpu();
- tracing_reset_online_cpus(tr);
put_cpu();
tracing_start_cmdline_record();
tracing_start_function_trace();
-}
-
-static void stop_function_trace(struct trace_array *tr)
-{
- tracing_stop_function_trace();
- tracing_stop_cmdline_record();
-}
-
-static int function_trace_init(struct trace_array *tr)
-{
- start_function_trace(tr);
return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
- stop_function_trace(tr);
+ tracing_stop_function_trace();
+ tracing_stop_cmdline_record();
}
static void function_trace_start(struct trace_array *tr)
@@ -78,7 +67,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1))
- trace_function(tr, data, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, flags, pc);
atomic_dec(&data->disabled);
ftrace_preempt_enable(resched);
@@ -108,7 +97,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
if (likely(disabled == 1)) {
pc = preempt_count();
- trace_function(tr, data, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, flags, pc);
}
atomic_dec(&data->disabled);
@@ -139,7 +128,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
if (likely(disabled == 1)) {
pc = preempt_count();
- trace_function(tr, data, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, flags, pc);
/*
* skip over 5 funcs:
* __ftrace_trace_stack,
@@ -148,7 +137,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
* ftrace_list_func
* ftrace_call
*/
- __trace_stack(tr, data, flags, 5, pc);
+ __trace_stack(tr, flags, 5, pc);
}
atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index c97594d..222f97d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -56,7 +56,6 @@ static int graph_trace_init(struct trace_array *tr)
&trace_graph_entry);
if (ret)
return ret;
- tracing_reset_online_cpus(tr);
tracing_start_cmdline_record();
return 0;
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 549238a..e3e7db6 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -132,7 +132,6 @@ static int bts_trace_init(struct trace_array *tr)
hw_branch_trace = tr;
register_hotcpu_notifier(&bts_hotcpu_notifier);
- tracing_reset_online_cpus(tr);
bts_trace_start(tr);
return 0;
@@ -175,7 +174,7 @@ void trace_hw_branch(u64 from, u64 to)
struct trace_array *tr = hw_branch_trace;
struct ring_buffer_event *event;
struct hw_branch_entry *entry;
- unsigned long irq1, irq2;
+ unsigned long irq1;
int cpu;
if (unlikely(!tr))
@@ -189,7 +188,8 @@ void trace_hw_branch(u64 from, u64 to)
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
+ event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+ sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -197,7 +197,7 @@ void trace_hw_branch(u64 from, u64 to)
entry->ent.type = TRACE_HW_BRANCHES;
entry->from = from;
entry->to = to;
- ring_buffer_unlock_commit(tr->buffer, event, irq2);
+ trace_buffer_unlock_commit(tr, event, 0, 0);
out:
atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index ed344b0..c6b442d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1))
- trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+ trace_function(tr, ip, parent_ip, flags, preempt_count());
atomic_dec(&data->disabled);
}
@@ -153,7 +153,7 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(delta))
goto out_unlock;
- trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+ trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
latency = nsecs_to_usecs(delta);
@@ -177,7 +177,7 @@ out:
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
tracing_reset(tr, cpu);
- trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+ trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
static inline void
@@ -210,7 +210,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
local_save_flags(flags);
- trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+ trace_function(tr, ip, parent_ip, flags, preempt_count());
per_cpu(tracing_cpu, cpu) = 1;
@@ -244,7 +244,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
atomic_inc(&data->disabled);
local_save_flags(flags);
- trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+ trace_function(tr, ip, parent_ip, flags, preempt_count());
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index ec78e24..c401b90 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,21 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
{
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
- unsigned long irq_flags;
+ int pc = preempt_count();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+ sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
return;
}
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, preempt_count());
- entry->ent.type = TRACE_MMIO_RW;
entry->rw = *rw;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -337,21 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
{
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
- unsigned long irq_flags;
+ int pc = preempt_count();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+ sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
return;
}
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, preempt_count());
- entry->ent.type = TRACE_MMIO_MAP;
entry->map = *map;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void mmio_trace_mapping(struct mmiotrace_map *map)
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 087b6cb..9aa84bd 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -48,7 +48,6 @@ static void stop_nop_trace(struct trace_array *tr)
static int nop_trace_init(struct trace_array *tr)
{
ctx_trace = tr;
- tracing_reset_online_cpus(tr);
start_nop_trace(tr);
return 0;
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 463a310..9fc8150 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -435,6 +435,17 @@ int register_ftrace_event(struct trace_event *event)
if (ftrace_find_event(event->type))
goto out;
+ if (event->trace == NULL)
+ event->trace = trace_nop_print;
+ if (event->latency_trace == NULL)
+ event->latency_trace = trace_nop_print;
+ if (event->raw == NULL)
+ event->raw = trace_nop_print;
+ if (event->hex == NULL)
+ event->hex = trace_nop_print;
+ if (event->binary == NULL)
+ event->binary = trace_nop_print;
+
key = event->type & (EVENT_HASHSIZE - 1);
hlist_add_head_rcu(&event->node, &event_hash[key]);
@@ -874,8 +885,6 @@ static struct trace_event trace_print_event = {
.trace = trace_print_print,
.latency_trace = trace_print_print,
.raw = trace_print_raw,
- .hex = trace_nop_print,
- .binary = trace_nop_print,
};
static struct trace_event *events[] __initdata = {
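
Since register_ftrace_event() now substitutes trace_nop_print for every NULL handler, an event definition only lists the callbacks it really implements, as the trace_branch.c and trace_print_event hunks show. Sketch of a definition relying on the defaults (foo_* hypothetical):

	static struct trace_event trace_foo_event = {
		.type		= TRACE_FOO,
		.trace		= foo_trace_print,
		.latency_trace	= foo_trace_print,
		/* .raw, .hex and .binary default to trace_nop_print */
	};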
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index faa6ab7..bfc21f8 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -115,7 +115,6 @@ void trace_power_end(struct power_trace *it)
struct ring_buffer_event *event;
struct trace_power *entry;
struct trace_array_cpu *data;
- unsigned long irq_flags;
struct trace_array *tr = power_trace;
if (!trace_power_enabled)
@@ -125,18 +124,13 @@ void trace_power_end(struct power_trace *it)
it->end = ktime_get();
data = tr->data[smp_processor_id()];
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+ sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_POWER;
entry->state_data = *it;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
-
+ trace_buffer_unlock_commit(tr, event, 0, 0);
out:
preempt_enable();
}
@@ -148,7 +142,6 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
struct ring_buffer_event *event;
struct trace_power *entry;
struct trace_array_cpu *data;
- unsigned long irq_flags;
struct trace_array *tr = power_trace;
if (!trace_power_enabled)
@@ -162,18 +155,13 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
it->end = it->stamp;
data = tr->data[smp_processor_id()];
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+ sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_POWER;
entry->state_data = *it;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
-
+ trace_buffer_unlock_commit(tr, event, 0, 0);
out:
preempt_enable();
}
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index df175cb..30e14fe 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -43,7 +43,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
data = ctx_trace->data[cpu];
if (likely(!atomic_read(&data->disabled)))
- tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
+ tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
local_irq_restore(flags);
}
@@ -66,7 +66,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
data = ctx_trace->data[cpu];
if (likely(!atomic_read(&data->disabled)))
- tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+ tracing_sched_wakeup_trace(ctx_trace, wakee, current,
flags, pc);
local_irq_restore(flags);
@@ -185,12 +185,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
ctx_trace = tr;
}
-static void start_sched_trace(struct trace_array *tr)
-{
- tracing_reset_online_cpus(tr);
- tracing_start_sched_switch_record();
-}
-
static void stop_sched_trace(struct trace_array *tr)
{
tracing_stop_sched_switch_record();
@@ -199,7 +193,7 @@ static void stop_sched_trace(struct trace_array *tr)
static int sched_switch_trace_init(struct trace_array *tr)
{
ctx_trace = tr;
- start_sched_trace(tr);
+ tracing_start_sched_switch_record();
return 0;
}
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index a48c9b4..96d7164 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -72,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
if (task_cpu(wakeup_task) != cpu)
goto unlock;
- trace_function(tr, data, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, flags, pc);
unlock:
__raw_spin_unlock(&wakeup_lock);
@@ -152,8 +152,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
if (unlikely(!tracer_enabled || next != wakeup_task))
goto out_unlock;
- trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
- tracing_sched_switch_trace(wakeup_trace, data, prev, next, flags, pc);
+ trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+ tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
/*
* usecs conversion is slow so we try to delay the conversion
@@ -254,10 +254,8 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
data = wakeup_trace->data[wakeup_cpu];
data->preempt_timestamp = ftrace_now(cpu);
- tracing_sched_wakeup_trace(wakeup_trace, data, p, current,
- flags, pc);
- trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2,
- flags, pc);
+ tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+ trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
out_locked:
__raw_spin_unlock(&wakeup_lock);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 5013812..445700e 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -115,7 +115,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
ftrace_set_filter(func_name, strlen(func_name), 1);
/* enable tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
@@ -189,7 +189,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ftrace_enabled = 1;
tracer_enabled = 1;
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
@@ -236,7 +236,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
int ret;
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -290,7 +290,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
}
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -344,7 +344,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
}
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
@@ -476,7 +476,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
wait_for_completion(&isrt);
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -537,7 +537,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
int ret;
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -569,7 +569,7 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
int ret;
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return 0;
@@ -596,7 +596,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
int ret;
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index eaca5ad..84ca9d8 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -226,15 +226,6 @@ static void stop_stack_timers(void)
stop_stack_timer(cpu);
}
-static void start_stack_trace(struct trace_array *tr)
-{
- mutex_lock(&sample_timer_lock);
- tracing_reset_online_cpus(tr);
- start_stack_timers();
- tracer_enabled = 1;
- mutex_unlock(&sample_timer_lock);
-}
-
static void stop_stack_trace(struct trace_array *tr)
{
mutex_lock(&sample_timer_lock);
@@ -247,7 +238,10 @@ static int stack_trace_init(struct trace_array *tr)
{
sysprof_trace = tr;
- start_stack_trace(tr);
+ mutex_lock(&sample_timer_lock);
+ start_stack_timers();
+ tracer_enabled = 1;
+ mutex_unlock(&sample_timer_lock);
return 0;
}
diff --git a/kernel/wait.c b/kernel/wait.c
index cd87131..42a2dbc 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -91,6 +91,15 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
+/*
+ * finish_wait - clean up after waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ *
+ * Sets current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
@@ -117,6 +126,39 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
}
EXPORT_SYMBOL(finish_wait);
+/*
+ * abort_exclusive_wait - abort exclusive waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ * @state: runstate of the waiter to be woken
+ * @key: key to identify a wait bit queue or %NULL
+ *
+ * Sets current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ *
+ * Wakes up the next waiter if the caller is concurrently
+ * woken up through the queue.
+ *
+ * This prevents waiter starvation where an exclusive waiter
+ * aborts and is woken up concurrently and no one wakes up
+ * the next waiter.
+ */
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
+ unsigned int mode, void *key)
+{
+ unsigned long flags;
+
+ __set_current_state(TASK_RUNNING);
+ spin_lock_irqsave(&q->lock, flags);
+ if (!list_empty(&wait->task_list))
+ list_del_init(&wait->task_list);
+ else if (waitqueue_active(q))
+ __wake_up_common(q, mode, 1, 0, key);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(abort_exclusive_wait);
+
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
int ret = default_wake_function(wait, mode, sync, key);
@@ -177,17 +219,20 @@ int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
int (*action)(void *), unsigned mode)
{
- int ret = 0;
-
do {
+ int ret;
+
prepare_to_wait_exclusive(wq, &q->wait, mode);
- if (test_bit(q->key.bit_nr, q->key.flags)) {
- if ((ret = (*action)(q->key.flags)))
- break;
- }
+ if (!test_bit(q->key.bit_nr, q->key.flags))
+ continue;
+ ret = action(q->key.flags);
+ if (!ret)
+ continue;
+ abort_exclusive_wait(wq, &q->wait, mode, &q->key);
+ return ret;
} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
finish_wait(wq, &q->wait);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);
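
abort_exclusive_wait() exists for exactly the failure path added to __wait_on_bit_lock() above: when an exclusive waiter bails out with an error, a wakeup that was already delivered to it must be passed on, or the next waiter sleeps forever. A typical caller is the bit-lock wait API (sketch; my_wait_action is a hypothetical action callback, word an unsigned long *):

	static int my_wait_action(void *word)
	{
		schedule();		/* a killable variant could return -EINTR */
		return 0;
	}

	/* take bit 0 of *word as a lock; on action error, the exclusive
	 * wait is aborted and any stolen wakeup is handed to the next waiter */
	err = wait_on_bit_lock(word, 0, my_wait_action, TASK_UNINTERRUPTIBLE);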