author    Steven Rostedt <srostedt@redhat.com>    2011-05-05 18:03:47 -0400
committer Steven Rostedt <rostedt@goodmis.org>    2011-05-18 15:29:50 -0400
commit    07fd5515f3b5c20704707f63e7f4485b534508a8 (patch)
tree      f018ec497f8c6b49a0fcfcd7a92a600e670f90f2 /kernel/trace
parent    2b499381bc50ede01b3d8eab164ca2fad00655f0 (diff)
ftrace: Free hash with call_rcu_sched()
When a hash is modified and might be in use, we need to perform a
schedule RCU operation on it, as the hashes will soon be used directly
in the function tracer callback.

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
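Why sched RCU specifically: the function tracer callbacks do not run under
rcu_read_lock(); they run with preemption disabled, and a preempt-disabled
region is exactly the read-side critical section that call_rcu_sched() waits
out. Below is a minimal sketch of the reader side this patch prepares for.
It is illustrative only and not part of the patch: example_filter_hash,
example_trace_callback() and the trailing tracing action are made-up
placeholders, while rcu_dereference_sched() and the series' ftrace_lookup_ip()
helper are existing primitives.

/* Illustrative only: a hash pointer published with rcu_assign_pointer()
 * by ftrace_hash_move() and retired with free_ftrace_hash_rcu(). */
static struct ftrace_hash *example_filter_hash;

static void example_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_hash *hash;

	/*
	 * The callback runs with preemption disabled, i.e. inside an
	 * RCU-sched read-side critical section, so the hash fetched here
	 * cannot be kfree()d until the call_rcu_sched() grace period
	 * started by free_ftrace_hash_rcu() has elapsed.
	 */
	hash = rcu_dereference_sched(example_filter_hash);

	if (!ftrace_lookup_ip(hash, ip))
		return;		/* ip is not in the filter hash */

	/* ... do the actual tracing work for this ip ... */
}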
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c | 55
1 file changed, 28 insertions(+), 27 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index dcce0bf..92b6fdf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -913,6 +913,7 @@ struct ftrace_hash {
 	unsigned long		size_bits;
 	struct hlist_head	*buckets;
 	unsigned long		count;
+	struct rcu_head		rcu;
 };
 
 /*
@@ -1058,6 +1059,21 @@ static void free_ftrace_hash(struct ftrace_hash *hash)
 	kfree(hash);
 }
 
+static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
+{
+	struct ftrace_hash *hash;
+
+	hash = container_of(rcu, struct ftrace_hash, rcu);
+	free_ftrace_hash(hash);
+}
+
+static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
+{
+	if (!hash || hash == EMPTY_HASH)
+		return;
+	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+}
+
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 {
 	struct ftrace_hash *hash;
@@ -1122,7 +1138,8 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	struct ftrace_func_entry *entry;
 	struct hlist_node *tp, *tn;
 	struct hlist_head *hhd;
-	struct ftrace_hash *hash = *dst;
+	struct ftrace_hash *old_hash;
+	struct ftrace_hash *new_hash;
 	unsigned long key;
 	int size = src->count;
 	int bits = 0;
@@ -1133,13 +1150,11 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	 * the empty_hash.
 	 */
 	if (!src->count) {
-		free_ftrace_hash(*dst);
-		*dst = EMPTY_HASH;
+		free_ftrace_hash_rcu(*dst);
+		rcu_assign_pointer(*dst, EMPTY_HASH);
 		return 0;
 	}
 
-	ftrace_hash_clear(hash);
-
 	/*
 	 * Make the hash size about 1/2 the # found
 	 */
@@ -1150,27 +1165,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	if (bits > FTRACE_HASH_MAX_BITS)
 		bits = FTRACE_HASH_MAX_BITS;
 
-	/* We can't modify the empty_hash */
-	if (hash == EMPTY_HASH) {
-		/* Create a new hash */
-		*dst = alloc_ftrace_hash(bits);
-		if (!*dst) {
-			*dst = EMPTY_HASH;
-			return -ENOMEM;
-		}
-		hash = *dst;
-	} else {
-		size = 1 << bits;
-
-		/* Use the old hash, but create new buckets */
-		hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL);
-		if (!hhd)
-			return -ENOMEM;
-
-		kfree(hash->buckets);
-		hash->buckets = hhd;
-		hash->size_bits = bits;
-	}
+	new_hash = alloc_ftrace_hash(bits);
+	if (!new_hash)
+		return -ENOMEM;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1181,10 +1178,14 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 			else
 				key = 0;
 			remove_hash_entry(src, entry);
-			__add_hash_entry(hash, entry);
+			__add_hash_entry(new_hash, entry);
 		}
 	}
 
+	old_hash = *dst;
+	rcu_assign_pointer(*dst, new_hash);
+	free_ftrace_hash_rcu(old_hash);
+
 	return 0;
 }
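The resulting update path in ftrace_hash_move() is the standard RCU
publish-then-defer-free ordering. Distilled from the hunks above (same calls,
ftrace bookkeeping stripped), the point is that the new hash becomes visible
to readers before the old one is queued for freeing:

	old_hash = *dst;
	/* Publish the fully populated replacement first, so every new
	 * reader sees either the old hash or the complete new one. */
	rcu_assign_pointer(*dst, new_hash);
	/* Only then queue the old hash; call_rcu_sched() inside
	 * free_ftrace_hash_rcu() waits for all preempt-disabled readers
	 * that might still hold it before kfree()ing it. */
	free_ftrace_hash_rcu(old_hash);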