author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-09-06 17:39:49 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-09-25 06:44:41 -0700
commit     cc6783f788d8fe8b23ec6fc2762f5e8c9a418eee (patch)
tree       dfecd009a80c422393c2c6130d9a38ec7329e851 /kernel
parent     c337f8f58ed7cf150651d232af8222421a71463d (diff)
rcu: Is it safe to enter an RCU read-side critical section?
There is currently no way for kernel code to determine whether it is safe to
enter an RCU read-side critical section, in other words, whether or not RCU
is paying attention to the currently running CPU.  Given the large and
increasing quantity of code shared by the idle loop and non-idle code, this
shortcoming is becoming increasingly painful.

This commit therefore adds __rcu_is_watching(), which returns true if it is
safe to enter an RCU read-side critical section on the currently running
CPU.  This function is quite fast, using only a __this_cpu_read().  However,
the caller must disable preemption.

Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
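A rough usage sketch (not part of this commit; the function name
example_try_rcu_work() and its surrounding context are illustrative
assumptions): a caller disables preemption, checks __rcu_is_watching(), and
only then enters an RCU read-side critical section.

#include <linux/preempt.h>
#include <linux/rcupdate.h>

/*
 * Illustrative caller only: returns true if RCU-protected work could
 * safely run on this CPU.  Preemption stays disabled across the
 * __rcu_is_watching() check, as the commit message requires.
 */
static bool example_try_rcu_work(void)
{
	bool watching;

	preempt_disable();
	watching = __rcu_is_watching();
	if (watching) {
		rcu_read_lock();
		/* ... access RCU-protected data here ... */
		rcu_read_unlock();
	}
	preempt_enable();
	return watching;
}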
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutiny.c   4
-rw-r--r--  kernel/rcutree.c  13
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9ed6075..b4bc618 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -174,7 +174,7 @@ void rcu_irq_enter(void)
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
 
 /*
  * Test whether RCU thinks that the current CPU is idle.
@@ -185,7 +185,7 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
 /*
  * Test whether the current CPU was interrupted from idle. Nested
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 32618b3..910d868 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -671,6 +671,19 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
+/**
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections. Unlike
+ * rcu_is_cpu_idle(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return !!(atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1);
+}
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
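For background (not part of the patch; the helper below is an illustrative
standalone sketch, not kernel code): the per-CPU rcu_dynticks.dynticks
counter is incremented on each transition into and out of dyntick-idle, so
it is even while the CPU is idle from RCU's perspective and odd otherwise.
The new function's test of the low bit can therefore be read as:

#include <stdbool.h>

/* Illustrative only: an odd dynticks value means RCU is watching this CPU. */
static inline bool watching_from_dynticks(int dynticks_val)
{
	return !!(dynticks_val & 0x1);
}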