 include/linux/rcupdate.h |  8 ++++----
 include/linux/rcutiny.h  |  9 +++++++++
 include/linux/rcutree.h  |  2 ++
 kernel/rcutiny.c         |  4 ++--
 kernel/rcutree.c         | 13 +++++++++++++
 5 files changed, 30 insertions(+), 6 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f1f1bc3..a53a21a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
rcu_irq_exit(); \
} while (0)
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+extern int rcu_is_cpu_idle(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
-
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
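
The hunks above move the rcu_is_cpu_idle() declaration earlier in rcupdate.h and widen its guard to include CONFIG_RCU_TRACE, so the prototype is visible to the new rcutiny.h inline (below) even on !SMP builds without lockdep. For context, a hedged sketch of the sort of debug-time check that wants this prototype; assert_not_idle() and its message are illustrative, not from this patch:

/* Illustrative only: an idle CPU is invisible to RCU, so read-side
 * primitives must not be used there.  assert_not_idle() is made up. */
static inline void assert_not_idle(void)
{
        WARN_ON_ONCE(rcu_is_cpu_idle());        /* RCU is ignoring this CPU */
}
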
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005e..bee6659 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,4 +132,13 @@ static inline void rcu_scheduler_starting(void)
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#ifdef CONFIG_RCU_TRACE
+
+static inline bool __rcu_is_watching(void)
+{
+ return !rcu_is_cpu_idle();
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
#endif /* __LINUX_RCUTINY_H */
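
For TINY_RCU, __rcu_is_watching() is simply the negation of rcu_is_cpu_idle(), made available whenever CONFIG_RCU_TRACE is set. A hedged sketch of the kind of caller this is aimed at (not part of this patch; trace_event_handler() is hypothetical): a tracing hook must not enter an RCU read-side critical section on a CPU that RCU is not watching, and with preemption already disabled it can ask first:

/* Hypothetical tracing callsite; illustrative only. */
static void trace_event_handler(unsigned long ip)
{
        if (!__rcu_is_watching())
                return;                 /* e.g. called from the idle loop */
        rcu_read_lock();
        /* ... record the event under RCU protection ... */
        rcu_read_unlock();
}
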
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 226169d..293613d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,4 +90,6 @@ extern void exit_rcu(void);
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
+extern bool __rcu_is_watching(void);
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9ed6075..b4bc618 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -174,7 +174,7 @@ void rcu_irq_enter(void)
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
/*
* Test whether RCU thinks that the current CPU is idle.
@@ -185,7 +185,7 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
/*
* Test whether the current CPU was interrupted from idle. Nested
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 32618b3..910d868 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -671,6 +671,19 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);
+/**
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections. Unlike
+ * rcu_is_cpu_idle(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+ return !!(atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1);
+}
+
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
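
The low-bit test in __rcu_is_watching() relies on the dynticks convention: each per-CPU rcu_dynticks.dynticks counter is incremented on every transition into and out of dynticks-idle, so it is even while the CPU is idle and odd while RCU is watching. A minimal userspace model of that convention (illustrative only, using C11 atomics in place of the kernel's atomic_t):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int dynticks = 1;         /* starts odd: CPU begins non-idle */

static void model_idle_enter(void) { atomic_fetch_add(&dynticks, 1); } /* odd -> even */
static void model_idle_exit(void)  { atomic_fetch_add(&dynticks, 1); } /* even -> odd */

static bool model_rcu_is_watching(void)
{
        return atomic_load(&dynticks) & 0x1;   /* odd means RCU is watching */
}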