author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2013-09-13 17:20:11 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2013-09-25 06:45:06 -0700
commit     5c173eb8bcb9c1aa888bd6d14a4cb746f3dd2420 (patch)
tree       5dd725412d2bccbdf0bcee0ac8fefc1f1a3d18c8 /kernel
parent     f9ffc31ebd38d2d74dbfe9f0b67274e99ad668f5 (diff)
rcu: Consistent rcu_is_watching() naming
The old rcu_is_cpu_idle() function is just the negation of __rcu_is_watching()
with preemption disabled. This commit therefore renames rcu_is_cpu_idle() to
rcu_is_watching(), inverting the sense of the test at each call site: every
rcu_is_cpu_idle() becomes !rcu_is_watching().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
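
For reference, the rename flips the polarity of every caller: code that
previously guarded on "the CPU is idle" now guards on "RCU is not watching".
A minimal before/after sketch, adapted from the kernel/lockdep.c hunk below
(the printk() string is from the diff; the surrounding context is trimmed for
illustration):

	/* Before: rcu_is_cpu_idle() is true when RCU is *not* watching. */
	if (rcu_is_cpu_idle())
		printk("RCU used illegally from idle CPU!\n");

	/* After: the identical condition, spelled with the new name. */
	if (!rcu_is_watching())
		printk("RCU used illegally from idle CPU!\n");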
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c   |  4
-rw-r--r--  kernel/rcupdate.c  |  2
-rw-r--r--  kernel/rcutiny.c   |  6
-rw-r--r--  kernel/rcutree.c   | 36
4 files changed, 24 insertions, 24 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e16c45b..4e8e14c 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 	       !rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
-			: rcu_is_cpu_idle()
+			: !rcu_is_watching()
 				? "RCU used illegally from idle CPU!\n"
 				: "",
 	       rcu_scheduler_active, debug_locks);
@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * So complain bitterly if someone does call rcu_read_lock(),
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		printk("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index b02a339..3b3c046 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -148,7 +148,7 @@ int rcu_read_lock_bh_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index b4bc618..0fa061d 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -179,11 +179,11 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-int rcu_is_cpu_idle(void)
+bool __rcu_is_watching(void)
 {
-	return !rcu_dynticks_nesting;
+	return rcu_dynticks_nesting;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL(__rcu_is_watching);
 
 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1b123e1..981d0c1 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -655,34 +655,34 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(rcu_is_cpu_idle);
-
-/**
- * __rcu_is_watching - are RCU read-side critical sections safe?
- *
- * Return true if RCU is watching the running CPU, which means that
- * this CPU can safely enter RCU read-side critical sections.  Unlike
- * rcu_is_cpu_idle(), the caller of __rcu_is_watching() must have at
- * least disabled preemption.
- */
-bool __rcu_is_watching(void)
-{
-	return !!(atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1);
-}
+EXPORT_SYMBOL_GPL(rcu_is_watching);
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
@@ -2268,7 +2268,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
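
The Tree-RCU hunk above relies on two conventions worth spelling out. First,
a CPU's rcu_dynticks.dynticks counter is incremented on each idle-entry and
idle-exit transition, so its low-order bit is 1 exactly while the CPU is
non-idle; that is why "atomic_read(...) & 0x1" answers "is RCU watching?".
Second, rcu_is_watching() brackets the per-CPU test with preempt_disable()
and preempt_enable() so the task cannot migrate to another CPU between
fetching the per-CPU pointer and testing the bit. The following stand-alone
sketch models only the low-bit convention; it is user-space C, and all
model_* names are hypothetical, not kernel APIs:

	#include <stdbool.h>
	#include <stdio.h>

	/* Models one CPU's rcu_dynticks.dynticks counter: incremented on
	 * every idle-entry and idle-exit, so the low-order bit is 1 while
	 * the CPU is non-idle (RCU watching) and 0 while it is idle. */
	static int model_dynticks = 1;	/* boot-time: non-idle, watching */

	static bool model_rcu_is_watching(void)
	{
		return model_dynticks & 0x1;	/* same test as __rcu_is_watching() */
	}

	static void model_idle_enter(void)	/* odd -> even: stop watching */
	{
		model_dynticks++;
	}

	static void model_idle_exit(void)	/* even -> odd: resume watching */
	{
		model_dynticks++;
	}

	int main(void)
	{
		printf("running:   watching=%d\n", model_rcu_is_watching()); /* 1 */
		model_idle_enter();
		printf("idle loop: watching=%d\n", model_rcu_is_watching()); /* 0 */
		model_idle_exit();
		printf("running:   watching=%d\n", model_rcu_is_watching()); /* 1 */
		return 0;
	}

The preempt_disable()/preempt_enable() pair in rcu_is_watching() exists only
to pin the task to one CPU for the duration of the per-CPU read; callers that
already run with preemption disabled can call __rcu_is_watching() directly,
which is exactly the split this commit's naming makes explicit.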