author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2009-10-06 21:48:17 -0700
committer | Ingo Molnar <mingo@elte.hu> | 2009-10-07 08:11:20 +0200
commit | e74f4c4564455c91a3b4075bb1721993c2a95dda (patch)
tree | 213f9df0974c6e1e729de207b2c6dd942a39ba8c /kernel/rcutree_plugin.h
parent | d0ec774cb2599c858be9d923bb873cf6697520d8 (diff)
rcu: Make hot-unplugged CPU relinquish its own RCU callbacks
The current interaction between RCU and CPU hotplug requires that
RCU block in CPU notifiers waiting for callbacks to drain.
This can be greatly simplified by having each CPU relinquish its
own callbacks, and by having both _rcu_barrier() and the CPU_DEAD
notifiers adopt all callbacks that were previously relinquished.
This change also eliminates the possibility of certain types of
hangs due to the previous practice of waiting for callbacks to be
invoked from within CPU notifiers.  If you don't ever wait, you
cannot hang.
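
To make the handoff concrete, below is a minimal, self-contained sketch of the
"orphan and adopt" scheme described above.  It is an illustration only, not code
from this patch: aside from the ->orphan_cbs_list and nxtlist names, the structure
and function names are invented stand-ins, and the locking that the real code
performs around these list operations is omitted.

```c
#include <stddef.h>

/* Stand-in for the kernel's rcu_head callback node. */
struct sketch_cb {
	struct sketch_cb *next;
	void (*func)(struct sketch_cb *cb);
};

/* Global state: the shared "orphanage" list (cf. ->orphan_cbs_list). */
struct sketch_state {
	struct sketch_cb *orphan_cbs_list;
	struct sketch_cb **orphan_cbs_tail;
};

/* Per-CPU state: this CPU's queue of not-yet-invoked callbacks. */
struct sketch_data {
	struct sketch_cb *nxtlist;
	struct sketch_cb **nxttail;
};

/*
 * Hot-unplug path: the outgoing CPU relinquishes its callbacks by
 * splicing its whole queue onto the orphanage in O(1).  The real code
 * does this under the appropriate rcu_state lock; locking is omitted here.
 */
static void sketch_send_cbs_to_orphanage(struct sketch_state *rsp,
					 struct sketch_data *rdp)
{
	if (rdp->nxtlist == NULL)
		return;
	*rsp->orphan_cbs_tail = rdp->nxtlist;
	rsp->orphan_cbs_tail = rdp->nxttail;
	rdp->nxtlist = NULL;
	rdp->nxttail = &rdp->nxtlist;
}

/*
 * CPU_DEAD notifier or _rcu_barrier() path: a surviving CPU adopts
 * every orphaned callback by appending the orphanage to its own queue.
 */
static void sketch_adopt_orphan_cbs(struct sketch_state *rsp,
				    struct sketch_data *rdp)
{
	if (rsp->orphan_cbs_list == NULL)
		return;
	*rdp->nxttail = rsp->orphan_cbs_list;
	rdp->nxttail = rsp->orphan_cbs_tail;
	rsp->orphan_cbs_list = NULL;
	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
}
```

The point of the scheme is that both steps are constant-time list splices: the
dying CPU never waits for its callbacks to be invoked, and the adopting CPU
simply appends the orphans to its own queue.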
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1254890898456-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r-- | kernel/rcutree_plugin.h | 34
1 files changed, 34 insertions, 0 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 57200fe..c0cb783 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -410,6 +410,15 @@ static int rcu_preempt_needs_cpu(int cpu)
 	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
 }
 
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+	_rcu_barrier(&rcu_preempt_state, call_rcu);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
 /*
  * Initialize preemptable RCU's per-CPU data.
  */
@@ -419,6 +428,14 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 }
 
 /*
+ * Move preemptable RCU's callbacks to ->orphan_cbs_list.
+ */
+static void rcu_preempt_send_cbs_to_orphanage(void)
+{
+	rcu_send_cbs_to_orphanage(&rcu_preempt_state);
+}
+
+/*
  * Initialize preemptable RCU's state structures.
  */
 static void __init __rcu_init_preempt(void)
@@ -564,6 +581,16 @@ static int rcu_preempt_needs_cpu(int cpu)
 }
 
 /*
+ * Because preemptable RCU does not exist, rcu_barrier() is just
+ * another name for rcu_barrier_sched().
+ */
+void rcu_barrier(void)
+{
+	rcu_barrier_sched();
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/*
  * Because preemptable RCU does not exist, there is no per-CPU
  * data to initialize.
  */
@@ -572,6 +599,13 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 }
 
 /*
+ * Because there is no preemptable RCU, there are no callbacks to move.
+ */
+static void rcu_preempt_send_cbs_to_orphanage(void)
+{
+}
+
+/*
  * Because preemptable RCU does not exist, it need not be initialized.
  */
 static void __init __rcu_init_preempt(void)
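
As a usage note, the rcu_barrier() exported above is typically called on
code-removal paths.  The following is a hypothetical sketch, not part of this
patch; every identifier beginning with foo is invented for illustration.  It
shows a module that frees objects through call_rcu() and invokes rcu_barrier()
in its exit handler so that all outstanding callbacks have run before the
module text is unloaded.

```c
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {
	struct rcu_head rcu;
	int data;
};

static struct foo *global_foo;

/* RCU callback: runs after a grace period, frees the old object. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Publish a new object and defer freeing of the old one. */
static void foo_replace(int data)
{
	struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	struct foo *oldp = global_foo;

	if (!newp)
		return;
	newp->data = data;
	rcu_assign_pointer(global_foo, newp);
	if (oldp)
		call_rcu(&oldp->rcu, foo_reclaim);
}

static int __init foo_init(void)
{
	foo_replace(42);
	return 0;
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	struct foo *oldp = global_foo;

	global_foo = NULL;
	if (oldp)
		call_rcu(&oldp->rcu, foo_reclaim);
	/* Ensure every queued foo_reclaim() has finished before unload. */
	rcu_barrier();
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
```

In a preemptable-RCU kernel this waits via _rcu_barrier(&rcu_preempt_state,
call_rcu); otherwise it simply falls back to rcu_barrier_sched(), as the
non-preemptable stub in the diff shows.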