path: root/kernel/rcutree_plugin.h
author	Paul E. McKenney <paul.mckenney@linaro.org>	2012-03-15 12:16:26 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-05-01 08:22:50 -0700
commit	f511fc624642f0bb8cf65aaa28979737514d4746 (patch)
tree	db1395c5d33da33c8ea6c82f6e5b7346cc5beb91 /kernel/rcutree_plugin.h
parent	79b9a75fb703b6a2670e46b9dc495af5bc7029b3 (diff)
rcu: Ensure that RCU_FAST_NO_HZ timers expire on correct CPU
Timers are subject to migration, which can lead to the following system-hang scenario when CONFIG_RCU_FAST_NO_HZ=y:

1.	CPU 0 executes synchronize_rcu(), which posts an RCU callback.

2.	CPU 0 then goes idle. It cannot immediately invoke the callback,
	but there is nothing RCU needs from it, so it enters dyntick-idle
	mode after posting a timer.

3.	The timer gets migrated to CPU 1.

4.	CPU 0 never wakes up, so the synchronize_rcu() never returns,
	and the system hangs.

This commit fixes this problem by using mod_timer_pinned(), as suggested by Peter Zijlstra, to ensure that the timer is actually posted on the running CPU.

Reported-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
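For context on the fix, here is a minimal sketch (not part of the commit) contrasting the two calls under the timer API of that era; the per-CPU timer and helper names below are hypothetical stand-ins for rcu_idle_gp_timer and the arming logic in rcu_prepare_for_idle():

/*
 * Sketch only: why a wakeup timer armed by an idling CPU must be
 * pinned.  Assumes the pre-4.8 timer API, where mod_timer_pinned()
 * still exists.
 */
#include <linux/timer.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct timer_list, idle_wakeup_timer);	/* hypothetical */

/* Must run on 'cpu' itself, as rcu_prepare_for_idle() does. */
static void arm_idle_wakeup(int cpu, unsigned long expires)
{
	struct timer_list *tp = &per_cpu(idle_wakeup_timer, cpu);

	/*
	 * mod_timer(tp, expires) would let the timer subsystem migrate
	 * the timer to a busier CPU for power efficiency, so the
	 * handler could fire elsewhere while this CPU sleeps forever --
	 * the hang described above.
	 *
	 * mod_timer_pinned() instead keeps the timer on the currently
	 * running CPU, guaranteeing this CPU is the one woken to
	 * process its pending RCU callbacks.
	 */
	mod_timer_pinned(tp, expires);
}

Note that mod_timer_pinned() pins the timer to whichever CPU executes the call, which is why it suffices here: rcu_prepare_for_idle() always runs on the CPU that is about to enter dyntick-idle mode.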
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ad61da7..d01e26d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2110,6 +2110,8 @@ static void rcu_cleanup_after_idle(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
+	struct timer_list *tp;
+
 	/*
 	 * If this is an idle re-entry, for example, due to use of
 	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
@@ -2121,9 +2123,10 @@ static void rcu_prepare_for_idle(int cpu)
 	if (!per_cpu(rcu_idle_first_pass, cpu) &&
 	    (per_cpu(rcu_nonlazy_posted, cpu) ==
 	     per_cpu(rcu_nonlazy_posted_snap, cpu))) {
-		if (rcu_cpu_has_callbacks(cpu))
-			mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-				  per_cpu(rcu_idle_gp_timer_expires, cpu));
+		if (rcu_cpu_has_callbacks(cpu)) {
+			tp = &per_cpu(rcu_idle_gp_timer, cpu);
+			mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+		}
 		return;
 	}
 	per_cpu(rcu_idle_first_pass, cpu) = 0;
@@ -2167,8 +2170,8 @@ static void rcu_prepare_for_idle(int cpu)
 	else
 		per_cpu(rcu_idle_gp_timer_expires, cpu) =
 			jiffies + RCU_IDLE_LAZY_GP_DELAY;
-	mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-		  per_cpu(rcu_idle_gp_timer_expires, cpu));
+	tp = &per_cpu(rcu_idle_gp_timer, cpu);
+	mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
 	per_cpu(rcu_nonlazy_posted_snap, cpu) =
 		per_cpu(rcu_nonlazy_posted, cpu);
 	return; /* Nothing more to do immediately. */