author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2010-01-04 15:09:02 -0800
committer  Ingo Molnar <mingo@elte.hu>  2010-01-13 09:06:02 +0100
commit     07079d5357a4d53c2b13126c4a38fb40e6e04966 (patch)
tree       1a97552a220a9bbdfceb1cda01c1ee5b92ce75bd /kernel/rcutree.c
parent     559569acf94f538b56bd6eead80b439d6a78cdff (diff)
rcu: Prohibit starting new grace periods while forcing quiescent states
Reduce the number and variety of race conditions by prohibiting the start of a new grace period while force_quiescent_state() is active. A new fqs_active flag in the rcu_state structure is used to trace whether or not force_quiescent_state() is active, and this new flag is tested by rcu_start_gp(). If the CPU that closed out the last grace period needs another grace period, this new grace period may be delayed up to one scheduling-clock tick, but it will eventually get started.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <126264655052-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
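To make the exclusion concrete, here is a minimal user-space sketch of the idea rather than the kernel code: a pthread mutex stands in for the root rcu_node lock, and start_gp()/force_qs() are hypothetical stand-ins for rcu_start_gp() and force_quiescent_state(); only the fqs_active flag and the way it is tested mirror the patch.

/*
 * Toy model of the fqs_active exclusion (assumption: user-space pthreads
 * in place of the kernel's rnp->lock; all names other than fqs_active
 * are hypothetical).
 */
#include <pthread.h>
#include <stdio.h>

struct toy_state {
	pthread_mutex_t lock;	/* stands in for the root rcu_node lock */
	int fqs_active;		/* set while "forcing quiescent states" */
	int gpnum;		/* current grace-period number */
};

static struct toy_state state = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Analogue of rcu_start_gp(): refuse to start a GP while forcing is active. */
static void start_gp(void)
{
	pthread_mutex_lock(&state.lock);
	if (state.fqs_active) {
		/* The caller simply retries on a later scheduling-clock tick. */
		pthread_mutex_unlock(&state.lock);
		return;
	}
	state.gpnum++;
	printf("started grace period %d\n", state.gpnum);
	pthread_mutex_unlock(&state.lock);
}

/* Analogue of force_quiescent_state(): exclude new GPs while it runs. */
static void force_qs(void)
{
	pthread_mutex_lock(&state.lock);
	state.fqs_active = 1;
	pthread_mutex_unlock(&state.lock);

	/* ... scan CPUs for quiescent states without holding the lock ... */

	pthread_mutex_lock(&state.lock);
	state.fqs_active = 0;
	pthread_mutex_unlock(&state.lock);
}

int main(void)
{
	force_qs();
	start_gp();	/* succeeds: forcing has finished */
	return 0;
}

In the real patch the flag is set and cleared while force_quiescent_state() holds rnp->lock with irqs disabled, and a grace period refused by rcu_start_gp() is picked up again from the scheduling-clock tick path, which is why the commit message bounds the extra delay at one tick.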
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  31
1 files changed, 17 insertions, 14 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d42ad30..41688ff 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -659,7 +659,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
struct rcu_data *rdp = rsp->rda[smp_processor_id()];
struct rcu_node *rnp = rcu_get_root(rsp);
- if (!cpu_needs_another_gp(rsp, rdp)) {
+ if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
if (rnp->completed == rsp->completed) {
spin_unlock_irqrestore(&rnp->lock, flags);
return;
@@ -1195,6 +1195,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
struct rcu_node *rnp = rcu_get_root(rsp);
u8 signaled;
u8 forcenow;
+ u8 gpdone;
if (!rcu_gp_in_progress(rsp))
return; /* No grace period in progress, nothing to force. */
@@ -1206,15 +1207,16 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
(long)(rsp->jiffies_force_qs - jiffies) >= 0)
goto unlock_fqs_ret; /* no emergency and done recently. */
rsp->n_force_qs++;
- spin_lock(&rnp->lock);
+ spin_lock(&rnp->lock); /* irqs already disabled */
lastcomp = rsp->gpnum - 1;
signaled = rsp->signaled;
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
if (!rcu_gp_in_progress(rsp)) {
rsp->n_force_qs_ngp++;
- spin_unlock(&rnp->lock);
+ spin_unlock(&rnp->lock); /* irqs remain disabled */
goto unlock_fqs_ret; /* no GP in progress, time updated. */
}
+ rsp->fqs_active = 1;
switch (signaled) {
case RCU_GP_IDLE:
case RCU_GP_INIT:
@@ -1223,15 +1225,16 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
case RCU_SAVE_DYNTICK:
- spin_unlock(&rnp->lock);
+ spin_unlock(&rnp->lock); /* irqs remain disabled */
if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
break; /* So gcc recognizes the dead code. */
/* Record dyntick-idle state. */
- if (rcu_process_dyntick(rsp, lastcomp,
- dyntick_save_progress_counter))
- goto unlock_fqs_ret;
- spin_lock(&rnp->lock);
+ gpdone = rcu_process_dyntick(rsp, lastcomp,
+ dyntick_save_progress_counter);
+ spin_lock(&rnp->lock); /* irqs already disabled */
+ if (gpdone)
+ break;
/* fall into next case. */
case RCU_SAVE_COMPLETED:
@@ -1252,17 +1255,17 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
case RCU_FORCE_QS:
/* Check dyntick-idle state, send IPI to laggarts. */
- spin_unlock(&rnp->lock);
- if (rcu_process_dyntick(rsp, rsp->completed_fqs,
- rcu_implicit_dynticks_qs))
- goto unlock_fqs_ret;
+ spin_unlock(&rnp->lock); /* irqs remain disabled */
+ gpdone = rcu_process_dyntick(rsp, rsp->completed_fqs,
+ rcu_implicit_dynticks_qs);
/* Leave state in case more forcing is required. */
- spin_lock(&rnp->lock);
+ spin_lock(&rnp->lock); /* irqs already disabled */
break;
}
- spin_unlock(&rnp->lock);
+ rsp->fqs_active = 0;
+ spin_unlock(&rnp->lock); /* irqs remain disabled */
unlock_fqs_ret:
spin_unlock_irqrestore(&rsp->fqslock, flags);
}
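Beyond the fqs_active flag itself, the hunks above also change the shape of force_quiescent_state(): the result of rcu_process_dyntick() is captured in the new gpdone variable and rnp->lock is reacquired before branching, so every case leaves the switch holding the lock and fqs_active can be cleared under that same lock just before unlock_fqs_ret. A standalone sketch of that shape, under the same assumptions as above (a pthread mutex in place of rnp->lock, scan_for_qs() as a hypothetical placeholder for the dyntick scan):

/* Sketch of the "drop lock, scan, reacquire before branching" shape. */
#include <pthread.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int fqs_active;

static int scan_for_qs(void)
{
	return 1;	/* pretend the scan saw the grace period complete */
}

static void force_qs_shape(void)
{
	int gpdone;

	pthread_mutex_lock(&root_lock);
	fqs_active = 1;

	pthread_mutex_unlock(&root_lock);	/* drop the lock for the scan */
	gpdone = scan_for_qs();
	pthread_mutex_lock(&root_lock);		/* reacquire before deciding */

	if (!gpdone) {
		/* further forcing would happen here, still under the lock */
	}

	fqs_active = 0;				/* cleared under the same lock */
	pthread_mutex_unlock(&root_lock);
}

int main(void)
{
	force_qs_shape();
	return 0;
}

The point of the restructuring is that no exit path skips clearing fqs_active or leaves with a different lock state, which is what the removed goto unlock_fqs_ret paths would otherwise have allowed.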