author    | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2009-09-18 09:50:18 -0700
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-19 08:53:21 +0200
commit    | e7d8842ed34a7fe19d1ed90f84c211fb056ac523
tree      | d49d5b8ff8829e525b8f80d60a18ef1f37e09529 /kernel/rcutree_plugin.h
parent    | 28ecd58020409be8eb176c716f957fc3386fa2fa
rcu: Apply results of code inspection of kernel/rcutree_plugin.h
o Drop the calls to cpu_quiet() from the online/offline code.
These are unnecessary, since force_quiescent_state() will
clean up, and removing them simplifies the code a bit.
o Add a warning to check that we don't enqueue the same blocked
task twice onto the ->blocked_tasks[] lists.
o Rework the phase computation in rcu_preempt_note_context_switch()
to be more readable, as suggested by Josh Triplett.  (An
equivalence check for the old and new expressions is sketched
after this list.)
o Disable irqs to close a race between the scheduling clock
interrupt and rcu_preempt_note_context_switch() with respect to
the ->rcu_read_unlock_special field.  (See the race sketch after
the diff below.)
o Add comments to rnp->lock acquisition and release within
rcu_read_unlock_special() noting that irqs are already
disabled.
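The reworked phase expression is equivalent to the one it replaces:
adding a 0-or-1 value to gpnum flips the low-order bit exactly when
XORing with that value would.  Below is a minimal user-space sketch
(not kernel code; `qs_pending` is a hypothetical stand-in for whether
(rnp->qsmask & rdp->grpmask) is nonzero) that exhaustively checks the
equivalence of the two forms taken from the diff:

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long gpnum;
	int qs_pending;  /* stand-in for (rnp->qsmask & rdp->grpmask) != 0 */

	/* Only gpnum's low-order bit and qs_pending's truth value matter. */
	for (gpnum = 0; gpnum < 4; gpnum++) {
		for (qs_pending = 0; qs_pending < 2; qs_pending++) {
			/* Old form: XOR of the two one-bit quantities. */
			int old_phase = !qs_pending ^ (int)(gpnum & 0x1);
			/* New form: addition, then mask the low bit. */
			int new_phase = (int)((gpnum + !qs_pending) & 0x1);

			assert(old_phase == new_phase);
			printf("gpnum=%lu qs_pending=%d -> phase=%d\n",
			       gpnum, qs_pending, new_phase);
		}
	}
	return 0;
}
```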
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
LKML-Reference: <12532926201851-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r-- | kernel/rcutree_plugin.h | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 5f94619..cd6047c 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -117,9 +117,9 @@ static void rcu_preempt_note_context_switch(int cpu)
 		 * on line!
 		 */
 		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
-		phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
+		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
+		phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
 		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
-		smp_mb();  /* Ensure later ctxt swtch seen after above. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 
@@ -133,7 +133,9 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * means that we continue to block the current grace period.
 	 */
 	rcu_preempt_qs(cpu);
+	local_irq_save(flags);
 	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+	local_irq_restore(flags);
 }
 
 /*
@@ -189,10 +191,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	 */
 	for (;;) {
 		rnp = t->rcu_blocked_node;
-		spin_lock(&rnp->lock);
+		spin_lock(&rnp->lock);  /* irqs already disabled. */
 		if (rnp == t->rcu_blocked_node)
 			break;
-		spin_unlock(&rnp->lock);
+		spin_unlock(&rnp->lock);  /* irqs remain disabled. */
 	}
 	empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
 	list_del_init(&t->rcu_node_entry);
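Why the local_irq_save()/local_irq_restore() pair matters:
`t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS` is a
non-atomic read-modify-write, so a scheduling-clock interrupt that
fires between the load and the store and updates the same field can
have its update silently overwritten.  A minimal user-space sketch of
that interleaving follows (names and the flag value are illustrative
only, and the interrupt is simulated as an inline step rather than a
real irq):

```c
#include <stdio.h>

#define RCU_READ_UNLOCK_NEED_QS 0x2  /* illustrative value only */

/* Hypothetical stand-in for t->rcu_read_unlock_special. */
static unsigned int special;

int main(void)
{
	/* Task context: begin the read-modify-write (the load). */
	unsigned int tmp = special;

	/*
	 * Scheduling-clock interrupt fires here (simulated) and sets
	 * NEED_QS in the same field to request a quiescent state.
	 */
	special |= RCU_READ_UNLOCK_NEED_QS;

	/* Task context: complete the RMW using the stale value. */
	tmp &= ~RCU_READ_UNLOCK_NEED_QS;
	special = tmp;

	/* The interrupt's update has been overwritten. */
	printf("NEED_QS after race: %s\n",
	       (special & RCU_READ_UNLOCK_NEED_QS) ? "set" : "lost");
	return 0;
}
```

Disabling irqs around the load and the store, as the hunk at line 133
does, makes the read-modify-write appear atomic with respect to the
scheduling-clock interrupt on that CPU.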