| field | value | date |
|---|---|---|
| author | Paul E. McKenney <paul.mckenney@linaro.org> | 2013-02-08 15:55:02 -0800 |
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-03-26 08:04:46 -0700 |
| commit | 09c7b890622d72b5e004cc249bbe610e8b928ddf (patch) | |
| tree | 4326a3d6baaebaeb3417f0593bb017c2477b973b /kernel/rcutree_plugin.h | |
| parent | 21e7a6087480451804124cee27c0a7d0a7de1564 (diff) | |
rcu: Add event tracing for no-CBs CPUs' grace periods
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
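The hunks below only add callers of trace_rcu_nocb_grace_period(); the trace event itself is defined elsewhere and is not part of this diff. As orientation only, a TRACE_EVENT() definition compatible with the eight-argument call signature used here might look like the following sketch; the field layout and format string are assumptions, not the actual definition from the tree.

```c
/*
 * Illustrative sketch only: the real event definition is not in this
 * diff and would live in include/trace/events/rcu.h alongside the
 * usual TRACE_SYSTEM boilerplate.  Reconstructed from the call sites
 * added by this patch; names and format string are assumptions.
 */
#include <linux/tracepoint.h>

TRACE_EVENT(rcu_nocb_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gpnum,
		 unsigned long completed, unsigned long c,
		 u8 level, int grplo, int grphi, const char *gpevent),

	TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),

	TP_STRUCT__entry(
		__field(const char *, rcuname)
		__field(unsigned long, gpnum)
		__field(unsigned long, completed)
		__field(unsigned long, c)
		__field(u8, level)
		__field(int, grplo)
		__field(int, grphi)
		__field(const char *, gpevent)
	),

	TP_fast_assign(
		__entry->rcuname = rcuname;
		__entry->gpnum = gpnum;
		__entry->completed = completed;
		__entry->c = c;
		__entry->level = level;
		__entry->grplo = grplo;
		__entry->grphi = grphi;
		__entry->gpevent = gpevent;
	),

	TP_printk("%s %lu %lu %lu %u %d %d %s",
		  __entry->rcuname, __entry->gpnum, __entry->completed,
		  __entry->c, __entry->level, __entry->grplo,
		  __entry->grphi, __entry->gpevent)
);
```

The final string argument carries the per-call-site labels visible in the hunks ("Startleaf", "Startedleaf", "StartWait", and so on), which is what lets the resulting trace output distinguish the different code paths through the no-CBs grace-period machinery.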
Diffstat (limited to 'kernel/rcutree_plugin.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/rcutree_plugin.h | 30 |
1 file changed, 30 insertions, 0 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 7225a5a..e32236e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2201,6 +2201,9 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 	wake_up_all(&rnp->nocb_gp_wq[c & 0x1]);
 	rnp->n_nocb_gp_requests[c & 0x1] = 0;
 	needmore = rnp->n_nocb_gp_requests[(c + 1) & 0x1];
+	trace_rcu_nocb_grace_period(rsp->name, rnp->gpnum, rnp->completed,
+				    c, rnp->level, rnp->grplo, rnp->grphi,
+				    needmore ? "CleanupMore" : "Cleanup");
 	return needmore;
 }
@@ -2347,6 +2350,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	/* Count our request for a grace period. */
 	rnp->n_nocb_gp_requests[c & 0x1]++;
+	trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
+				    c, rnp->level, rnp->grplo, rnp->grphi,
+				    "Startleaf");
 	if (rnp->gpnum != rnp->completed) {
@@ -2355,6 +2361,10 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		 * is in progress, so we are done. When this grace
 		 * period ends, our request will be acted upon.
 		 */
+		trace_rcu_nocb_grace_period(rdp->rsp->name,
+					    rnp->gpnum, rnp->completed, c,
+					    rnp->level, rnp->grplo, rnp->grphi,
+					    "Startedleaf");
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else {
@@ -2366,6 +2376,11 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		if (rnp != rnp_root)
 			raw_spin_lock(&rnp_root->lock); /* irqs disabled. */
 		if (rnp_root->gpnum != rnp_root->completed) {
+			trace_rcu_nocb_grace_period(rdp->rsp->name,
+						    rnp->gpnum, rnp->completed,
+						    c, rnp->level,
+						    rnp->grplo, rnp->grphi,
+						    "Startedleafroot");
 			raw_spin_unlock(&rnp_root->lock); /* irqs disabled. */
 		} else {
@@ -2381,6 +2396,11 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 			c = rnp_root->completed + 1;
 			rnp->n_nocb_gp_requests[c & 0x1]++;
 			rnp_root->n_nocb_gp_requests[c & 0x1]++;
+			trace_rcu_nocb_grace_period(rdp->rsp->name,
+						    rnp->gpnum, rnp->completed,
+						    c, rnp->level,
+						    rnp->grplo, rnp->grphi,
+						    "Startedroot");
 			local_save_flags(flags1);
 			rcu_start_gp(rdp->rsp, flags1); /* Rlses ->lock. */
 		}
@@ -2396,6 +2416,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	 * Wait for the grace period. Do so interruptibly to avoid messing
 	 * up the load average.
 	 */
+	trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
+				    c, rnp->level, rnp->grplo, rnp->grphi,
+				    "StartWait");
 	for (;;) {
 		wait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
@@ -2403,7 +2426,14 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		if (likely(d))
 			break;
 		flush_signals(current);
+		trace_rcu_nocb_grace_period(rdp->rsp->name,
+					    rnp->gpnum, rnp->completed, c,
+					    rnp->level, rnp->grplo, rnp->grphi,
+					    "ResumeWait");
 	}
+	trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
+				    c, rnp->level, rnp->grplo, rnp->grphi,
+				    "EndWait");
 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }
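For readers unfamiliar with the "& 0x1" indexing in the traced code, here is a small stand-alone userspace model (not kernel code; names are borrowed purely for illustration) of the two-slot ->n_nocb_gp_requests[] bookkeeping whose outcome rcu_nocb_gp_cleanup() reports as "Cleanup" versus "CleanupMore":

```c
/*
 * Illustrative userspace model of the two-element request array that
 * the traced code indexes with "c & 0x1".  Waiters only ever ask for
 * grace period c or c + 1, so two counters selected by the low-order
 * bit of the grace-period number are sufficient.
 */
#include <stdio.h>

static unsigned long n_nocb_gp_requests[2];

/* A no-CBs waiter registering interest in grace period c ("Startleaf"). */
static void request_gp(unsigned long c)
{
	n_nocb_gp_requests[c & 0x1]++;
}

/* Grace period c has ended; mirrors the logic in rcu_nocb_gp_cleanup(). */
static int cleanup_gp(unsigned long c)
{
	int needmore;

	n_nocb_gp_requests[c & 0x1] = 0;
	needmore = n_nocb_gp_requests[(c + 1) & 0x1] != 0;
	printf("GP %lu: %s\n", c, needmore ? "CleanupMore" : "Cleanup");
	return needmore;
}

int main(void)
{
	request_gp(4);		/* one waiter needs grace period 4 */
	request_gp(5);		/* another already needs grace period 5 */
	cleanup_gp(4);		/* prints "CleanupMore": GP 5 still wanted */
	cleanup_gp(5);		/* prints "Cleanup": nothing further queued */
	return 0;
}
```

Because a waiter only ever asks for the grace period currently of interest or the one after it (c = rnp_root->completed + 1 in the hunks above), the parity of the grace-period number is enough to keep the two outstanding request counts apart, and the "CleanupMore" trace label flags exactly the case where another grace period must follow.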