author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-06-25 19:03:16 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-07-17 14:58:56 -0700
commit	29fd930940193a9a035a75a3847457160d65559a (patch)
tree	7e59baa7e75ddb46b51b35f2c0e06815ecd47ad1 /kernel
parent	7fd0ddc5bf1ab5259c80a53a01984e13befd658b (diff)
rcu: Use funnel locking for synchronize_rcu_expedited()'s polling loop
This commit gets rid of synchronize_rcu_expedited()'s mutex_trylock()
polling loop in favor of the funnel-locking scheme that was abstracted
from synchronize_sched_expedited().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
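For readers unfamiliar with the pattern: funnel locking replaces contention on one global mutex with a walk up the rcu_node tree, where each task holds at most one ->exp_funnel_mutex at a time and re-checks at every level whether some concurrent expedited grace period has already done its work. Below is a minimal user-space sketch of that idea, not the kernel code: the struct node type, the pthread mutexes, and the seq_snap()/seq_done()/funnel_lock() helpers are hypothetical stand-ins for the rcu_node tree, ->exp_funnel_mutex, and the kernel's rcu_seq_*() sequence-counter protocol.

/* Hypothetical stand-in for the rcu_node tree: one mutex per level. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *parent;		/* NULL at the root */
	pthread_mutex_t funnel_mutex;	/* stands in for ->exp_funnel_mutex */
};

/* Even value: no expedited GP in flight; odd: one in progress. */
static atomic_ulong expedited_sequence;

/* Snapshot the counter value that, once reached, covers this caller. */
static unsigned long seq_snap(void)
{
	return (atomic_load(&expedited_sequence) + 3) & ~0x1UL;
}

/* Has a grace period satisfying snapshot s completed? (wrap ignored) */
static bool seq_done(unsigned long s)
{
	return atomic_load(&expedited_sequence) >= s;
}

/*
 * Walk from a (non-NULL) leaf toward the root, holding at most one
 * funnel mutex at a time.  Contending callers queue up at different
 * tree levels instead of all hammering one global lock, and a caller
 * whose grace period completed in the meantime drops out with NULL.
 */
static struct node *funnel_lock(struct node *leaf, unsigned long s)
{
	struct node *held = NULL;
	struct node *n;

	for (n = leaf; n != NULL; n = n->parent) {
		if (seq_done(s)) {
			if (held)
				pthread_mutex_unlock(&held->funnel_mutex);
			return NULL;	/* someone else did our work */
		}
		pthread_mutex_lock(&n->funnel_mutex);
		if (held)
			pthread_mutex_unlock(&held->funnel_mutex);
		held = n;
	}
	if (seq_done(s)) {	/* re-check now that the root mutex is held */
		pthread_mutex_unlock(&held->funnel_mutex);
		return NULL;
	}
	return held;	/* caller now owns the root's funnel mutex */
}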
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tree.c	15
-rw-r--r--	kernel/rcu/tree_plugin.h	36
2 files changed, 18 insertions(+), 33 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 094ed8f..338ea61 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3309,9 +3309,9 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
 	return rcu_seq_done(&rsp->expedited_sequence, s);
 }
 
-/* Common code for synchronize_sched_expedited() work-done checking. */
-static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
-			      atomic_long_t *stat, unsigned long s)
+/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
+static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
+			       atomic_long_t *stat, unsigned long s)
 {
 	if (rcu_exp_gp_seq_done(rsp, s)) {
 		if (rnp)
@@ -3319,7 +3319,6 @@ static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
 		/* Ensure test happens before caller kfree(). */
 		smp_mb__before_atomic(); /* ^^^ */
 		atomic_long_inc(stat);
-		put_online_cpus();
 		return true;
 	}
 	return false;
@@ -3345,14 +3344,14 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 	 */
 	rnp0 = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
 	for (; rnp0 != NULL; rnp0 = rnp0->parent) {
-		if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone1, s))
+		if (sync_exp_work_done(rsp, rnp1, &rsp->expedited_workdone1, s))
 			return NULL;
 		mutex_lock(&rnp0->exp_funnel_mutex);
 		if (rnp1)
 			mutex_unlock(&rnp1->exp_funnel_mutex);
 		rnp1 = rnp0;
 	}
-	if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone2, s))
+	if (sync_exp_work_done(rsp, rnp1, &rsp->expedited_workdone2, s))
 		return NULL;
 	return rnp1;
 }
@@ -3402,8 +3401,10 @@ void synchronize_sched_expedited(void)
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
 	rnp = exp_funnel_lock(rsp, s);
-	if (rnp == NULL)
+	if (rnp == NULL) {
+		put_online_cpus();
 		return; /* Someone else did our work for us. */
+	}
 
 	rcu_exp_gp_seq_start(rsp);
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 759883f..f0d7144 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -536,7 +536,6 @@ void synchronize_rcu(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
 /*
  * Return non-zero if there are any tasks in RCU read-side critical
@@ -556,7 +555,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
  * for the current expedited grace period. Works only for preemptible
  * RCU -- other RCU implementation use other means.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
@@ -572,7 +571,7 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
  * recursively up the tree. (Calm down, calm down, we do the recursion
  * iteratively!)
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			       bool wake)
@@ -611,7 +610,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 * set the ->expmask bits on the leaf rcu_node structures to tell phase 2
 * that work is needed here.
 *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
 */
 static void
 sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
@@ -654,7 +653,7 @@ sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
 * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
 * enabling rcu_read_unlock_special() to do the bit-clearing.
 *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
 */
 static void
 sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
@@ -702,29 +701,16 @@ sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
 void synchronize_rcu_expedited(void)
 {
 	struct rcu_node *rnp;
+	struct rcu_node *rnp_unlock;
 	struct rcu_state *rsp = rcu_state_p;
 	unsigned long s;
-	int trycount = 0;
 
 	s = rcu_exp_gp_seq_snap(rsp);
 
-	/*
-	 * Acquire lock, falling back to synchronize_rcu() if too many
-	 * lock-acquisition failures. Of course, if someone does the
-	 * expedited grace period for us, just leave.
-	 */
-	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
-		if (rcu_exp_gp_seq_done(rsp, s))
-			goto mb_ret; /* Others did our work for us. */
-		if (trycount++ < 10) {
-			udelay(trycount * num_online_cpus());
-		} else {
-			wait_rcu_gp(call_rcu);
-			return;
-		}
-	}
-	if (rcu_exp_gp_seq_done(rsp, s))
-		goto unlock_mb_ret; /* Others did our work for us. */
+	rnp_unlock = exp_funnel_lock(rsp, s);
+	if (rnp_unlock == NULL)
+		return; /* Someone else did our work for us. */
+
 	rcu_exp_gp_seq_start(rsp);
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
@@ -748,9 +734,7 @@ void synchronize_rcu_expedited(void)
 	/* Clean up and exit. */
 	rcu_exp_gp_seq_end(rsp);
-unlock_mb_ret:
-	mutex_unlock(&sync_rcu_preempt_exp_mutex);
-mb_ret:
+	mutex_unlock(&rnp_unlock->exp_funnel_mutex);
 	smp_mb(); /* ensure subsequent action seen after grace period. */
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
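To close the loop, here is a hypothetical caller built on the same toy helpers from the sketch above, mirroring the control flow this patch gives synchronize_rcu_expedited(): snapshot the sequence counter, funnel-lock, return early if someone else's grace period already covered us, otherwise run the grace period ourselves and release the root mutex. gp_start(), gp_end(), and synchronize_expedited_sketch() are invented names for illustration only.

/* Invented helpers: flip the toy counter odd at GP start, even at GP end. */
static void gp_start(void)
{
	atomic_fetch_add(&expedited_sequence, 1);	/* now odd */
}

static void gp_end(void)
{
	atomic_fetch_add(&expedited_sequence, 1);	/* now even */
}

/* Hypothetical analogue of the rewritten synchronize_rcu_expedited(). */
void synchronize_expedited_sketch(struct node *leaf)
{
	unsigned long s = seq_snap();
	struct node *held = funnel_lock(leaf, s);

	if (held == NULL)
		return;		/* someone else did our work for us */

	gp_start();
	/* ... force and wait out all pre-existing readers here ... */
	gp_end();

	pthread_mutex_unlock(&held->funnel_mutex);
}

The early NULL return is the point of the design: any number of concurrent callers whose snapshots are satisfied by a single grace period piggyback on it rather than serializing behind a trylock loop, which is why the commit can delete sync_rcu_preempt_exp_mutex and the udelay()-based retries outright.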