author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-09-03 12:25:56 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-10-28 13:49:30 -0700
commit    e0775cefb5ede661dbdc0611d7bf3fcd4640005c (patch)
tree      58bf71c00bef3462c92649ef80b393c44dd412f5
parent    61cfd0970ea27764434fba5c41bdaefb26c44183 (diff)
rcu: Avoid IPIing idle CPUs from synchronize_sched_expedited()
Currently, synchronize_sched_expedited() sends IPIs to all online CPUs, even those that are idle or executing in nohz_full= userspace. Because idle CPUs and nohz_full= userspace CPUs are in extended quiescent states, there is no need to IPI them in the first place. This commit therefore avoids IPIing CPUs that are already in extended quiescent states.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--  kernel/rcu/tree.c | 27
1 file changed, 26 insertions(+), 1 deletion(-)
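The test added in the second hunk below, atomic_add_return(0, &rdtp->dynticks) & 0x1, relies on the parity convention of RCU's per-CPU dynticks counter: the counter is even while a CPU sits in an extended quiescent state (idle or nohz_full= userspace) and odd while the CPU is running kernel code, so an even value means the CPU can safely be dropped from the IPI mask. The following minimal userspace sketch models that convention; it is not kernel code, and toy_dynticks, toy_eqs_enter(), toy_eqs_exit(), and cpu_needs_ipi() are illustrative names only.

/*
 * Toy model of the even/odd dynticks convention -- userspace only.
 * In the kernel the counter lives in the per-CPU struct rcu_dynticks
 * and is sampled with atomic_add_return(0, &rdtp->dynticks).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

/* Even: CPU is in an extended quiescent state.  Odd: CPU runs kernel code. */
static atomic_long toy_dynticks[NCPUS];

static void toy_eqs_exit(int cpu)	/* even -> odd: leaving idle/userspace */
{
	atomic_fetch_add(&toy_dynticks[cpu], 1);
}

static void toy_eqs_enter(int cpu)	/* odd -> even: entering idle/userspace */
{
	atomic_fetch_add(&toy_dynticks[cpu], 1);
}

/* Mirror of the patch's test: an even counter means "no IPI needed". */
static bool cpu_needs_ipi(int cpu)
{
	return atomic_load(&toy_dynticks[cpu]) & 0x1;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)	/* counters start even (0): all idle */
		toy_eqs_exit(cpu);		/* make every CPU "busy" (odd) */
	toy_eqs_enter(1);			/* CPUs 1 and 3 go idle again */
	toy_eqs_enter(3);

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu %d: %s\n", cpu,
		       cpu_needs_ipi(cpu) ? "send IPI" : "skip (extended quiescent state)");
	return 0;
}

Running this prints "send IPI" for CPUs 0 and 2 and "skip" for CPUs 1 and 3, which is the same filtering the patch applies to a copy of cpu_online_mask before calling try_stop_cpus().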
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7f73c5e..9e3c20f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2950,6 +2950,9 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
+	cpumask_var_t cm;
+	bool cma = false;
+	int cpu;
 	long firstsnap, s, snap;
 	int trycount = 0;
 	struct rcu_state *rsp = &rcu_sched_state;
@@ -2984,11 +2987,26 @@ void synchronize_sched_expedited(void)
 	}
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
+	/* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
+	cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
+	if (cma) {
+		cpumask_copy(cm, cpu_online_mask);
+		cpumask_clear_cpu(raw_smp_processor_id(), cm);
+		for_each_cpu(cpu, cm) {
+			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+			if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+				cpumask_clear_cpu(cpu, cm);
+		}
+		if (cpumask_weight(cm) == 0)
+			goto all_cpus_idle;
+	}
+
 	/*
 	 * Each pass through the following loop attempts to force a
 	 * context switch on each CPU.
 	 */
-	while (try_stop_cpus(cpu_online_mask,
+	while (try_stop_cpus(cma ? cm : cpu_online_mask,
 			     synchronize_sched_expedited_cpu_stop,
 			     NULL) == -EAGAIN) {
 		put_online_cpus();
@@ -3000,6 +3018,7 @@ void synchronize_sched_expedited(void)
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone1);
+			free_cpumask_var(cm);
 			return;
 		}
@@ -3009,6 +3028,7 @@ void synchronize_sched_expedited(void)
 		} else {
 			wait_rcu_gp(call_rcu_sched);
 			atomic_long_inc(&rsp->expedited_normal);
+			free_cpumask_var(cm);
 			return;
 		}
@@ -3018,6 +3038,7 @@ void synchronize_sched_expedited(void)
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone2);
+			free_cpumask_var(cm);
 			return;
 		}
@@ -3032,6 +3053,7 @@ void synchronize_sched_expedited(void)
 			/* CPU hotplug operation in flight, use normal GP. */
 			wait_rcu_gp(call_rcu_sched);
 			atomic_long_inc(&rsp->expedited_normal);
+			free_cpumask_var(cm);
 			return;
 		}
 		snap = atomic_long_read(&rsp->expedited_start);
@@ -3039,6 +3061,9 @@ void synchronize_sched_expedited(void)
 	}
 	atomic_long_inc(&rsp->expedited_stoppedcpus);
 
+all_cpus_idle:
+	free_cpumask_var(cm);
+
 	/*
 	 * Everyone up to our most recent fetch is covered by our grace
 	 * period.  Update the counter, but only if our work is still
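A second detail in the hunks above is the bookkeeping around the new cpumask: zalloc_cpumask_var() can fail, so cma records whether the allocation succeeded, the try_stop_cpus() call falls back to cpu_online_mask when it did not, and every early return now frees the mask. Below is a rough userspace sketch of that shape, assuming a hypothetical build_target_set() helper and a toy struct cpu_set standing in for cpumask_var_t; it illustrates the pattern, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 8

struct cpu_set { bool cpu[NCPUS]; };	/* stand-in for cpumask_var_t */

/* Like zalloc_cpumask_var(): may fail, in which case the caller falls back. */
static struct cpu_set *build_target_set(const bool *cpu_is_quiescent)
{
	struct cpu_set *s = calloc(1, sizeof(*s));
	int i;

	if (!s)
		return NULL;
	for (i = 0; i < NCPUS; i++)
		s->cpu[i] = !cpu_is_quiescent[i];	/* drop already-quiescent CPUs */
	return s;
}

static void expedite(const bool *cpu_is_quiescent, const struct cpu_set *online)
{
	struct cpu_set *cm = build_target_set(cpu_is_quiescent);
	/* Same shape as "cma ? cm : cpu_online_mask" in the patch. */
	const struct cpu_set *target = cm ? cm : online;
	int i;

	for (i = 0; i < NCPUS; i++)
		if (target->cpu[i])
			printf("would IPI cpu %d\n", i);

	free(cm);	/* released on every exit path, like free_cpumask_var(cm) */
}

int main(void)
{
	struct cpu_set online;
	bool quiescent[NCPUS] = { false, true, false, true, false, false, true, false };
	int i;

	for (i = 0; i < NCPUS; i++)
		online.cpu[i] = true;
	expedite(quiescent, &online);
	return 0;
}

If build_target_set() fails, the sketch simply targets every online CPU, which is still correct but loses the optimization -- the same trade-off the patch accepts when zalloc_cpumask_var() returns false.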