path: root/kernel/rcutree.c
author    Paul E. McKenney <paul.mckenney@linaro.org>  2011-06-21 01:29:39 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-09-28 21:38:16 -0700
commit    e0f23060adfa3f27beaa7918eff70258b88471b6 (patch)
tree      f0e8ed4ad97a1fc4d3c2c5af639a63063ae52e32 /kernel/rcutree.c
parent    72fe701b70e6ced35d734b676c13efbc8fc769a9 (diff)
rcu: Update comments to reflect softirqs vs. kthreads
We now have kthreads only for flavors of RCU that support boosting, so update the now-misleading comments accordingly.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index eb6e731..4e24399 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -198,7 +198,7 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
};
#endif /* #ifdef CONFIG_NO_HZ */
-static int blimit = 10; /* Maximum callbacks per softirq. */
+static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static int qhimark = 10000; /* If this many pending, ignore blimit. */
static int qlowmark = 100; /* Once only this many pending, use blimit. */
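
As a rough illustration of how these three tunables interact, here is a self-contained userspace model (not the kernel code; every name below other than blimit, qhimark and qlowmark is invented for this sketch): callbacks are normally invoked at most blimit at a time, but once the backlog grows past qhimark the per-batch limit is effectively dropped until the queue drains back down to qlowmark.

/*
 * Userspace model of the batching heuristic implied by blimit, qhimark
 * and qlowmark.  The structures and helpers are stand-ins invented for
 * illustration; only the three tunables come from the patch above.
 */
#include <limits.h>
#include <stdio.h>

static long blimit = 10;      /* Maximum callbacks per batch. */
static long qhimark = 10000;  /* If this many pending, ignore blimit. */
static long qlowmark = 100;   /* Once only this many pending, use blimit. */

/* Per-"CPU" state: pending callbacks and the current effective limit. */
struct cpu_model {
    long qlen;       /* Callbacks queued on this CPU. */
    long cur_limit;  /* Effective per-batch limit. */
};

/* Invoke up to cur_limit callbacks, then re-evaluate the limit. */
static void do_batch_model(struct cpu_model *cm)
{
    long n = cm->qlen < cm->cur_limit ? cm->qlen : cm->cur_limit;

    cm->qlen -= n;
    printf("invoked %ld callbacks, %ld still pending\n", n, cm->qlen);

    /* Once the backlog is small again, go back to the normal limit. */
    if (cm->qlen <= qlowmark)
        cm->cur_limit = blimit;
}

/* Enqueueing path: a huge backlog disables the per-batch limit. */
static void enqueue_model(struct cpu_model *cm, long count)
{
    cm->qlen += count;
    if (cm->qlen > qhimark)
        cm->cur_limit = LONG_MAX;
}

int main(void)
{
    struct cpu_model cm = { .qlen = 0, .cur_limit = blimit };

    enqueue_model(&cm, 15);     /* small backlog: limited batches */
    do_batch_model(&cm);
    enqueue_model(&cm, 20000);  /* backlog past qhimark: no limit */
    do_batch_model(&cm);
    return 0;
}
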
@@ -1261,7 +1261,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_restore(flags);
- /* Re-raise the RCU softirq if there are callbacks remaining. */
+ /* Re-invoke RCU core processing if there are callbacks remaining. */
if (cpu_has_callbacks_ready_to_invoke(rdp))
invoke_rcu_core();
}
@@ -1269,7 +1269,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/*
* Check to see if this CPU is in a non-context-switch quiescent state
* (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
- * Also schedule the RCU softirq handler.
+ * Also schedule RCU core processing.
*
* This function must be called with hardirqs disabled. It is normally
* invoked from the scheduling-clock interrupt. If rcu_pending returns
@@ -1448,9 +1448,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
#endif /* #else #ifdef CONFIG_SMP */
/*
- * This does the RCU processing work from softirq context for the
- * specified rcu_state and rcu_data structures. This may be called
- * only from the CPU to whom the rdp belongs.
+ * This does the RCU core processing work for the specified rcu_state
+ * and rcu_data structures. This may be called only from the CPU to
+ * whom the rdp belongs.
*/
static void
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -1487,7 +1487,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
- * Do softirq processing for the current CPU.
+ * Do RCU core processing for the current CPU.
*/
static void rcu_process_callbacks(struct softirq_action *unused)
{
@@ -1503,10 +1503,11 @@ static void rcu_process_callbacks(struct softirq_action *unused)
}
/*
- * Wake up the current CPU's kthread. This replaces raise_softirq()
- * in earlier versions of RCU. Note that because we are running on
- * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
- * cannot disappear out from under us.
+ * Schedule RCU callback invocation. If the specified type of RCU
+ * does not support RCU priority boosting, just do a direct call,
+ * otherwise wake up the per-CPU kernel kthread. Note that because we
+ * are running on the current CPU with interrupts disabled, the
+ * rcu_cpu_kthread_task cannot disappear out from under us.
*/
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
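
The body of invoke_rcu_callbacks() is cut off above. A minimal userspace model of the dispatch the new comment describes follows; it is a sketch under stated assumptions, not the kernel implementation, and everything except the decision itself (direct invocation for non-boosting flavors, per-CPU kthread wakeup for boosting ones) is an invented stand-in.

/*
 * Userspace model of the dispatch described by the updated comment.
 * The structures and helpers are illustrative stubs; only the boost
 * vs. non-boost decision comes from the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct rcu_state_model { bool boost; };   /* Does this flavor boost? */
struct rcu_data_model  { int pending; };  /* Callbacks ready on this CPU. */

/* Stand-in for direct callback invocation (the rcu_do_batch path). */
static void do_batch_model(struct rcu_data_model *rdp)
{
    printf("invoking %d callbacks directly\n", rdp->pending);
    rdp->pending = 0;
}

/* Stand-in for waking the per-CPU callback kthread. */
static void wake_kthread_model(void)
{
    printf("waking per-CPU callback kthread\n");
}

/* Direct call for non-boosting flavors, kthread wakeup otherwise. */
static void invoke_callbacks_model(struct rcu_state_model *rsp,
                                   struct rcu_data_model *rdp)
{
    if (!rsp->boost) {
        do_batch_model(rdp);
        return;
    }
    wake_kthread_model();
}

int main(void)
{
    struct rcu_state_model sched_flavor = { .boost = false };
    struct rcu_state_model boosted_flavor = { .boost = true };
    struct rcu_data_model rdp = { .pending = 3 };

    invoke_callbacks_model(&sched_flavor, &rdp);    /* direct invocation */
    invoke_callbacks_model(&boosted_flavor, &rdp);  /* defer to kthread */
    return 0;
}
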