author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-06-18 09:55:39 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-09-28 21:38:20 -0700
commit		965a002b4f1a458c5dcb334ec29f48a0046faa25 (patch)
tree		9aa3847fd44b322a73631758e7337632e5e3a32d	/kernel/rcutiny.c
parent		385680a9487d2f85382ad6d74e2a15837e47bfd9 (diff)
rcu: Make TINY_RCU also use softirq for RCU_BOOST=n
This patch #ifdefs TINY_RCU kthreads out of the kernel unless RCU_BOOST=y,
thus eliminating context-switch overhead if RCU priority boosting has not
been configured.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
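For orientation: the RCU_BOOST=n definition of the new invoke_rcu_callbacks()
lives in kernel/rcutiny_plugin.h and therefore does not appear in this file's
diff. A minimal sketch of the idea, assuming the names this commit introduces
(not the verbatim rcutiny_plugin.h code):

/*
 * Sketch only.  With RCU_BOOST=n, "invoking RCU callbacks" reduces to
 * raising RCU_SOFTIRQ, whose handler is the rcu_process_callbacks()
 * added below.  With RCU_BOOST=y, this function instead wakes the
 * boost kthread, which both boosts readers and runs callbacks.
 */
#ifndef CONFIG_RCU_BOOST
static void invoke_rcu_callbacks(void)
{
        raise_softirq(RCU_SOFTIRQ);
}
#endif /* #ifndef CONFIG_RCU_BOOST */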
Diffstat (limited to 'kernel/rcutiny.c')
-rw-r--r--	kernel/rcutiny.c	74
1 file changed, 10 insertions(+), 64 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 1c37bdd..c9321d8 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -43,16 +43,11 @@
 
 #include "rcu.h"
 
-/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
-static struct task_struct *rcu_kthread_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
-static unsigned long have_rcu_kthread_work;
-
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
-static void invoke_rcu_kthread(void);
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static int rcu_kthread(void *arg);
+static void invoke_rcu_callbacks(void);
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
                        void (*func)(struct rcu_head *rcu),
                        struct rcu_ctrlblk *rcp);
@@ -102,16 +97,6 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 }
 
 /*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
-        have_rcu_kthread_work = 1;
-        wake_up(&rcu_kthread_wq);
-}
-
-/*
  * Record an rcu quiescent state. And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state. Use "+" instead of "||" to defeat short circuiting.
@@ -123,7 +108,7 @@ void rcu_sched_qs(int cpu)
         local_irq_save(flags);
         if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
             rcu_qsctr_help(&rcu_bh_ctrlblk))
-                invoke_rcu_kthread();
+                invoke_rcu_callbacks();
         local_irq_restore(flags);
 }
 
@@ -136,7 +121,7 @@ void rcu_bh_qs(int cpu)
 
         local_irq_save(flags);
         if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-                invoke_rcu_kthread();
+                invoke_rcu_callbacks();
         local_irq_restore(flags);
 }
 
@@ -160,7 +145,7 @@ void rcu_check_callbacks(int cpu, int user)
  * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
  * whose grace period has elapsed.
  */
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
         struct rcu_head *next, *list;
         unsigned long flags;
@@ -200,36 +185,11 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
         RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
 }
 
-/*
- * This kthread invokes RCU callbacks whose grace periods have
- * elapsed. It is awakened as needed, and takes the place of the
- * RCU_SOFTIRQ that was used previously for this purpose.
- * This is a kthread, but it is never stopped, at least not until
- * the system goes down.
- */
-static int rcu_kthread(void *arg)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
-        unsigned long work;
-        unsigned long morework;
-        unsigned long flags;
-
-        for (;;) {
-                wait_event_interruptible(rcu_kthread_wq,
-                                         have_rcu_kthread_work != 0);
-                morework = rcu_boost();
-                local_irq_save(flags);
-                work = have_rcu_kthread_work;
-                have_rcu_kthread_work = morework;
-                local_irq_restore(flags);
-                if (work) {
-                        rcu_process_callbacks(&rcu_sched_ctrlblk);
-                        rcu_process_callbacks(&rcu_bh_ctrlblk);
-                        rcu_preempt_process_callbacks();
-                }
-                schedule_timeout_interruptible(1); /* Leave CPU for others. */
-        }
-
-        return 0; /* Not reached, but needed to shut gcc up. */
+        __rcu_process_callbacks(&rcu_sched_ctrlblk);
+        __rcu_process_callbacks(&rcu_bh_ctrlblk);
+        rcu_preempt_process_callbacks();
 }
 
 /*
@@ -291,17 +251,3 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
         __call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
-
-/*
- * Spawn the kthread that invokes RCU callbacks.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-        struct sched_param sp;
-
-        rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
-        sp.sched_priority = RCU_BOOST_PRIO;
-        sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
-        return 0;
-}
-early_initcall(rcu_spawn_kthreads);
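For completeness: raising RCU_SOFTIRQ does nothing until a handler has been
registered with open_softirq(), and that registration also happens outside
the hunks shown above. A hedged sketch of what the wiring looks like, with a
hypothetical init-function name:

/*
 * Hypothetical init hook: register rcu_process_callbacks() as the
 * RCU_SOFTIRQ action, so that invoke_rcu_callbacks() ->
 * raise_softirq(RCU_SOFTIRQ) eventually runs the callback lists in
 * softirq context.
 */
static int __init tiny_rcu_softirq_init(void)
{
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
        return 0;
}
early_initcall(tiny_rcu_softirq_init);

Once registered, each raise of RCU_SOFTIRQ from the quiescent-state paths in
the diff runs the callbacks with no context switch, which is exactly the
overhead the commit message says is eliminated for RCU_BOOST=n.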