author    Frederic Weisbecker <fweisbec@gmail.com>    2015-09-09 15:38:22 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-09-10 13:29:01 -0700
commit    90f023030e26ce8f981b3e688cb79329d8d07cc3 (patch)
tree      c000e9501dabe18902380d25cedaaf6ceecc2a2f /kernel
parent    b639e86bae431db3fbc9fae8d09a9bbf97b74711 (diff)
download  op-kernel-dev-90f023030e26ce8f981b3e688cb79329d8d07cc3.zip
          op-kernel-dev-90f023030e26ce8f981b3e688cb79329d8d07cc3.tar.gz
kmod: use system_unbound_wq instead of khelper
We need to launch the usermodehelper kernel threads with the widest affinity, and this is partly why we use khelper. This workqueue has unbound properties and thus a wide affinity inherited by all its children.

Now khelper also has special properties that we aren't much interested in: it is ordered and single-threaded. There is really no need for ordering, since all we do is create kernel threads, which can be done concurrently. Single-threading is a useless limitation as well.

The workqueue engine already provides generic unbound workqueues that don't share these useless properties and handle parallel jobs well.

The only worrisome specific is their affinity to the node of the current CPU. That is fine for creating the usermodehelper kernel threads, but those inherit this affinity for longer jobs such as requesting modules.

This patch switches to these node-affine unbound workqueues, assuming that a node is sufficient to handle several parallel usermodehelper requests.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
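For illustration only (not part of this commit): a minimal, hypothetical module sketch of the pattern the patch below adopts, queueing a work item on the shared system_unbound_wq instead of creating a private ordered, single-threaded workqueue like khelper. The demo_* names are invented for the example.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Runs in an unbound worker, so it may execute on any allowed CPU. */
static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/* No private workqueue needed: system_unbound_wq already exists,
	 * is not ordered, and runs items concurrently. */
	queue_work(system_unbound_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because system_unbound_wq is set up by the workqueue core during early boot, the patch can also drop the khelper NULL check and the usermodehelper_init() constructor, as seen at the end of the diff.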
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kmod.c  40
1 file changed, 17 insertions(+), 23 deletions(-)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 81c6705..d38b2da 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -45,8 +45,6 @@
extern int max_threads;
-static struct workqueue_struct *khelper_wq;
-
#define CAP_BSET (void *)1
#define CAP_PI (void *)2
@@ -225,7 +223,7 @@ static int call_usermodehelper_exec_async(void *data)
spin_unlock_irq(&current->sighand->siglock);
/*
- * Our parent is khelper which runs with elevated scheduling
+ * Our parent (unbound workqueue) runs with elevated scheduling
* priority. Avoid propagating that into the userspace child.
*/
set_user_nice(current, 0);
@@ -268,9 +266,10 @@ out:
}
/*
- * Handles UMH_WAIT_PROC. Our parent khelper can't wait for usermodehelper
- * completion without blocking every other pending requests. That's why
- * we use a kernel thread dedicated for that purpose.
+ * Handles UMH_WAIT_PROC. Our parent (unbound workqueue) might not be able to
+ * run enough instances to handle usermodehelper completions without blocking
+ * some other pending requests. That's why we use a kernel thread dedicated for
+ * that purpose.
*/
static int call_usermodehelper_exec_sync(void *data)
{
@@ -312,14 +311,15 @@ static int call_usermodehelper_exec_sync(void *data)
/*
* This function doesn't strictly needs to be called asynchronously. But we
* need to create the usermodehelper kernel threads from a task that is affine
- * to all CPUs (or nohz housekeeping ones) such that they inherit a widest
- * affinity irrespective of call_usermodehelper() callers with possibly reduced
- * affinity (eg: per-cpu workqueues). We don't want usermodehelper targets to
- * contend any busy CPU.
- * Khelper provides such wide affinity.
+ * to an optimized set of CPUs (or nohz housekeeping ones) such that they
+ * inherit a widest affinity irrespective of call_usermodehelper() callers with
+ * possibly reduced affinity (eg: per-cpu workqueues). We don't want
+ * usermodehelper targets to contend a busy CPU.
+ *
+ * Unbound workqueues provide such wide affinity.
*
- * Besides, khelper provides the privilege level that caller might not have to
- * perform the usermodehelper request.
+ * Besides, workqueues provide the privilege level that caller might not have
+ * to perform the usermodehelper request.
*
*/
static void call_usermodehelper_exec_work(struct work_struct *work)
@@ -549,8 +549,8 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
* from interrupt context.
*
* Runs a user-space application. The application is started
- * asynchronously if wait is not set, and runs as a child of khelper.
- * (ie. it runs with full root capabilities and wide affinity).
+ * asynchronously if wait is not set, and runs as a child of system workqueues.
+ * (ie. it runs with full root capabilities and optimized affinity).
*/
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
@@ -562,7 +562,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
return -EINVAL;
}
helper_lock();
- if (!khelper_wq || usermodehelper_disabled) {
+ if (usermodehelper_disabled) {
retval = -EBUSY;
goto out;
}
@@ -574,7 +574,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
sub_info->wait = wait;
- queue_work(khelper_wq, &sub_info->work);
+ queue_work(system_unbound_wq, &sub_info->work);
if (wait == UMH_NO_WAIT) /* task has freed sub_info */
goto unlock;
@@ -704,9 +704,3 @@ struct ctl_table usermodehelper_table[] = {
},
{ }
};
-
-void __init usermodehelper_init(void)
-{
- khelper_wq = create_singlethread_workqueue("khelper");
- BUG_ON(!khelper_wq);
-}