path: root/kernel/cgroup.c
author    Tejun Heo <tj@kernel.org>  2015-05-13 16:35:18 -0400
committer Tejun Heo <tj@kernel.org>  2015-05-26 20:35:00 -0400
commit    b5ba75b5fc0e8404e2c50cb68f39bb6a53fc916f (patch)
tree      467e0e1d4b745ca8ce37673f2fb5505487a82bb7 /kernel/cgroup.c
parent    d59cfc09c32a2ae31f1c3bc2983a0cd79afb3f14 (diff)
cgroup: simplify threadgroup locking
Now that threadgroup locking is made global, code paths around it can be simplified.

* lock-verify-unlock-retry dancing removed from __cgroup_procs_write().
* Race protection against de_thread() removed from cgroup_update_dfl_csses().

Signed-off-by: Tejun Heo <tj@kernel.org>
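The core of the simplification: because cgroup_threadgroup_rwsem is now a single global percpu rwsem rather than a per-process lock, its write side can be taken before the task is even looked up. Nothing can change thread-group leadership while it is held, so the lookup result stays valid and the lock-verify-unlock-retry loop is unnecessary. Below is a minimal userspace sketch of that ordering, not the kernel code itself: pthread_rwlock_t stands in for the percpu rwsem, and find_target(), attach() and procs_write() are hypothetical stand-ins for find_task_by_vpid(), cgroup_attach_task() and __cgroup_procs_write().

#include <pthread.h>
#include <stdio.h>

struct task { int pid; };

static pthread_rwlock_t threadgroup_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct task the_task = { 42 };

/* hypothetical stand-ins for find_task_by_vpid() and cgroup_attach_task() */
static struct task *find_target(int pid)
{
	return pid == the_task.pid ? &the_task : NULL;
}

static int attach(struct task *tsk, int whole_group)
{
	(void)tsk;
	(void)whole_group;
	return 0;
}

static int procs_write(int pid, int whole_group)
{
	struct task *tsk;
	int ret;

	/*
	 * Take the global write lock before the lookup.  While it is
	 * held, an exec()-style change of thread-group leadership
	 * cannot happen, so the lookup result stays valid and no
	 * re-check/retry is needed afterwards.
	 */
	pthread_rwlock_wrlock(&threadgroup_lock);

	tsk = find_target(pid);
	if (!tsk) {
		ret = -1;	/* -ESRCH in the kernel */
		goto out_unlock;
	}

	ret = attach(tsk, whole_group);
out_unlock:
	pthread_rwlock_unlock(&threadgroup_lock);
	return ret;
}

int main(void)
{
	printf("attach returned %d\n", procs_write(42, 1));
	return 0;
}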
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--  kernel/cgroup.c  48
1 file changed, 13 insertions(+), 35 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 77578a1..0fd5227 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2402,14 +2402,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
if (!cgrp)
return -ENODEV;
-retry_find_task:
+ percpu_down_write(&cgroup_threadgroup_rwsem);
rcu_read_lock();
if (pid) {
tsk = find_task_by_vpid(pid);
if (!tsk) {
- rcu_read_unlock();
ret = -ESRCH;
- goto out_unlock_cgroup;
+ goto out_unlock_rcu;
}
/*
* even if we're attaching all tasks in the thread group, we
@@ -2419,9 +2418,8 @@ retry_find_task:
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->euid, tcred->suid)) {
- rcu_read_unlock();
ret = -EACCES;
- goto out_unlock_cgroup;
+ goto out_unlock_rcu;
}
} else
tsk = current;
@@ -2436,35 +2434,21 @@ retry_find_task:
*/
if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
- rcu_read_unlock();
- goto out_unlock_cgroup;
+ goto out_unlock_rcu;
}
get_task_struct(tsk);
rcu_read_unlock();
- percpu_down_write(&cgroup_threadgroup_rwsem);
- if (threadgroup) {
- if (!thread_group_leader(tsk)) {
- /*
- * a race with de_thread from another thread's exec()
- * may strip us of our leadership, if this happens,
- * there is no choice but to throw this task away and
- * try again; this is
- * "double-double-toil-and-trouble-check locking".
- */
- percpu_up_write(&cgroup_threadgroup_rwsem);
- put_task_struct(tsk);
- goto retry_find_task;
- }
- }
-
ret = cgroup_attach_task(cgrp, tsk, threadgroup);
- percpu_up_write(&cgroup_threadgroup_rwsem);
-
put_task_struct(tsk);
-out_unlock_cgroup:
+ goto out_unlock_threadgroup;
+
+out_unlock_rcu:
+ rcu_read_unlock();
+out_unlock_threadgroup:
+ percpu_up_write(&cgroup_threadgroup_rwsem);
cgroup_kn_unlock(of->kn);
return ret ?: nbytes;
}
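The rewritten exit path above is the usual kernel goto-unwind ladder: error paths taken while still inside rcu_read_lock() jump to out_unlock_rcu and fall through into out_unlock_threadgroup, while the success path, which has already dropped RCU, jumps past the first label. Here is a compilable sketch of the same shape with hypothetical names (lock_a, lock_b, do_work_locked, op) and plain mutexes instead of the kernel primitives:

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* outer, like the rwsem */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* inner, like rcu_read_lock() */

static int do_work_locked(int arg)
{
	return arg < 0 ? -1 : 0;	/* pretend failure for negative input */
}

int op(int arg)
{
	int ret;

	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);

	ret = do_work_locked(arg);
	if (ret)
		goto out_unlock_b;	/* error: release both, innermost first */

	pthread_mutex_unlock(&lock_b);	/* success path drops the inner lock early */
	/* ... work that only needs lock_a would go here ... */
	goto out_unlock_a;

out_unlock_b:
	pthread_mutex_unlock(&lock_b);
out_unlock_a:
	pthread_mutex_unlock(&lock_a);
	return ret;
}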
@@ -2611,6 +2595,8 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
lockdep_assert_held(&cgroup_mutex);
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+
/* look up all csses currently attached to @cgrp's subtree */
down_read(&css_set_rwsem);
css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
@@ -2666,17 +2652,8 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
goto out_finish;
last_task = task;
- percpu_down_write(&cgroup_threadgroup_rwsem);
- /* raced against de_thread() from another thread? */
- if (!thread_group_leader(task)) {
- percpu_up_write(&cgroup_threadgroup_rwsem);
- put_task_struct(task);
- continue;
- }
-
ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
- percpu_up_write(&cgroup_threadgroup_rwsem);
put_task_struct(task);
if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -2686,6 +2663,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
out_finish:
cgroup_migrate_finish(&preloaded_csets);
+ percpu_up_write(&cgroup_threadgroup_rwsem);
return ret;
}
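In cgroup_update_dfl_csses() the same global scope lets the write side be taken once around the whole migration loop instead of once per task, which is what makes the per-task thread_group_leader() re-check obsolete. A sketch of that hoisting, using hypothetical types and helpers (work_item, migrate_one, update_all) rather than the kernel's:

#include <pthread.h>

static pthread_rwlock_t big_lock = PTHREAD_RWLOCK_INITIALIZER;

struct work_item {
	int id;
	struct work_item *next;
};

static int migrate_one(struct work_item *w)
{
	(void)w;
	return 0;
}

int update_all(struct work_item *head)
{
	struct work_item *w;
	int ret = 0;

	/*
	 * One write-side critical section covers every iteration, so
	 * the items cannot change mid-loop and no per-item lock/unlock
	 * or re-validation is needed.
	 */
	pthread_rwlock_wrlock(&big_lock);
	for (w = head; w; w = w->next) {
		ret = migrate_one(w);
		if (ret)
			break;
	}
	pthread_rwlock_unlock(&big_lock);
	return ret;
}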