Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--	kernel/cgroup.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index dae50d0..4936d88 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1850,14 +1850,14 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
struct css_set *newcg;
/*
- * get old css_set. we need to take task_lock and refcount it, because
- * an exiting task can change its css_set to init_css_set and drop its
- * old one without taking cgroup_mutex.
+ * get old css_set. We are synchronized through threadgroup_lock()
+ * against PF_EXITING setting such that we can't race against
+ * cgroup_exit() changing the css_set to init_css_set and dropping the
+ * old one.
*/
- task_lock(tsk);
+ WARN_ON_ONCE(tsk->flags & PF_EXITING);
oldcg = tsk->cgroups;
get_css_set(oldcg);
- task_unlock(tsk);
/* locate or allocate a new css_set for this task. */
if (guarantee) {
@@ -1879,9 +1879,7 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
}
put_css_set(oldcg);
- /* @tsk can't exit as its threadgroup is locked */
task_lock(tsk);
- WARN_ON_ONCE(tsk->flags & PF_EXITING);
rcu_assign_pointer(tsk->cgroups, newcg);
task_unlock(tsk);
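
For reference, a sketch of how this part of cgroup_task_migrate() reads once the two hunks above are applied. It is reconstructed from the diff only; the code between the hunks and the rest of the function are elided:

        struct css_set *newcg;

        /*
         * get old css_set. We are synchronized through threadgroup_lock()
         * against PF_EXITING setting such that we can't race against
         * cgroup_exit() changing the css_set to init_css_set and dropping the
         * old one.
         */
        WARN_ON_ONCE(tsk->flags & PF_EXITING);
        oldcg = tsk->cgroups;
        get_css_set(oldcg);

        /* locate or allocate a new css_set for this task. */
        if (guarantee) {
                /* ... code between the two hunks elided ... */
        }
        put_css_set(oldcg);

        task_lock(tsk);
        rcu_assign_pointer(tsk->cgroups, newcg);
        task_unlock(tsk);

The net effect: the task_lock()/task_unlock() pair around sampling tsk->cgroups is gone, the synchronization burden shifts to the threadgroup_lock() named in the new comment, and the WARN_ON_ONCE() moves up so PF_EXITING is asserted before the old css_set is taken rather than after task_lock().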
@@ -2182,11 +2180,13 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
/* nothing to do if this task is already in the cgroup */
if (tc->cgrp == cgrp)
continue;
- /* get old css_set pointer */
- task_lock(tc->task);
+ /*
+ * get old css_set pointer. threadgroup is locked so this is
+ * safe against concurrent cgroup_exit() changing this to
+ * init_css_set.
+ */
oldcg = tc->task->cgroups;
get_css_set(oldcg);
- task_unlock(tc->task);
/* see if the new one for us is already in the list? */
if (css_set_check_fetched(cgrp, tc->task, oldcg, &newcg_list)) {
/* was already there, nothing to do. */
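
Likewise, a sketch of the loop body in cgroup_attach_proc() after the hunk above, again reconstructed only from the diff; code following the css_set_check_fetched() branch is not shown:

        /* nothing to do if this task is already in the cgroup */
        if (tc->cgrp == cgrp)
                continue;
        /*
         * get old css_set pointer. threadgroup is locked so this is
         * safe against concurrent cgroup_exit() changing this to
         * init_css_set.
         */
        oldcg = tc->task->cgroups;
        get_css_set(oldcg);
        /* see if the new one for us is already in the list? */
        if (css_set_check_fetched(cgrp, tc->task, oldcg, &newcg_list)) {
                /* was already there, nothing to do. */
                /* ... remainder of the loop body elided ... */

As in cgroup_task_migrate(), the only change is that tc->task->cgroups is now read without task_lock(), relying on the threadgroup being locked for the duration of the attach.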