author    Frederic Weisbecker <fweisbec@gmail.com>    2011-12-21 20:03:18 +0100
committer Tejun Heo <tj@kernel.org>                   2011-12-21 11:15:01 -0800
commit    c84cdf75ccb2845f690579e838f13f7e744e3d23 (patch)
tree      a94bd69e84e7996852e1973c75fe11a2b0f1a42d /kernel
parent    7e381b0eb1e1a9805c37335562e8dc02e7d7848c (diff)
cgroup: Remove unnecessary task_lock before fetching css_set on migration
When we fetch the css_set of the tasks on cgroup migration, we no longer need to synchronize against cgroup_exit(), which could swap the old css_set with init_css_set. Now that we take threadgroup_lock() during migrations, that race can no longer happen.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Mandeep Singh Baines <msb@chromium.org>
Reviewed-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Containers <containers@lists.linux-foundation.org>
Cc: Cgroups <cgroups@vger.kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
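To illustrate the locking idea the patch relies on, here is a minimal userspace sketch (not kernel code): the migration side takes a threadgroup-wide rwlock for write, while the exit side takes it for read around the point where it would retarget the task's css_set to init_css_set. All names below (tg_rwlock, task_css_set, task_exit(), migrate()) are illustrative stand-ins, not kernel APIs, and the pthread rwlock is only a rough model of threadgroup_lock().

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tg_rwlock = PTHREAD_RWLOCK_INITIALIZER; /* models threadgroup_lock() */

static int init_css_set;                      /* stand-in for the kernel's init_css_set  */
static int task_css_set_storage;              /* stand-in for the task's current css_set */
static int *task_css_set = &task_css_set_storage;

/*
 * Exit side: retargets the css_set pointer under the read side of the
 * lock, roughly modeling what cgroup_exit() does while the exit path
 * holds the threadgroup lock for read.
 */
static void *task_exit(void *arg)
{
        (void)arg;
        pthread_rwlock_rdlock(&tg_rwlock);
        task_css_set = &init_css_set;
        pthread_rwlock_unlock(&tg_rwlock);
        return NULL;
}

/*
 * Migration side: holds the write side, so the exit side above is
 * excluded and the pointer can be fetched without a per-task lock.
 */
static void migrate(void)
{
        pthread_rwlock_wrlock(&tg_rwlock);
        int *oldcg = task_css_set;            /* safe: cannot change while we hold the lock */
        printf("fetched css_set %p\n", (void *)oldcg);
        pthread_rwlock_unlock(&tg_rwlock);
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, task_exit, NULL);
        migrate();
        pthread_join(t, NULL);
        return 0;
}

With the write side held across the fetch, the pointer read in migrate() cannot be swapped underneath it, which is why the per-task task_lock()/task_unlock() pair around the fetch in the patch below becomes unnecessary.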
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c  |  20
1 file changed, 10 insertions, 10 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index dae50d0..4936d88 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1850,14 +1850,14 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	struct css_set *newcg;
 
 	/*
-	 * get old css_set. we need to take task_lock and refcount it, because
-	 * an exiting task can change its css_set to init_css_set and drop its
-	 * old one without taking cgroup_mutex.
+	 * get old css_set. We are synchronized through threadgroup_lock()
+	 * against PF_EXITING setting such that we can't race against
+	 * cgroup_exit() changing the css_set to init_css_set and dropping the
+	 * old one.
 	 */
-	task_lock(tsk);
+	WARN_ON_ONCE(tsk->flags & PF_EXITING);
 	oldcg = tsk->cgroups;
 	get_css_set(oldcg);
-	task_unlock(tsk);
 
 	/* locate or allocate a new css_set for this task. */
 	if (guarantee) {
@@ -1879,9 +1879,7 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	}
 	put_css_set(oldcg);
 
-	/* @tsk can't exit as its threadgroup is locked */
 	task_lock(tsk);
-	WARN_ON_ONCE(tsk->flags & PF_EXITING);
 	rcu_assign_pointer(tsk->cgroups, newcg);
 	task_unlock(tsk);
@@ -2182,11 +2180,13 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		/* nothing to do if this task is already in the cgroup */
 		if (tc->cgrp == cgrp)
 			continue;
-		/* get old css_set pointer */
-		task_lock(tc->task);
+		/*
+		 * get old css_set pointer. threadgroup is locked so this is
+		 * safe against concurrent cgroup_exit() changing this to
+		 * init_css_set.
+		 */
 		oldcg = tc->task->cgroups;
 		get_css_set(oldcg);
-		task_unlock(tc->task);
 		/* see if the new one for us is already in the list? */
 		if (css_set_check_fetched(cgrp, tc->task, oldcg, &newcg_list)) {
 			/* was already there, nothing to do. */