summaryrefslogtreecommitdiffstats
path: root/kernel/cgroup_freezer.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-10-16 15:03:14 -0700
committerTejun Heo <tj@kernel.org>2012-10-20 16:33:08 -0700
commitb4d18311d37b0b1b370a1ef3e4de92b97930f0a8 (patch)
treeca43c69c402de2cf67dc45e45ab90f86b489ff9c /kernel/cgroup_freezer.c
parent8755ade683241e8c6b8fe8d22d0ae35041a3dc51 (diff)
downloadop-kernel-dev-b4d18311d37b0b1b370a1ef3e4de92b97930f0a8.zip
op-kernel-dev-b4d18311d37b0b1b370a1ef3e4de92b97930f0a8.tar.gz
cgroup_freezer: prepare update_if_frozen() for locking change
Locking will change such that migration can happen while freezer_read/write() is in progress. This means that update_if_frozen() can no longer assume that all tasks in the cgroup conform to the current freezer state - newly migrated tasks which haven't finished freezer_attach() yet might be in any state. This patch updates update_if_frozen() such that it no longer verifies task states against freezer state. It now simply decides whether FREEZING stage is complete. This removal of verification makes it meaningless to call from freezer_change_state(). Drop it and move the fast exit test from freezer_read() - the only remaining caller - to update_if_frozen(). Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Rafael J. Wysocki <rjw@sisk.pl> Cc: Li Zefan <lizefan@huawei.com>
Diffstat (limited to 'kernel/cgroup_freezer.c')
-rw-r--r--kernel/cgroup_freezer.c43
1 files changed, 17 insertions, 26 deletions
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 0b0e105..3d45503 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -213,41 +213,39 @@ out:
}
/*
- * caller must hold freezer->lock
+ * We change from FREEZING to FROZEN lazily if the cgroup was only
+ * partially frozen when we exitted write. Caller must hold freezer->lock.
+ *
+ * Task states and freezer state might disagree while tasks are being
+ * migrated into @cgroup, so we can't verify task states against @freezer
+ * state here. See freezer_attach() for details.
*/
-static void update_if_frozen(struct cgroup *cgroup,
- struct freezer *freezer)
+static void update_if_frozen(struct cgroup *cgroup, struct freezer *freezer)
{
struct cgroup_iter it;
struct task_struct *task;
- unsigned int nfrozen = 0, ntotal = 0;
- enum freezer_state old_state = freezer->state;
+
+ if (freezer->state != CGROUP_FREEZING)
+ return;
cgroup_iter_start(cgroup, &it);
+
while ((task = cgroup_iter_next(cgroup, &it))) {
if (freezing(task)) {
- ntotal++;
/*
* freezer_should_skip() indicates that the task
* should be skipped when determining freezing
* completion. Consider it frozen in addition to
* the usual frozen condition.
*/
- if (frozen(task) || task_is_stopped_or_traced(task) ||
- freezer_should_skip(task))
- nfrozen++;
+ if (!frozen(task) && !task_is_stopped_or_traced(task) &&
+ !freezer_should_skip(task))
+ goto notyet;
}
}
- if (old_state == CGROUP_THAWED) {
- BUG_ON(nfrozen > 0);
- } else if (old_state == CGROUP_FREEZING) {
- if (nfrozen == ntotal)
- freezer->state = CGROUP_FROZEN;
- } else { /* old_state == CGROUP_FROZEN */
- BUG_ON(nfrozen != ntotal);
- }
-
+ freezer->state = CGROUP_FROZEN;
+notyet:
cgroup_iter_end(cgroup, &it);
}
@@ -262,13 +260,8 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
freezer = cgroup_freezer(cgroup);
spin_lock_irq(&freezer->lock);
+ update_if_frozen(cgroup, freezer);
state = freezer->state;
- if (state == CGROUP_FREEZING) {
- /* We change from FREEZING to FROZEN lazily if the cgroup was
- * only partially frozen when we exitted write. */
- update_if_frozen(cgroup, freezer);
- state = freezer->state;
- }
spin_unlock_irq(&freezer->lock);
cgroup_unlock();
@@ -306,8 +299,6 @@ static void freezer_change_state(struct cgroup *cgroup,
spin_lock_irq(&freezer->lock);
- update_if_frozen(cgroup, freezer);
-
switch (goal_state) {
case CGROUP_THAWED:
if (freezer->state != CGROUP_THAWED)
OpenPOWER on IntegriCloud