Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup_freezer.c | 10
-rw-r--r--  kernel/fork.c           |  1
-rw-r--r--  kernel/freezer.c        | 62
-rw-r--r--  kernel/power/process.c  | 15
4 files changed, 60 insertions, 28 deletions
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e6a1b8d..2327ad1 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -145,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
static void freezer_destroy(struct cgroup_subsys *ss,
struct cgroup *cgroup)
{
- kfree(cgroup_freezer(cgroup));
+ struct freezer *freezer = cgroup_freezer(cgroup);
+
+ if (freezer->state != CGROUP_THAWED)
+ atomic_dec(&system_freezing_cnt);
+ kfree(freezer);
}
/*
@@ -307,10 +311,14 @@ static int freezer_change_state(struct cgroup *cgroup,
switch (goal_state) {
case CGROUP_THAWED:
+ if (freezer->state != CGROUP_THAWED)
+ atomic_dec(&system_freezing_cnt);
freezer->state = CGROUP_THAWED;
unfreeze_cgroup(cgroup, freezer);
break;
case CGROUP_FROZEN:
+ if (freezer->state == CGROUP_THAWED)
+ atomic_inc(&system_freezing_cnt);
freezer->state = CGROUP_FREEZING;
retval = try_to_freeze_cgroup(cgroup, freezer);
break;
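Both hunks above apply the same bookkeeping rule: a freezer cgroup holds exactly one reference on system_freezing_cnt for as long as its state is anything other than CGROUP_THAWED, and freezer_destroy() drops that reference when a non-THAWED cgroup is torn down. A hedged sketch of that rule factored into a standalone helper (hypothetical name, not part of this patch):

/*
 * Illustration only: adjust system_freezing_cnt exactly when a
 * freezer cgroup's state crosses the CGROUP_THAWED boundary.
 */
static void freezer_account_state(struct freezer *freezer, int new_state)
{
	bool was_counted = freezer->state != CGROUP_THAWED;
	bool now_counted = new_state != CGROUP_THAWED;

	if (now_counted && !was_counted)
		atomic_inc(&system_freezing_cnt);
	else if (was_counted && !now_counted)
		atomic_dec(&system_freezing_cnt);

	freezer->state = new_state;
}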
diff --git a/kernel/fork.c b/kernel/fork.c
index ba0d172..d53316e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -997,7 +997,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
- clear_freeze_flag(p);
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 11e32d4..f53cd5a 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -11,9 +11,41 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state. Can be
+ * called under any context. The freezers are responsible for ensuring the
+ * target tasks see the updated state.
+ */
+bool freezing_slow_path(struct task_struct *p)
+{
+ if (p->flags & PF_NOFREEZE)
+ return false;
+
+ if (pm_nosig_freezing || cgroup_freezing(p))
+ return true;
+
+ if (pm_freezing && !(p->flags & PF_FREEZER_NOSIG))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(freezing_slow_path);
+
/* Refrigerator is place where frozen processes are stored :-). */
bool __refrigerator(bool check_kthr_stop)
{
@@ -23,17 +55,11 @@ bool __refrigerator(bool check_kthr_stop)
long save;
/*
- * Enter FROZEN. If NOFREEZE, schedule immediate thawing by
- * clearing freezing.
+ * No point in checking freezing() again - the caller already did.
+ * Proceed to enter FROZEN.
*/
spin_lock_irq(&freezer_lock);
repeat:
- if (!freezing(current)) {
- spin_unlock_irq(&freezer_lock);
- return was_frozen;
- }
- if (current->flags & PF_NOFREEZE)
- clear_freeze_flag(current);
current->flags |= PF_FROZEN;
spin_unlock_irq(&freezer_lock);
@@ -99,18 +125,12 @@ static void fake_signal_wake_up(struct task_struct *p)
bool freeze_task(struct task_struct *p, bool sig_only)
{
unsigned long flags;
- bool ret = false;
spin_lock_irqsave(&freezer_lock, flags);
-
- if ((p->flags & PF_NOFREEZE) ||
- (sig_only && !should_send_signal(p)))
- goto out_unlock;
-
- if (frozen(p))
- goto out_unlock;
-
- set_freeze_flag(p);
+ if (!freezing(p) || frozen(p)) {
+ spin_unlock_irqrestore(&freezer_lock, flags);
+ return false;
+ }
if (should_send_signal(p)) {
fake_signal_wake_up(p);
@@ -123,10 +143,9 @@ bool freeze_task(struct task_struct *p, bool sig_only)
} else {
wake_up_state(p, TASK_INTERRUPTIBLE);
}
- ret = true;
-out_unlock:
+
spin_unlock_irqrestore(&freezer_lock, flags);
- return ret;
+ return true;
}
void __thaw_task(struct task_struct *p)
@@ -143,7 +162,6 @@ void __thaw_task(struct task_struct *p)
* avoid leaving dangling TIF_SIGPENDING behind.
*/
spin_lock_irqsave(&freezer_lock, flags);
- clear_freeze_flag(p);
if (frozen(p)) {
wake_up_process(p);
} else {
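The header side of this split is not in the kernel/-limited diffstat above. Assuming the contract described in the freezing_slow_path() kernel-doc (counter checked first, per-task conditions evaluated only when it is nonzero), the freezing() fast path in include/linux/freezer.h looks roughly like this sketch:

static inline bool freezing(struct task_struct *p)
{
	/* common case: no freezing condition anywhere in the system */
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;

	/* some condition exists; check whether it applies to @p */
	return freezing_slow_path(p);
}

This keeps freezing() a single atomic read on the hot path and confines the PF_NOFREEZE / PF_FREEZER_NOSIG / cgroup_freezing() checks to the rare case where a freeze is actually in progress.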
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 9f6f5c7..0beb51e 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -101,7 +101,7 @@ static int try_to_freeze_tasks(bool sig_only)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
if (!wakeup && !freezer_should_skip(p) &&
- freezing(p) && !frozen(p))
+ p != current && freezing(p) && !frozen(p))
sched_show_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
@@ -122,7 +122,11 @@ int freeze_processes(void)
{
int error;
+ if (!pm_freezing)
+ atomic_inc(&system_freezing_cnt);
+
printk("Freezing user space processes ... ");
+ pm_freezing = true;
error = try_to_freeze_tasks(true);
if (!error) {
printk("done.");
@@ -146,6 +150,7 @@ int freeze_kernel_threads(void)
int error;
printk("Freezing remaining freezable tasks ... ");
+ pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
if (!error)
printk("done.");
@@ -162,6 +167,11 @@ void thaw_processes(void)
{
struct task_struct *g, *p;
+ if (pm_freezing)
+ atomic_dec(&system_freezing_cnt);
+ pm_freezing = false;
+ pm_nosig_freezing = false;
+
oom_killer_enable();
printk("Restarting tasks ... ");
@@ -170,9 +180,6 @@ void thaw_processes(void)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- if (cgroup_freezing(p))
- continue;
-
__thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
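From the point of view of a freezable kernel thread the calling convention is unchanged; only the way the freeze condition is detected differs, since the per-task freeze flag (the set_freeze_flag()/clear_freeze_flag() calls removed above) is gone. A hedged usage sketch (example thread, not part of this patch):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *unused)
{
	set_freezable();	/* clear PF_NOFREEZE so freezing() can apply to us */

	while (!kthread_should_stop()) {
		/*
		 * try_to_freeze() calls freezing(current); after this patch
		 * that is a read of system_freezing_cnt plus, if nonzero,
		 * freezing_slow_path(), rather than a per-task flag test.
		 */
		try_to_freeze();

		/* ... do work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}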