author		Tejun Heo <tj@kernel.org>	2011-11-23 09:28:17 -0800
committer	Tejun Heo <tj@kernel.org>	2011-11-23 09:28:17 -0800
commit		34b087e48367c252e343c2f8de65676a78af1e4a (patch)
tree		fbe94bad0a3073c6f4231752fba99c6bc8702277
parent		adfa543e7314b36ac55a40019977de6e47946dd7 (diff)
freezer: kill unused set_freezable_with_signal()
There's no in-kernel user of set_freezable_with_signal() left.  Mixing TIF_SIGPENDING with kernel threads can lead to nasty corner cases as kernel threads never travel the signal delivery path on their own.

e.g. the current implementation is buggy in the cancelation path of __thaw_task().  It calls recalc_sigpending_and_wake() in an attempt to clear TIF_SIGPENDING but the function never clears it regardless of sigpending state.  This means that signallable freezable kthreads may continue executing with !freezing() && stuck TIF_SIGPENDING, which can be troublesome.

This patch removes set_freezable_with_signal() along with PF_FREEZER_NOSIG and recalc_sigpending*() calls in freezer.  User tasks get TIF_SIGPENDING, kernel tasks get woken up and the spurious sigpending is dealt with in the usual signal delivery path.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
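[Editor's note, not part of this patch: a minimal sketch of how a freezable kernel thread is expected to look once the "with signal" variant is gone, using only plain set_freezable() and the wait_event_freezable() wrapper from <linux/freezer.h>. The wait queue, condition and thread function names below are illustrative, not taken from the kernel tree.]

	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* illustrative wait queue */
	static bool example_work_pending;		/* illustrative condition */

	static int example_kthread(void *unused)
	{
		set_freezable();	/* clears PF_NOFREEZE; may enter the refrigerator */

		while (!kthread_should_stop()) {
			/* sleeps interruptibly; retries try_to_freeze() when woken */
			wait_event_freezable(example_wq,
					     example_work_pending ||
					     kthread_should_stop());
			example_work_pending = false;
			/* ... do the actual work here ... */
		}
		return 0;
	}

[Since set_freezable() itself may call try_to_freeze(), it should be invoked from the kthread's own context before entering the main loop; no fake signal is ever delivered to the kthread.]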
-rw-r--r--	include/linux/freezer.h	20
-rw-r--r--	include/linux/sched.h	1
-rw-r--r--	kernel/freezer.c	27
-rw-r--r--	kernel/kthread.c	2
4 files changed, 8 insertions(+), 42 deletions(-)
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a28842e..a33550f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -49,7 +49,7 @@ static inline bool try_to_freeze(void)
}
extern bool freeze_task(struct task_struct *p);
-extern bool __set_freezable(bool with_signal);
+extern bool set_freezable(void);
#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
@@ -105,23 +105,6 @@ static inline int freezer_should_skip(struct task_struct *p)
}
/*
- * Tell the freezer that the current task should be frozen by it
- */
-static inline bool set_freezable(void)
-{
- return __set_freezable(false);
-}
-
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline bool set_freezable_with_signal(void)
-{
- return __set_freezable(true);
-}
-
-/*
* Freezer-friendly wrappers around wait_event_interruptible(),
* wait_event_killable() and wait_event_interruptible_timeout(), originally
* defined in <linux/wait.h>
@@ -176,7 +159,6 @@ static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
#define wait_event_freezable(wq, condition) \
wait_event_interruptible(wq, condition)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d12bd03..2f90470 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1788,7 +1788,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
/*
* Only the _current_ task can read/write to tsk->flags, but other
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 2589a61..9815b8d 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -39,7 +39,7 @@ bool freezing_slow_path(struct task_struct *p)
if (pm_nosig_freezing || cgroup_freezing(p))
return true;
- if (pm_freezing && !(p->flags & PF_FREEZER_NOSIG))
+ if (pm_freezing && !(p->flags & PF_KTHREAD))
return true;
return false;
@@ -72,10 +72,6 @@ bool __refrigerator(bool check_kthr_stop)
schedule();
}
- spin_lock_irq(&current->sighand->siglock);
- recalc_sigpending(); /* We sent fake signal, clean it up */
- spin_unlock_irq(&current->sighand->siglock);
-
pr_debug("%s left refrigerator\n", current->comm);
/*
@@ -120,7 +116,7 @@ bool freeze_task(struct task_struct *p)
return false;
}
- if (!(p->flags & PF_FREEZER_NOSIG)) {
+ if (!(p->flags & PF_KTHREAD)) {
fake_signal_wake_up(p);
/*
* fake_signal_wake_up() goes through p's scheduler
@@ -145,28 +141,19 @@ void __thaw_task(struct task_struct *p)
* be visible to @p as waking up implies wmb. Waking up inside
* freezer_lock also prevents wakeups from leaking outside
* refrigerator.
- *
- * If !FROZEN, @p hasn't reached refrigerator, recalc sigpending to
- * avoid leaving dangling TIF_SIGPENDING behind.
*/
spin_lock_irqsave(&freezer_lock, flags);
- if (frozen(p)) {
+ if (frozen(p))
wake_up_process(p);
- } else {
- spin_lock(&p->sighand->siglock);
- recalc_sigpending_and_wake(p);
- spin_unlock(&p->sighand->siglock);
- }
spin_unlock_irqrestore(&freezer_lock, flags);
}
/**
- * __set_freezable - make %current freezable
- * @with_signal: do we want %TIF_SIGPENDING for notification too?
+ * set_freezable - make %current freezable
*
* Mark %current freezable and enter refrigerator if necessary.
*/
-bool __set_freezable(bool with_signal)
+bool set_freezable(void)
{
might_sleep();
@@ -177,10 +164,8 @@ bool __set_freezable(bool with_signal)
*/
spin_lock_irq(&freezer_lock);
current->flags &= ~PF_NOFREEZE;
- if (with_signal)
- current->flags &= ~PF_FREEZER_NOSIG;
spin_unlock_irq(&freezer_lock);
return try_to_freeze();
}
-EXPORT_SYMBOL(__set_freezable);
+EXPORT_SYMBOL(set_freezable);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 1c36dea..3d3de633 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -282,7 +282,7 @@ int kthreadd(void *unused)
set_cpus_allowed_ptr(tsk, cpu_all_mask);
set_mems_allowed(node_states[N_HIGH_MEMORY]);
- current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+ current->flags |= PF_NOFREEZE;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);