-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  kernel/exit.c         | 2
-rw-r--r--  kernel/sched.c        | 8
-rw-r--r--  mm/oom_kill.c         | 2
4 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9763de3..a06fc89 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -148,6 +148,7 @@ extern unsigned long weighted_cpuload(const int cpu);
#define EXIT_DEAD 32
/* in tsk->state again */
#define TASK_NONINTERACTIVE 64
+#define TASK_DEAD 128
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
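For orientation: ->state and ->exit_state are separate task_struct fields with separate value spaces. Before this patch the final-schedule marker written into ->state was the exit_state constant EXIT_DEAD; the new TASK_DEAD constant keeps that marker in the ->state namespace. A sketch of the constants as they stand after the patch (the values not shown in the hunk are recalled from sched.h of this era, so treat them as illustrative):

    /* in tsk->state */
    #define TASK_RUNNING            0
    #define TASK_INTERRUPTIBLE      1
    #define TASK_UNINTERRUPTIBLE    2
    #define TASK_STOPPED            4
    #define TASK_TRACED             8
    /* in tsk->exit_state */
    #define EXIT_ZOMBIE             16
    #define EXIT_DEAD               32
    /* in tsk->state again */
    #define TASK_NONINTERACTIVE     64
    #define TASK_DEAD               128     /* added by this patch */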
diff --git a/kernel/exit.c b/kernel/exit.c
index 9dd5f13..2e4c13c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -955,7 +955,7 @@ fastcall NORET_TYPE void do_exit(long code)
preempt_disable();
/* causes final put_task_struct in finish_task_switch(). */
- tsk->state = EXIT_DEAD;
+ tsk->state = TASK_DEAD;
schedule();
BUG();
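Read with its comments, the dying task's last steps in do_exit() look like this (a sketch of the hunk above with explanatory comments added; not the full function):

    /* Tail of do_exit(): the task marks itself dead and schedules away.
     * Preemption is disabled, so nothing else runs on this CPU between
     * setting the state and the final context switch. */
    preempt_disable();
    /* causes final put_task_struct in finish_task_switch(). */
    tsk->state = TASK_DEAD;
    schedule();             /* never returns for a TASK_DEAD task */
    BUG();                  /* reaching this line would be a kernel bug */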
diff --git a/kernel/sched.c b/kernel/sched.c
index a9405d7..74f169a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1761,10 +1761,10 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
/*
* A task struct has one reference for the use as "current".
- * If a task dies, then it sets EXIT_DEAD in tsk->state and calls
+ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
* schedule one last time. The schedule call will never return, and
* the scheduled task must drop that reference.
- * The test for EXIT_DEAD must occur while the runqueue locks are
+ * The test for TASK_DEAD must occur while the runqueue locks are
* still held, otherwise prev could be scheduled on another cpu, die
* there before we look at prev->state, and then the reference would
* be dropped twice.
@@ -1775,7 +1775,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
finish_lock_switch(rq, prev);
if (mm)
mmdrop(mm);
- if (unlikely(prev_state == EXIT_DEAD)) {
+ if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
* task and put them back on the free list.
@@ -5153,7 +5153,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
/* Cannot have done final schedule yet: would have vanished. */
- BUG_ON(p->state == EXIT_DEAD);
+ BUG_ON(p->state == TASK_DEAD);
get_task_struct(p);
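On the other side of that final schedule(), the next task's finish_task_switch() is what actually drops the dying task's last reference; the migrate_dead() BUG_ON above only asserts that a task being migrated off a dead CPU has not yet done its final schedule, since afterwards its task_struct may already be gone. A hedged sketch of the finish_task_switch() branch the first kernel/sched.c hunk touches, assuming the helpers kprobe_flush_task() and put_task_struct() as in the sources of this era (the hunk itself only shows the comparison line):

    /* prev_state was sampled while the runqueue lock was still held, so
     * prev cannot be scheduled on another CPU, die there, and have this
     * reference dropped twice. */
    if (unlikely(prev_state == TASK_DEAD)) {
            /* Remove function-return probe instances associated with this
             * task and put them back on the free list. */
            kprobe_flush_task(prev);
            /* Drop the reference the task held for being "current". */
            put_task_struct(prev);
    }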
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 202f186..21f0a7e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -227,7 +227,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
p->flags & PF_EXITING;
if (releasing) {
/* TASK_DEAD tasks have already released their mm */
- if (p->state == EXIT_DEAD)
+ if (p->state == TASK_DEAD)
continue;
if (p->flags & PF_EXITING && p == current) {
chosen = p;
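For completeness, a sketch of the loop body in select_bad_process() that this last hunk modifies, reconstructed around the visible context lines (the surrounding code and the meaning of "releasing" are assumptions based on the sources of this era):

    /* "releasing" is true when the task is already on its way out
     * (TIF_MEMDIE set or PF_EXITING). */
    if (releasing) {
            /* TASK_DEAD tasks have already released their mm, so killing
             * them would free no memory: skip them as OOM victims. */
            if (p->state == TASK_DEAD)
                    continue;
            /* If the exiting task is the caller itself, choose it rather
             * than shooting another task. */
            if (p->flags & PF_EXITING && p == current) {
                    chosen = p;
                    /* ... rest of the selection logic elided ... */
            }
    }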