-rw-r--r--  include/linux/sched.h |  1
-rw-r--r--  kernel/exit.c         |  6
-rw-r--r--  kernel/sched.c        | 16
-rw-r--r--  mm/oom_kill.c         |  4
4 files changed, 12 insertions, 15 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fbc69cc..9763de3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1061,7 +1061,6 @@ static inline void put_task_struct(struct task_struct *t)
 					/* Not implemented yet, only for 486*/
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
-#define PF_DEAD		0x00000008	/* Dead */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
 #define PF_DUMPCORE	0x00000200	/* dumped core */
diff --git a/kernel/exit.c b/kernel/exit.c
index 3d759c9..9dd5f13 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -953,10 +953,8 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (tsk->splice_pipe)
 		__free_pipe_info(tsk->splice_pipe);
 
-	/* PF_DEAD causes final put_task_struct after we schedule. */
 	preempt_disable();
-	BUG_ON(tsk->flags & PF_DEAD);
-	tsk->flags |= PF_DEAD;
+	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = EXIT_DEAD;
 
 	schedule();
@@ -972,7 +970,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
 {
 	if (comp)
 		complete(comp);
-	
+
 	do_exit(code);
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index e1646b0..a9405d7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1755,27 +1755,27 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
-	unsigned long prev_task_flags;
+	long prev_state;
 
 	rq->prev_mm = NULL;
 
 	/*
 	 * A task struct has one reference for the use as "current".
-	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-	 * calls schedule one last time. The schedule call will never return,
-	 * and the scheduled task must drop that reference.
-	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+	 * If a task dies, then it sets EXIT_DEAD in tsk->state and calls
+	 * schedule one last time. The schedule call will never return, and
+	 * the scheduled task must drop that reference.
+	 * The test for EXIT_DEAD must occur while the runqueue locks are
 	 * still held, otherwise prev could be scheduled on another cpu, die
 	 * there before we look at prev->state, and then the reference would
 	 * be dropped twice.
 	 *		Manfred Spraul <manfred@colorfullife.com>
 	 */
-	prev_task_flags = prev->flags;
+	prev_state = prev->state;
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
 	if (mm)
 		mmdrop(mm);
-	if (unlikely(prev_task_flags & PF_DEAD)) {
+	if (unlikely(prev_state == EXIT_DEAD)) {
 		/*
 		 * Remove function-return probe instances associated with this
 		 * task and put them back on the free list.
@@ -5153,7 +5153,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 	BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
 
 	/* Cannot have done final schedule yet: would have vanished. */
-	BUG_ON(p->flags & PF_DEAD);
+	BUG_ON(p->state == EXIT_DEAD);
 
 	get_task_struct(p);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f3dd79c..202f186 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -226,8 +226,8 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
 		releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
 				p->flags & PF_EXITING;
 		if (releasing) {
-			/* PF_DEAD tasks have already released their mm */
-			if (p->flags & PF_DEAD)
+			/* TASK_DEAD tasks have already released their mm */
+			if (p->state == EXIT_DEAD)
 				continue;
 			if (p->flags & PF_EXITING && p == current) {
 				chosen = p;
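The mechanism the patch relies on, restated outside the diff: the dying task no longer raises a PF_DEAD flag; it sets its state to EXIT_DEAD just before its final schedule(), and the last put_task_struct() happens in finish_task_switch() on the next task, which must sample prev->state while the runqueue lock is still held so the reference cannot be dropped twice. The user-space C program below is only a simplified sketch of that ordering, not kernel code; the names (EXIT_DEAD, put_task_struct, finish_task_switch) mirror kernel identifiers, and the constant value, locking, and reference handling are illustrative placeholders.

/*
 * Standalone illustration (user space, single threaded) of the
 * "mark state, schedule away, drop last reference" sequence.
 * Not kernel code; identifiers mirror the kernel's for readability.
 */
#include <stdio.h>
#include <stdlib.h>

#define EXIT_DEAD 32		/* placeholder value for the sketch */

struct task {
	long state;		/* replaces the old PF_DEAD flag for this purpose */
	int  usage;		/* reference count; one ref held as "current" */
};

static void put_task_struct(struct task *t)
{
	if (--t->usage == 0) {
		printf("freeing task struct\n");
		free(t);
	}
}

/* Runs on the next task's stack, conceptually still under the runqueue lock. */
static void finish_task_switch(struct task *prev)
{
	/*
	 * Sample prev->state while the "runqueue lock" is still held.
	 * If it were read only after unlocking, prev could be scheduled on
	 * another CPU, die there, and the reference would be dropped twice.
	 */
	long prev_state = prev->state;

	/* ... finish_arch_switch(), finish_lock_switch(), mmdrop() ... */

	if (prev_state == EXIT_DEAD)
		put_task_struct(prev);	/* drop the final reference */
}

/* The tail of do_exit(): in the kernel this never returns. */
static void do_exit_tail(struct task *tsk)
{
	/* causes the final put_task_struct() in finish_task_switch() */
	tsk->state = EXIT_DEAD;
	/* schedule();  -- the last context switch away from this task */
}

int main(void)
{
	struct task *tsk = calloc(1, sizeof(*tsk));

	tsk->usage = 1;			/* the reference held as "current" */
	do_exit_tail(tsk);		/* task marks itself dead */
	finish_task_switch(tsk);	/* next task drops the last reference */
	return 0;
}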