path: root/kernel/cpu.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-11-13 19:32:29 +0100
committer Ingo Molnar <mingo@elte.hu>    2010-11-18 13:27:46 +0100
commit    48c5ccae88dcd989d9de507e8510313c6cbd352b (patch)
tree      06fe8ce2ac28e9f5844de8bc32ecbef97e40d68b /kernel/cpu.c
parent    92fd4d4d67b945c0766416284d4ab236b31542c4 (diff)
sched: Simplify cpu-hot-unplug task migration
While discussing the need for sched_idle_next(), Oleg remarked that since
try_to_wake_up() ensures sleeping tasks will end up running on a sane cpu, we
can do away with migrate_live_tasks().

If we then extend the existing hack of migrating current from CPU_DYING to
migrating the full rq worth of tasks from CPU_DYING, the need for the
sched_idle_next() abomination disappears as well, since idle will be the only
possible thread left after the migration thread stops.

This greatly simplifies the hot-unplug task migration path, as can be seen
from the resulting code reduction (and about half the new lines are comments).

Suggested-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1289851597.2109.547.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
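To make the mechanism concrete, here is a small, hypothetical userspace C
simulation of the idea described above. It is not the kernel's actual
migrate_tasks()/migration_call() code; the names sim_rq and sim_migrate_tasks
are invented for illustration. The point it models: during CPU_DYING, every
runnable task is pulled off the dying CPU's runqueue and re-enqueued on a
surviving CPU, so only the idle task remains once the stop-machine thread
finishes.

#include <stdio.h>
#include <string.h>

#define MAX_TASKS 8

struct sim_rq {
	int cpu;
	int nr_running;
	const char *tasks[MAX_TASKS];	/* tasks[0] is always the idle task */
};

/* Pull every non-idle task off @dead and re-enqueue it on @dest. */
static void sim_migrate_tasks(struct sim_rq *dead, struct sim_rq *dest)
{
	while (dead->nr_running > 1) {
		const char *next = dead->tasks[--dead->nr_running];

		dest->tasks[dest->nr_running++] = next;
		printf("moved %s: cpu%d -> cpu%d\n", next, dead->cpu, dest->cpu);
	}
}

int main(void)
{
	struct sim_rq dying = { .cpu = 1, .nr_running = 3,
				.tasks = { "idle/1", "kworker/1:0", "bash" } };
	struct sim_rq online = { .cpu = 0, .nr_running = 1,
				 .tasks = { "idle/0" } };

	sim_migrate_tasks(&dying, &online);

	/* Mirrors the new BUG_ON(!idle_cpu(cpu)) check in _cpu_down(). */
	if (dying.nr_running == 1 && strcmp(dying.tasks[0], "idle/1") == 0)
		printf("cpu%d runs only its idle task; safe to take it down\n",
		       dying.cpu);
	return 0;
}

In the real kernel the same effect is produced by the scheduler's CPU_DYING
notifier while the machine is stopped, which is why _cpu_down() can assert
idle_cpu(cpu) instead of polling for it, as the diff below shows.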
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--  kernel/cpu.c  16
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f6e726f..8615aa6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -189,7 +189,6 @@ static inline void check_for_tasks(int cpu)
}
struct take_cpu_down_param {
- struct task_struct *caller;
unsigned long mod;
void *hcpu;
};
@@ -208,11 +207,6 @@ static int __ref take_cpu_down(void *_param)
cpu_notify(CPU_DYING | param->mod, param->hcpu);
- if (task_cpu(param->caller) == cpu)
- move_task_off_dead_cpu(cpu, param->caller);
- /* Force idle task to run as soon as we yield: it should
- immediately notice cpu is offline and die quickly. */
- sched_idle_next();
return 0;
}
@@ -223,7 +217,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
- .caller = current,
.mod = mod,
.hcpu = hcpu,
};
@@ -253,9 +246,12 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
}
BUG_ON(cpu_online(cpu));
- /* Wait for it to sleep (leaving idle task). */
- while (!idle_cpu(cpu))
- yield();
+ /*
+ * The migration_call() CPU_DYING callback will have removed all
+ * runnable tasks from the cpu, there's only the idle task left now
+ * that the migration thread is done doing the stop_machine thing.
+ */
+ BUG_ON(!idle_cpu(cpu));
/* This actually kills the CPU. */
__cpu_die(cpu);
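A usage note on the last hunk (my reading of the code, not part of the
commit): the old polling loop can become a hard assertion only because
__stop_machine() does not return to _cpu_down() until take_cpu_down(), and
with it the CPU_DYING notifier chain, has finished on the dying CPU. A
condensed, approximate sketch of that caller-side ordering, paraphrased from
the surrounding _cpu_down() code with the error path elided:

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err)
		goto out_release;	/* CPU refused to die: CPU_DOWN_FAILED path */

	/* stop_machine has finished, so the CPU_DYING migration already ran. */
	BUG_ON(cpu_online(cpu));
	BUG_ON(!idle_cpu(cpu));		/* only the idle task may remain */

	__cpu_die(cpu);			/* this actually kills the CPU */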