author    Heiko Carstens <heiko.carstens@de.ibm.com>    2006-06-25 05:49:10 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>         2006-06-25 10:01:22 -0700
commit    fc75cdfa5b43ac4d3232b490800cd35063adafd3 (patch)
tree      0529ffd9633667457d5340a09b3cb352d09436d2
parent    04a3446c90137a473837288b04b517b19dc67a0d (diff)
[PATCH] cpu hotplug: fix CPU_UP_CANCEL handling
If a cpu hotplug callback fails on CPU_UP_PREPARE, all callbacks will be
called with CPU_UP_CANCELED. A few of these callbacks assume that on
CPU_UP_PREPARE a pointer to a task has been stored in a percpu array. This
assumption does not hold if CPU_UP_PREPARE fails, and the subsequent calls
to kthread_bind() in CPU_UP_CANCELED will then cause an addressing
exception because they are passed a NULL pointer.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
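For illustration only (not part of the commit): a minimal sketch of the
notifier pattern this patch hardens, modelled loosely on kernel/softirq.c of
that era. The names my_task, my_thread_fn and my_cpu_callback are
hypothetical. The point is that CPU_UP_PREPARE is the only place the per-cpu
task pointer gets set, so CPU_UP_CANCELED must tolerate a still-NULL pointer
before handing it to kthread_bind():

/*
 * Hypothetical example - not taken from the patch above. The names
 * my_task, my_thread_fn and my_cpu_callback are made up for illustration.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, my_task);

static int my_thread_fn(void *unused)
{
	/* Idle placeholder thread body. */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		p = kthread_create(my_thread_fn, NULL, "my_thread/%d", hotcpu);
		if (IS_ERR(p))
			return NOTIFY_BAD;	/* per-cpu slot stays NULL */
		kthread_bind(p, hotcpu);
		per_cpu(my_task, hotcpu) = p;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		/*
		 * CPU_UP_PREPARE may have failed in this or an earlier
		 * callback, in which case no task was ever stored here;
		 * without this check kthread_bind() below would be handed
		 * a NULL pointer.
		 */
		if (!per_cpu(my_task, hotcpu))
			break;
		/* Unbind from the never-onlined cpu so it can run. */
		kthread_bind(per_cpu(my_task, hotcpu),
			     any_online_cpu(cpu_online_map));
		kthread_stop(per_cpu(my_task, hotcpu));
		per_cpu(my_task, hotcpu) = NULL;
		break;
#endif
	}
	return NOTIFY_OK;
}

kernel/workqueue.c below follows the same idea, except that the task pointer
lives in a per-workqueue, per-cpu structure, so its guard uses continue
inside the list walk instead of break.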
-rw-r--r--  kernel/sched.c       2
-rw-r--r--  kernel/softirq.c     2
-rw-r--r--  kernel/softlockup.c  2
-rw-r--r--  kernel/workqueue.c   2
4 files changed, 8 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f8d540b..f06d059 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4756,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!cpu_rq(cpu)->migration_thread)
+			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 336f92d..9e2f1c6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -470,6 +470,8 @@ static int cpu_callback(struct notifier_block *nfb,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(ksoftirqd, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 2c1be11..b5c3b94 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -127,6 +127,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(watchdog_task, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f869aff..565cf7a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -590,6 +590,8 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_UP_CANCELED:
 		list_for_each_entry(wq, &workqueues, list) {
+			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+				continue;
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
 				     any_online_cpu(cpu_online_map));