Diffstat (limited to 'kernel/stop_machine.c')
 kernel/stop_machine.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2c0aacc..dcfb5d7 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -4,7 +4,6 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/syscalls.h>
-#include <linux/kthread.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -26,11 +25,13 @@ static unsigned int stopmachine_num_threads;
 static atomic_t stopmachine_thread_ack;
 static DECLARE_MUTEX(stopmachine_mutex);
 
-static int stopmachine(void *unused)
+static int stopmachine(void *cpu)
 {
 	int irqs_disabled = 0;
 	int prepared = 0;
 
+	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
+
 	/* Ack: we are alive */
 	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
 	atomic_inc(&stopmachine_thread_ack);
@@ -84,8 +85,7 @@ static void stopmachine_set_state(enum stopmachine_state state)
 
 static int stop_machine(void)
 {
-	int ret = 0;
-	unsigned int i;
+	int i, ret = 0;
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
 	/* One high-prio thread per cpu. We'll do this one. */
@@ -96,16 +96,11 @@ static int stop_machine(void)
 	stopmachine_state = STOPMACHINE_WAIT;
 
 	for_each_online_cpu(i) {
-		struct task_struct *tsk;
 		if (i == raw_smp_processor_id())
 			continue;
-		tsk = kthread_create(stopmachine, NULL, "stopmachine");
-		if (IS_ERR(tsk)) {
-			ret = PTR_ERR(tsk);
+		ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
+		if (ret < 0)
 			break;
-		}
-		kthread_bind(tsk, i);
-		wake_up_process(tsk);
 		stopmachine_num_threads++;
 	}
 
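Taken together, the hunks above replace the kthread_create()/kthread_bind()/wake_up_process() sequence with a bare kernel_thread() call and move the CPU binding into the helper itself via set_cpus_allowed(). As a rough sketch, reconstructed purely from the hunks above rather than from the resulting file, the spawning loop in stop_machine() reads approximately like this after the patch is applied:

	/* One helper thread per remote CPU.  The target CPU number is passed
	 * through the void * argument; stopmachine() pins itself to that CPU
	 * with set_cpus_allowed() before acking that it is alive. */
	for_each_online_cpu(i) {
		if (i == raw_smp_processor_id())
			continue;	/* the calling CPU runs stop_machine() itself */
		ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
		if (ret < 0)
			break;		/* creation failed; stop spawning further helpers */
		stopmachine_num_threads++;
	}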