author		Thomas Gleixner <tglx@linutronix.de>	2012-05-07 17:59:48 +0000
committer	Thomas Gleixner <tglx@linutronix.de>	2012-05-08 12:35:06 +0200
commit		f37f435f33717dcf15fd4bb422da739da7fc2052
tree		aca7b5707758ef33158116acbbdd41b8ab6c2fb4
parent		9cd75e13de2dcf32ecc21c7f277cff3c0ced059e
smp: Implement kick_all_cpus_sync()
Will replace the misnamed cpu_idle_wait() function, which is copied a
gazillion times all over arch/*.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120507175652.049316594@linutronix.de
-rw-r--r--	include/linux/smp.h	 4
-rw-r--r--	kernel/smp.c		23
2 files changed, 27 insertions, 0 deletions
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 24360de..717fb74 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -81,6 +81,8 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data,
 int smp_call_function_any(const struct cpumask *mask,
 			  smp_call_func_t func, void *info, int wait);
 
+void kick_all_cpus_sync(void);
+
 /*
  * Generic and arch helpers
  */
@@ -192,6 +194,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 	return smp_call_function_single(0, func, info, wait);
 }
 
+static inline void kick_all_cpus_sync(void) { }
+
 #endif /* !SMP */
 
 /*
diff --git a/kernel/smp.c b/kernel/smp.c
index a61294c..d0ae5b2 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -795,3 +795,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 	}
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
+
+static void do_nothing(void *unused)
+{
+}
+
+/**
+ * kick_all_cpus_sync - Force all cpus out of idle
+ *
+ * Used to synchronize the update of pm_idle function pointer. It's
+ * called after the pointer is updated and returns after the dummy
+ * callback function has been executed on all cpus. The execution of
+ * the function can only happen on the remote cpus after they have
+ * left the idle function which had been called via pm_idle function
+ * pointer. So it's guaranteed that nothing uses the previous pointer
+ * anymore.
+ */
+void kick_all_cpus_sync(void)
+{
+	/* Make sure the change is visible before we kick the cpus */
+	smp_mb();
+	smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
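
For context, the usage pattern the changelog and the kerneldoc comment describe looks roughly like the sketch below: a caller publishes a new idle routine through a pm_idle-style function pointer and then calls kick_all_cpus_sync() where arch code previously used its own cpu_idle_wait(). This is only an illustration; the pointer my_idle, old_idle() and new_idle() are hypothetical placeholders and are not part of this patch.

#include <linux/smp.h>

/* Hypothetical idle routines, stand-ins for an arch's real ones. */
static void old_idle(void) { /* previous idle implementation */ }
static void new_idle(void) { /* replacement idle implementation */ }

/* Hypothetical pm_idle-style pointer used by the idle loop. */
static void (*my_idle)(void) = old_idle;

static void switch_idle_routine(void)
{
	/* Publish the new idle routine. */
	my_idle = new_idle;

	/*
	 * kick_all_cpus_sync() issues a full memory barrier, then sends a
	 * dummy smp_call_function() to the other cpus and waits for the
	 * callback to run.  A cpu can only service that call after it has
	 * left its current idle routine, so once this returns no cpu is
	 * still executing through the old pointer value.
	 */
	kick_all_cpus_sync();
}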