path: root/arch/parisc/kernel
author    Jens Axboe <jens.axboe@oracle.com>    2008-06-10 20:50:56 +0200
committer Jens Axboe <jens.axboe@oracle.com>    2008-06-26 11:22:57 +0200
commit    dbcf4787d816a4694ec83b5fde1a947c3ce74d57 (patch)
tree      77d1151f7257e649589bc5daa2ec6f9abf6beffa /arch/parisc/kernel
parent    2f304c0a0a55072b80957580f1b66256a615d8da (diff)
parisc: convert to generic helpers for IPI function calls
This converts parisc to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Tested by Kyle,
seems to work.

Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
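For context: before this patch, parisc carried its own cross-CPU call
machinery (the smp_call_struct deleted below, with hand-rolled
unstarted/unfinished counters and a timeout/retry loop). The generic helpers
move that handshake into common code; the architecture now only raises the
IPI (arch_send_call_function_ipi(), arch_send_call_function_single_ipi())
and forwards the interrupt (generic_smp_call_function_interrupt(),
generic_smp_call_function_single_interrupt()). The sketch below is a
user-space analogue of the pattern being centralized: a thread stands in
for the target CPU and a condition variable for the IPI. It is illustrative
only; none of these names are kernel APIs. Build with: cc -pthread csd_demo.c

/* User-space analogue of the "call a function on another CPU and
 * optionally wait" handshake. Illustrative only; not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct call_single_data {
	void (*func)(void *info);   /* function to run "remotely"        */
	void *info;                 /* argument passed through           */
	bool pending;               /* set by caller, cleared when done  */
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static struct call_single_data csd = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
};

/* Stand-in for the IPI handler on the target CPU: run the queued
 * function, then acknowledge completion to the caller. */
static void *remote_cpu(void *unused)
{
	pthread_mutex_lock(&csd.lock);
	while (!csd.pending)
		pthread_cond_wait(&csd.cond, &csd.lock);
	csd.func(csd.info);
	csd.pending = false;
	pthread_cond_broadcast(&csd.cond);
	pthread_mutex_unlock(&csd.lock);
	return NULL;
}

/* Stand-in for the caller side with wait=1: queue (func, info),
 * "send the IPI", then spin (here: sleep) until the target is done. */
static void call_function_single(void (*func)(void *), void *info)
{
	pthread_mutex_lock(&csd.lock);
	csd.func = func;
	csd.info = info;
	csd.pending = true;
	pthread_cond_broadcast(&csd.cond);
	while (csd.pending)
		pthread_cond_wait(&csd.cond, &csd.lock);
	pthread_mutex_unlock(&csd.lock);
}

static void say_hello(void *info)
{
	printf("remote cpu ran func with info=%s\n", (const char *)info);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, remote_cpu, NULL);
	call_function_single(say_hello, "hello");
	pthread_join(t, NULL);
	return 0;
}

The kernel version differs in the obvious ways (per-CPU queues, IRQ-safe
locking, no sleeping), but the caller-side contract -- queue (func, info),
kick the target, optionally wait until done -- is the same one the generic
smp_call_function_single() now provides to parisc for free.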
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--    arch/parisc/kernel/smp.c | 134
1 file changed, 21 insertions(+), 113 deletions(-)
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 85fc775..126105c 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map);
DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
-struct smp_call_struct {
- void (*func) (void *info);
- void *info;
- long wait;
- atomic_t unstarted_count;
- atomic_t unfinished_count;
-};
-static volatile struct smp_call_struct *smp_call_function_data;
-
enum ipi_message_type {
IPI_NOP=0,
IPI_RESCHEDULE=1,
IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
IPI_CPU_START,
IPI_CPU_STOP,
IPI_CPU_TEST
@@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id)
case IPI_CALL_FUNC:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
- {
- volatile struct smp_call_struct *data;
- void (*func)(void *info);
- void *info;
- int wait;
-
- data = smp_call_function_data;
- func = data->func;
- info = data->info;
- wait = data->wait;
-
- mb();
- atomic_dec ((atomic_t *)&data->unstarted_count);
-
- /* At this point, *data can't
- * be relied upon.
- */
-
- (*func)(info);
-
- /* Notify the sending CPU that the
- * task is done.
- */
- mb();
- if (wait)
- atomic_dec ((atomic_t *)&data->unfinished_count);
- }
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
+ generic_smp_call_function_single_interrupt();
break;
case IPI_CPU_START:
@@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op)
spin_unlock_irqrestore(lock, flags);
}
+static void
+send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ ipi_send(cpu, op);
+}
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
@@ -295,86 +274,15 @@ smp_send_all_nop(void)
send_IPI_allbutself(IPI_NOP);
}
-
-/**
- * Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or have executed.
- */
-
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct smp_call_struct data;
- unsigned long timeout;
- static DEFINE_SPINLOCK(lock);
- int retries = 0;
-
- if (num_online_cpus() < 2)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- /* can also deadlock if IPIs are disabled */
- WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
-
-
- data.func = func;
- data.info = info;
- data.wait = wait;
- atomic_set(&data.unstarted_count, num_online_cpus() - 1);
- atomic_set(&data.unfinished_count, num_online_cpus() - 1);
-
- if (retry) {
- spin_lock (&lock);
- while (smp_call_function_data != 0)
- barrier();
- }
- else {
- spin_lock (&lock);
- if (smp_call_function_data) {
- spin_unlock (&lock);
- return -EBUSY;
- }
- }
-
- smp_call_function_data = &data;
- spin_unlock (&lock);
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(IPI_CALL_FUNC);
-
- retry:
- /* Wait for response */
- timeout = jiffies + HZ;
- while ( (atomic_read (&data.unstarted_count) > 0) &&
- time_before (jiffies, timeout) )
- barrier ();
-
- if (atomic_read (&data.unstarted_count) > 0) {
- printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
- smp_processor_id(), ++retries);
- goto retry;
- }
- /* We either got one or timed out. Release the lock */
-
- mb();
- smp_call_function_data = NULL;
-
- while (wait && atomic_read (&data.unfinished_count) > 0)
- barrier ();
-
- return 0;
+ send_IPI_mask(mask, IPI_CALL_FUNC);
}
-EXPORT_SYMBOL(smp_call_function);
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
/*
* Flush all other CPU's tlb and then mine. Do this with on_each_cpu()