Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--  arch/s390/kernel/smp.c | 201
1 file changed, 29 insertions(+), 172 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f03914b..3ed5c7a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -20,6 +20,9 @@
  * cpu_number_map in other architectures.
  */
 
+#define KMSG_COMPONENT "cpu"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -71,159 +74,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
-/*
- * Structure and data for __smp_call_function_map(). This is designed to
- * minimise static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	cpumask_t started;
-	cpumask_t finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
-/*
- * 'Call function' interrupt callback
- */
-static void do_call_function(void)
-{
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	cpu_set(smp_processor_id(), call_data->started);
-	(*func)(info);
-	if (wait)
-		cpu_set(smp_processor_id(), call_data->finished);;
-}
-
-static void __smp_call_function_map(void (*func) (void *info), void *info,
-				    int wait, cpumask_t map)
-{
-	struct call_data_struct data;
-	int cpu, local = 0;
-
-	/*
-	 * Can deadlock when interrupts are disabled or if in wrong context.
-	 */
-	WARN_ON(irqs_disabled() || in_irq());
-
-	/*
-	 * Check for local function call. We have to have the same call order
-	 * as in on_each_cpu() because of machine_restart_smp().
-	 */
-	if (cpu_isset(smp_processor_id(), map)) {
-		local = 1;
-		cpu_clear(smp_processor_id(), map);
-	}
-
-	cpus_and(map, map, cpu_online_map);
-	if (cpus_empty(map))
-		goto out;
-
-	data.func = func;
-	data.info = info;
-	data.started = CPU_MASK_NONE;
-	data.wait = wait;
-	if (wait)
-		data.finished = CPU_MASK_NONE;
-
-	call_data = &data;
-
-	for_each_cpu_mask(cpu, map)
-		smp_ext_bitcall(cpu, ec_call_function);
-
-	/* Wait for response */
-	while (!cpus_equal(map, data.started))
-		cpu_relax();
-	if (wait)
-		while (!cpus_equal(map, data.finished))
-			cpu_relax();
-out:
-	if (local) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	}
-}
-
-/*
- * smp_call_function:
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on all other CPUs.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function(void (*func) (void *info), void *info, int wait)
-{
-	cpumask_t map;
-
-	spin_lock(&call_lock);
-	map = cpu_online_map;
-	cpu_clear(smp_processor_id(), map);
-	__smp_call_function_map(func, info, wait, map);
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single:
- * @cpu: the CPU where func should run
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on one processor.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int wait)
-{
-	spin_lock(&call_lock);
-	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
-			   int wait)
-{
-	spin_lock(&call_lock);
-	cpu_clear(smp_processor_id(), mask);
-	__smp_call_function_map(func, info, wait, mask);
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_mask);
-
 void smp_send_stop(void)
 {
 	int cpu, rc;
@@ -265,7 +115,10 @@ static void do_ext_call_interrupt(__u16 code)
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
 	if (test_bit(ec_call_function, &bits))
-		do_call_function();
+		generic_smp_call_function_interrupt();
+
+	if (test_bit(ec_call_function_single, &bits))
+		generic_smp_call_function_single_interrupt();
 }
 
 /*
@@ -282,6 +135,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 		udelay(10);
 }
 
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		smp_ext_bitcall(cpu, ec_call_function);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ext_bitcall(cpu, ec_call_function_single);
+}
+
 #ifndef CONFIG_64BIT
 /*
  * this function sends a 'purge tlb' signal to another CPU.
@@ -382,8 +248,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return;
 	if (cpu >= NR_CPUS) {
-		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
-		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
+		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
+			   "the dump\n", cpu, NR_CPUS - 1);
 		return;
 	}
 	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
@@ -556,7 +422,7 @@ static void __init smp_detect_cpus(void)
 	}
 out:
 	kfree(info);
-	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
+	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
 	get_online_cpus();
 	__smp_rescan_cpus();
 	put_online_cpus();
@@ -572,19 +438,17 @@ int __cpuinit start_secondary(void *cpuvoid)
 	preempt_disable();
 	/* Enable TOD clock interrupts on the secondary cpu. */
 	init_cpu_timer();
-#ifdef CONFIG_VIRT_TIMER
 	/* Enable cpu timer interrupts on the secondary cpu. */
 	init_cpu_vtimer();
-#endif
 	/* Enable pfault pseudo page faults on this cpu. */
 	pfault_init();
 
 	/* call cpu notifiers */
 	notify_cpu_starting(smp_processor_id());
 	/* Mark this cpu as online */
-	spin_lock(&call_lock);
+	ipi_call_lock();
 	cpu_set(smp_processor_id(), cpu_online_map);
-	spin_unlock(&call_lock);
+	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
 	/* Print info about this processor */
@@ -633,18 +497,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 
 		save_area = get_zeroed_page(GFP_KERNEL);
 		if (!save_area)
-			goto out_save_area;
+			goto out;
 		lowcore->extended_save_area_addr = (u32) save_area;
 	}
 #endif
 	lowcore_ptr[cpu] = lowcore;
 	return 0;
 
-#ifndef CONFIG_64BIT
-out_save_area:
-	free_page(panic_stack);
-#endif
 out:
+	free_page(panic_stack);
 	free_pages(async_stack, ASYNC_ORDER);
 	free_pages((unsigned long) lowcore, lc_order);
 	return -ENOMEM;
@@ -684,12 +545,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
-	if (ccode) {
-		printk("sigp_set_prefix failed for cpu %d "
-		       "with condition code %d\n",
-		       (int) cpu, (int) ccode);
+	if (ccode)
 		return -EIO;
-	}
 
 	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
@@ -772,7 +629,7 @@ void __cpu_die(unsigned int cpu)
 	while (!smp_cpu_not_running(cpu))
 		cpu_relax();
 	smp_free_lowcore(cpu);
-	printk(KERN_INFO "Processor %d spun down\n", cpu);
+	pr_info("Processor %d stopped\n", cpu);
 }
 
 void cpu_die(void)
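
After this conversion the architecture code only supplies the transport for cross-CPU calls: do_ext_call_interrupt() forwards the ec_call_function and ec_call_function_single bits to generic_smp_call_function_interrupt() and generic_smp_call_function_single_interrupt(), and the two new arch_send_call_function_*() hooks raise the external-call interrupt. The call-data bookkeeping, locking, and wait loops that the deleted __smp_call_function_map() implemented now live in the generic kernel/smp.c. A minimal sketch of a caller after this change; the demo module and count_cpu() callback are hypothetical, only smp_call_function() itself is the real API:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <asm/atomic.h>

static atomic_t hits = ATOMIC_INIT(0);

/* Runs on each target CPU out of the ec_call_function external
 * interrupt, via generic_smp_call_function_interrupt(); it must be
 * fast, non-blocking, and must not sleep. */
static void count_cpu(void *info)
{
	atomic_inc(&hits);
}

static int __init ipi_demo_init(void)
{
	/* Run count_cpu() on all other online CPUs; wait=1 returns
	 * only after every callback has finished. Must not be called
	 * with interrupts disabled or from interrupt context. */
	smp_call_function(count_cpu, NULL, 1);
	pr_info("callback ran on %d other CPUs\n", atomic_read(&hits));
	return 0;
}

static void __exit ipi_demo_exit(void)
{
}

module_init(ipi_demo_init);
module_exit(ipi_demo_exit);
MODULE_LICENSE("GPL");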
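The KMSG_COMPONENT/pr_fmt() pair added at the top of the file is what allows the printk(KERN_INFO ...) and printk(KERN_WARNING ...) calls to become plain pr_info()/pr_warning() calls: linux/kernel.h expands pr_info(fmt, ...) to printk(KERN_INFO pr_fmt(fmt), ...), so every message from smp.c picks up a "cpu: " prefix. A small sketch of the mechanism; demo() is illustrative, not part of the patch:

/* pr_fmt() must be defined before the first header that pulls in
 * linux/kernel.h, because the default identity definition there is
 * guarded by #ifndef pr_fmt. */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>

static void demo(void)
{
	/* Prints "cpu: 4 configured CPUs, 2 standby CPUs" */
	pr_info("%d configured CPUs, %d standby CPUs\n", 4, 2);
}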