Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/apic_32.c                |   4
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce_64.c      |   6
-rw-r--r--   arch/x86/kernel/cpu/mcheck/non-fatal.c   |   2
-rw-r--r--   arch/x86/kernel/cpu/mtrr/main.c          |   4
-rw-r--r--   arch/x86/kernel/cpu/perfctr-watchdog.c   |   4
-rw-r--r--   arch/x86/kernel/cpuid.c                  |   2
-rw-r--r--   arch/x86/kernel/entry_64.S               |   3
-rw-r--r--   arch/x86/kernel/io_apic_32.c             |   2
-rw-r--r--   arch/x86/kernel/io_apic_64.c             |   2
-rw-r--r--   arch/x86/kernel/irqinit_64.c             |   4
-rw-r--r--   arch/x86/kernel/ldt.c                    |   2
-rw-r--r--   arch/x86/kernel/nmi.c                    |   6
-rw-r--r--   arch/x86/kernel/process.c                |   2
-rw-r--r--   arch/x86/kernel/smp.c                    | 158
-rw-r--r--   arch/x86/kernel/smpboot.c                |   4
-rw-r--r--   arch/x86/kernel/smpcommon.c              |  56
-rw-r--r--   arch/x86/kernel/tlb_32.c                 |   2
-rw-r--r--   arch/x86/kernel/tlb_64.c                 |   2
-rw-r--r--   arch/x86/kernel/vsyscall_64.c            |   4
19 files changed, 53 insertions, 216 deletions
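
In short, this patch converts x86 from its private call_data_struct machinery to the generic smp_call_function helpers, and drops the unused "nonatomic" argument from every caller. A minimal caller-side sketch of the signature change (my_func and example() are hypothetical names, not taken from the patch):

#include <linux/smp.h>

/* Hypothetical cross-CPU handler: it runs in IPI (interrupt) context
 * on each target CPU, so it must be fast, non-blocking and must not
 * sleep. */
static void my_func(void *info)
{
}

static void example(void)
{
	/*
	 * Before this patch, the third "nonatomic" argument was unused:
	 *	on_each_cpu(my_func, NULL, 1, 1);
	 *	smp_call_function_single(0, my_func, NULL, 1, 1);
	 * After it, only func, info and wait remain:
	 */
	on_each_cpu(my_func, NULL, 1);			/* all CPUs, wait */
	smp_call_function_single(0, my_func, NULL, 1);	/* CPU 0, wait */
}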
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 3e58b67..a437d02 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1340,6 +1340,10 @@ void __init smp_intr_init(void)
 
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+	/* IPI for single call function */
+	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+				call_function_single_interrupt);
 }
 #endif
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 9874107..c4a7ec3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -364,7 +364,7 @@ static void mcheck_check_cpu(void *info)
 
 static void mcheck_timer(struct work_struct *work)
 {
-	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
+	on_each_cpu(mcheck_check_cpu, NULL, 1);
 
 	/*
 	 * Alert userspace if needed. If we logged an MCE, reduce the
@@ -621,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 	 * Collect entries that were still getting written before the
 	 * synchronize.
 	 */
-	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
+	on_each_cpu(collect_tscs, cpu_tsc, 1);
 	for (i = next; i < MCE_LOG_LEN; i++) {
 		if (mcelog.entry[i].finished &&
 		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
@@ -746,7 +746,7 @@ static void mce_restart(void)
 	if (next_interval)
 		cancel_delayed_work(&mcheck_work);
 	/* Timer race is harmless here */
-	on_each_cpu(mce_init, NULL, 1, 1);
+	on_each_cpu(mce_init, NULL, 1);
 	next_interval = check_interval * HZ;
 	if (next_interval)
 		schedule_delayed_work(&mcheck_work,
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index 00ccb6c..cc1fccd 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
 static void mce_work_fn(struct work_struct *work)
 {
-	on_each_cpu(mce_checkregs, NULL, 1, 1);
+	on_each_cpu(mce_checkregs, NULL, 1);
 	schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 105afe1..6f23969 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -223,7 +223,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
 	atomic_set(&data.gate,0);
 
 	/*  Start the ball rolling on other CPUs  */
-	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+	if (smp_call_function(ipi_handler, &data, 0) != 0)
 		panic("mtrr: timed out waiting for other CPUs\n");
 
 	local_irq_save(flags);
@@ -1682,7 +1682,7 @@ void mtrr_ap_init(void)
  */
 void mtrr_save_state(void)
 {
-	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
 static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 2e9bef6..6d4bdc0 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -189,7 +189,7 @@ void disable_lapic_nmi_watchdog(void)
 	if (atomic_read(&nmi_active) <= 0)
 		return;
 
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
+	on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
 
 	if (wd_ops)
 		wd_ops->unreserve();
@@ -213,7 +213,7 @@ void enable_lapic_nmi_watchdog(void)
 		return;
 	}
 
-	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
+	on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
 	touch_nmi_watchdog();
 }
 
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 71f1c26..2de5fa2 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -96,7 +96,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	for (; count; count -= 16) {
 		cmd.eax = pos;
 		cmd.ecx = pos >> 32;
-		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
+		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
 		if (copy_to_user(tmp, &cmd, 16))
 			return -EFAULT;
 		tmp += 16;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index ba41bf4..ae63e58 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -816,6 +816,9 @@ END(invalidate_interrupt\num)
 ENTRY(call_function_interrupt)
 	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
 END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
 ENTRY(irq_move_cleanup_interrupt)
 	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
 END(irq_move_cleanup_interrupt)
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 603261a..558abf4 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1569,7 +1569,7 @@ void /*__init*/ print_local_APIC(void *dummy)
 
 void print_all_local_APICs(void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1, 1);
+	on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void /*__init*/ print_PIC(void)
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index b16ef02..6510cde 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1160,7 +1160,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1, 1);
+	on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void __apicdebuginit print_PIC(void)
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 31f49e8..0373e88 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -199,6 +199,10 @@ void __init native_init_IRQ(void)
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
+	/* IPI for generic single function call */
+	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+			call_function_single_interrupt);
+
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 21f2bae..a844957 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 		load_LDT(pc);
 		mask = cpumask_of_cpu(smp_processor_id());
 		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-			smp_call_function(flush_ldt, current->mm, 1, 1);
+			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
 		load_LDT(pc);
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 716b892..ec024b3 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -130,7 +130,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
-		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
 	for_each_possible_cpu(cpu)
@@ -272,7 +272,7 @@ static void __acpi_nmi_enable(void *__unused)
 void acpi_nmi_enable(void)
 {
 	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+		on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
 static void __acpi_nmi_disable(void *__unused)
@@ -286,7 +286,7 @@ static void __acpi_nmi_disable(void *__unused)
 void acpi_nmi_disable(void)
 {
 	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+		on_each_cpu(__acpi_nmi_disable, NULL, 1);
 }
 
 void setup_apic_nmi_watchdog(void *unused)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4061d63..7dceea9 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -132,7 +132,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 0cb7aad..361b7a4 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu)
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-void lock_ipi_call_lock(void)
+void native_send_call_func_single_ipi(int cpu)
 {
-	spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-				int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
-
-	if (!cpus)
-		return;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-			      void (*func)(void *), void *info,
-			      int wait)
+void native_send_call_func_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
 	cpumask_t allbutself;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
 
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	wmb();
-
-	/* Send a message to other CPUs */
 	if (cpus_equal(mask, allbutself) &&
 	    cpus_equal(cpu_online_map, cpu_callout_map))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
 		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	spin_unlock(&call_lock);
-
-	return 0;
 }
 
 static void stop_this_cpu(void *dummy)
@@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy)
 
 static void native_smp_send_stop(void)
 {
-	int nolock;
 	unsigned long flags;
 
 	if (reboot_force)
 		return;
 
-	/* Don't deadlock on the call lock in panic */
-	nolock = !spin_trylock(&call_lock);
+	smp_call_function(stop_this_cpu, NULL, 0);
 	local_irq_save(flags);
-	__smp_call_function(stop_this_cpu, NULL, 0, 0);
-	if (!nolock)
-		spin_unlock(&call_lock);
 	disable_local_APIC();
 	local_irq_restore(flags);
 }
@@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
 	ack_APIC_irq();
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
#ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
 #else
 	add_pda(irq_call_count, 1);
 #endif
 	irq_exit();
+}
 
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
+void smp_call_function_single_interrupt(struct pt_regs *regs)
+{
+	ack_APIC_irq();
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
+	irq_exit();
 }
 
 struct smp_ops smp_ops = {
@@ -338,7 +219,8 @@ struct smp_ops smp_ops = {
 
 	.smp_send_stop = native_smp_send_stop,
 	.smp_send_reschedule = native_smp_send_reschedule,
-	.smp_call_function_mask = native_smp_call_function_mask,
+
+	.send_call_func_ipi = native_send_call_func_ipi,
+	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
-
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f35c2d8..687376a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -327,12 +327,12 @@ static void __cpuinit start_secondary(void *unused)
 	 * lock helps us to not include this cpu in a currently in progress
 	 * smp_call_function().
 	 */
-	lock_ipi_call_lock();
+	ipi_call_lock_irq();
 #ifdef CONFIG_X86_IO_APIC
 	setup_vector_irq(smp_processor_id());
 #endif
 	cpu_set(smp_processor_id(), cpu_online_map);
-	unlock_ipi_call_lock();
+	ipi_call_unlock_irq();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
 	setup_secondary_clock();
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 3449064..99941b3 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu)
 	per_cpu(cpu_number, cpu) = cpu;
 }
 #endif
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU.  Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index 9bb2363..fec1ece 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index 5039d0f..dcbf7a1 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -275,5 +275,5 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e50740d..0b8b6690 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -279,7 +279,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
 	long cpu = (long)arg;
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
 	return NOTIFY_DONE;
 }
 
@@ -302,7 +302,7 @@ static int __init vsyscall_init(void)
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2);
 #endif
-	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+	on_each_cpu(cpu_vsyscall_init, NULL, 1);
 	hotcpu_notifier(cpu_vsyscall_notifier, 0);
 	return 0;
 }
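
For context: the two new smp_ops hooks are not called directly by users; the generic helpers in kernel/smp.c queue the (func, info) data and then kick the target CPU(s) through small arch wrappers. A rough sketch of that glue, assuming the shape include/asm-x86/smp.h took in this series (the header is outside the arch/x86/kernel diffstat above):

/* Generic code invokes these after queueing the call-function data;
 * they dispatch to the hooks registered in struct smp_ops above. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);	/* native_send_call_func_single_ipi() */
}

static inline void arch_send_call_function_ipi(cpumask_t mask)
{
	smp_ops.send_call_func_ipi(mask);	/* native_send_call_func_ipi() */
}

The receiving side is visible in the diff itself: the IPI arrives on CALL_FUNCTION_SINGLE_VECTOR, enters through call_function_single_interrupt (entry_64.S), and smp_call_function_single_interrupt() hands off to generic_smp_call_function_single_interrupt().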