-rw-r--r--  arch/hexagon/kernel/smp.c    |  2
-rw-r--r--  arch/ia64/kernel/smpboot.c   |  2
-rw-r--r--  arch/m32r/include/asm/smp.h  |  5
-rw-r--r--  arch/mn10300/kernel/smp.c    |  2
-rw-r--r--  arch/parisc/kernel/smp.c     |  2
-rw-r--r--  arch/powerpc/kernel/smp.c    |  2
-rw-r--r--  arch/s390/kernel/smp.c       |  2
-rw-r--r--  arch/sparc/kernel/smp_64.c   |  7
-rw-r--r--  arch/tile/kernel/smpboot.c   | 10
-rw-r--r--  arch/x86/include/asm/smp.h   |  5
-rw-r--r--  arch/x86/kernel/smpboot.c    |  9
-rw-r--r--  arch/x86/xen/smp.c           |  2
-rw-r--r--  include/linux/smp.h          |  5
-rw-r--r--  kernel/smp.c                 | 20
-rw-r--r--  kernel/smpboot.h             |  2
15 files changed, 1 insertion(+), 76 deletions(-)
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index f726462..149fbef 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -180,9 +180,7 @@ void __cpuinit start_secondary(void)
 
 	notify_cpu_starting(cpu);
 
-	ipi_call_lock();
 	set_cpu_online(cpu, true);
-	ipi_call_unlock();
 
 	local_irq_enable();
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1113b8a..963d2db 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -382,7 +382,6 @@ smp_callin (void)
 	set_numa_node(cpu_to_node_map[cpuid]);
 	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
 
-	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
@@ -390,7 +389,6 @@ smp_callin (void)
 	set_cpu_online(cpuid, true);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
-	ipi_call_unlock_irq();
 
 	smp_setup_percpu_timer();
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index cf7829a..c689b82 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -79,11 +79,6 @@ static __inline__ int cpu_number_map(int cpu)
 	return cpu;
 }
 
-static __inline__ unsigned int num_booting_cpus(void)
-{
-	return cpumask_weight(&cpu_callout_map);
-}
-
 extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 090d35d..e62c223 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -876,9 +876,7 @@ static void __init smp_online(void)
 
 	notify_cpu_starting(cpu);
 
-	ipi_call_lock();
 	set_cpu_online(cpu, true);
-	ipi_call_unlock();
 
 	local_irq_enable();
 }
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index a47828d..6266730 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -300,9 +300,7 @@ smp_cpu_init(int cpunum)
 
 	notify_cpu_starting(cpunum);
 
-	ipi_call_lock();
 	set_cpu_online(cpunum, true);
-	ipi_call_unlock();
 
 	/* Initialise the idle task for this CPU */
 	atomic_inc(&init_mm.mm_count);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e4cb343..e1417c4 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -571,7 +571,6 @@ void __devinit start_secondary(void *unused)
 	if (system_state == SYSTEM_RUNNING)
 		vdso_data->processorCount++;
 #endif
-	ipi_call_lock();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
 	/* Update sibling maps */
@@ -601,7 +600,6 @@ void __devinit start_secondary(void *unused)
 		of_node_put(np);
 	}
 	of_node_put(l2_cache);
-	ipi_call_unlock();
 
 	local_irq_enable();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 15cca26..8dca9c2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -717,9 +717,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
 	init_cpu_vtimer();
 	pfault_init();
 	notify_cpu_starting(smp_processor_id());
-	ipi_call_lock();
 	set_cpu_online(smp_processor_id(), true);
-	ipi_call_unlock();
 	local_irq_enable();
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f591598..781bcb1 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -103,8 +103,6 @@ void __cpuinit smp_callin(void)
 	if (cheetah_pcache_forced_on)
 		cheetah_enable_pcache();
 
-	local_irq_enable();
-
 	callin_flag = 1;
 	__asm__ __volatile__("membar #Sync\n\t"
 			     "flush %%g6" : : : "memory");
@@ -124,9 +122,8 @@ void __cpuinit smp_callin(void)
 	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 		rmb();
 
-	ipi_call_lock_irq();
 	set_cpu_online(cpuid, true);
-	ipi_call_unlock_irq();
+	local_irq_enable();
 
 	/* idle thread is expected to have preempt disabled */
 	preempt_disable();
@@ -1308,9 +1305,7 @@ int __cpu_disable(void)
 	mdelay(1);
 	local_irq_disable();
 
-	ipi_call_lock();
 	set_cpu_online(cpu, false);
-	ipi_call_unlock();
 
 	cpu_map_rebuild();
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 84873fb..e686c5a 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -198,17 +198,7 @@ void __cpuinit online_secondary(void)
 
 	notify_cpu_starting(smp_processor_id());
 
-	/*
-	 * We need to hold call_lock, so there is no inconsistency
-	 * between the time smp_call_function() determines number of
-	 * IPI recipients, and the time when the determination is made
-	 * for which cpus receive the IPI. Holding this
-	 * lock helps us to not include this cpu in a currently in progress
-	 * smp_call_function().
-	 */
-	ipi_call_lock();
 	set_cpu_online(smp_processor_id(), 1);
-	ipi_call_unlock();
 
 	__get_cpu_var(cpu_state) = CPU_ONLINE;
 	/* Set up tile-specific state for this cpu. */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index f483945..2ffa95d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -169,11 +169,6 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
 
-/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-static inline int num_booting_cpus(void)
-{
-	return cpumask_weight(cpu_callout_mask);
-}
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)	wbinvd()
 static inline int wbinvd_on_all_cpus(void)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7bd8a08..27e2eef 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -255,22 +255,13 @@ notrace static void __cpuinit start_secondary(void *unused)
 	check_tsc_sync_target();
 
 	/*
-	 * We need to hold call_lock, so there is no inconsistency
-	 * between the time smp_call_function() determines number of
-	 * IPI recipients, and the time when the determination is made
-	 * for which cpus receive the IPI. Holding this
-	 * lock helps us to not include this cpu in a currently in progress
-	 * smp_call_function().
-	 *
 	 * We need to hold vector_lock so there the set of online cpus
 	 * does not change while we are assigning vectors to cpus. Holding
 	 * this lock ensures we don't half assign or remove an irq from a cpu.
 	 */
-	ipi_call_lock();
 	lock_vector_lock();
 	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
-	ipi_call_unlock();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	x86_platform.nmi_init();
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index afb250d..f58dca7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -80,9 +80,7 @@ static void __cpuinit cpu_bringup(void)
 
 	notify_cpu_starting(cpu);
 
-	ipi_call_lock();
 	set_cpu_online(cpu, true);
-	ipi_call_unlock();
 
 	this_cpu_write(cpu_state, CPU_ONLINE);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 717fb74..dd6f06b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -90,10 +90,6 @@ void kick_all_cpus_sync(void);
 void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
-void ipi_call_lock(void);
-void ipi_call_unlock(void);
-void ipi_call_lock_irq(void);
-void ipi_call_unlock_irq(void);
 #else
 static inline void call_function_init(void) { }
 #endif
@@ -181,7 +177,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
 } while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
-#define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
 			(up_smp_call_function(func, info))
diff --git a/kernel/smp.c b/kernel/smp.c
index d0ae5b2..29dd40a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -581,26 +581,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
-
-void ipi_call_lock(void)
-{
-	raw_spin_lock(&call_function.lock);
-}
-
-void ipi_call_unlock(void)
-{
-	raw_spin_unlock(&call_function.lock);
-}
-
-void ipi_call_lock_irq(void)
-{
-	raw_spin_lock_irq(&call_function.lock);
-}
-
-void ipi_call_unlock_irq(void)
-{
-	raw_spin_unlock_irq(&call_function.lock);
-}
 #endif /* USE_GENERIC_SMP_HELPERS */
 
 /* Setup configured maximum number of CPUs to activate */
diff --git a/kernel/smpboot.h b/kernel/smpboot.h
index 80c0acf..6ef9433 100644
--- a/kernel/smpboot.h
+++ b/kernel/smpboot.h
@@ -3,8 +3,6 @@
 
 struct task_struct;
 
-int smpboot_prepare(unsigned int cpu);
-
 #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
 struct task_struct *idle_thread_get(unsigned int cpu);
 void idle_thread_set_boot_cpu(void);
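
Note (not part of the patch): after these hunks, every architecture's secondary-CPU bring-up converges on the same unlocked sequence — notify_cpu_starting(), set_cpu_online(), then enabling local interrupts — with x86 alone still wrapping the online marking in lock_vector_lock()/unlock_vector_lock(). The sketch below condenses that shape; the function name is hypothetical and stands in for the per-architecture start_secondary()/smp_callin()/online_secondary().

/*
 * Hypothetical, condensed sketch of the post-patch secondary-CPU path.
 * Real code lives in each architecture's bring-up function; this only
 * illustrates that set_cpu_online() is now called with no
 * ipi_call_lock()/ipi_call_unlock() pair around it.
 */
#include <linux/init.h>		/* __cpuinit (3.5-era annotation) */
#include <linux/cpu.h>		/* notify_cpu_starting() */
#include <linux/cpumask.h>	/* set_cpu_online() */
#include <linux/irqflags.h>	/* local_irq_enable() */
#include <linux/smp.h>		/* smp_processor_id() */

static void __cpuinit secondary_bringup_sketch(void)
{
	unsigned int cpu = smp_processor_id();

	notify_cpu_starting(cpu);	/* run the CPU_STARTING notifier chain */
	set_cpu_online(cpu, true);	/* mark online; no IPI call lock needed */
	local_irq_enable();		/* call-function IPIs may arrive from here on */

	cpu_idle();			/* fall into the idle loop (per-arch in 3.5) */
}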