Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                        |   1
-rw-r--r--  arch/mips/basler/excite/excite_iodev.c   |   9
-rw-r--r--  arch/mips/kernel/irq-rm9000.c            |   4
-rw-r--r--  arch/mips/kernel/rtlx.c                  |   7
-rw-r--r--  arch/mips/kernel/smp.c                   | 149
-rw-r--r--  arch/mips/kernel/smtc.c                  |   1
-rw-r--r--  arch/mips/kernel/stacktrace.c            |   1
-rw-r--r--  arch/mips/kernel/vpe.c                   |  12
-rw-r--r--  arch/mips/mm/c-r3k.c                     |   6
-rw-r--r--  arch/mips/mm/c-r4k.c                     |  18
-rw-r--r--  arch/mips/mm/page.c                      |  61
-rw-r--r--  arch/mips/mm/sc-rm7k.c                   |   4
-rw-r--r--  arch/mips/oprofile/common.c              |   6
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c     |   4
-rw-r--r--  arch/mips/pmc-sierra/yosemite/prom.c     |   2
-rw-r--r--  arch/mips/sibyte/cfe/setup.c             |   2
-rw-r--r--  arch/mips/sibyte/common/sb_tbprof.c      |  25
-rw-r--r--  arch/mips/sibyte/sb1250/prom.c           |   2
-rw-r--r--  arch/mips/sibyte/swarm/Makefile          |   1
-rw-r--r--  arch/mips/sibyte/swarm/swarm-i2c.c       |  37
20 files changed, 152 insertions(+), 200 deletions(-)
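
This series switches arch/mips over to the generic SMP call-function helpers
(USE_GENERIC_SMP_HELPERS), drops the removed `retry` argument from
smp_call_function()/on_each_cpu() callers, and pushes the Big Kernel Lock down
into several character-device open() handlers. For reference, a minimal sketch
of a caller after the signature change (illustrative only, not code from this
patch; bump_counter() is a hypothetical callback):

/* Sketch only: smp_call_function()/on_each_cpu() after the retry argument
 * was removed.  Callbacks run in interrupt context with IRQs disabled. */
#include <linux/smp.h>
#include <asm/atomic.h>

static void bump_counter(void *info)
{
        atomic_inc((atomic_t *)info);
}

static void count_cpus(atomic_t *counter)
{
        /* old: smp_call_function(func, info, retry, wait) */
        smp_call_function(bump_counter, counter, 1);    /* other CPUs, wait */
        on_each_cpu(bump_counter, counter, 1);          /* all CPUs, wait */
}
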
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 24c5dee..d2be3ff 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1763,6 +1763,7 @@ config SMP
bool "Multi-Processing support"
depends on SYS_SUPPORTS_SMP
select IRQ_PER_CPU
+ select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/basler/excite/excite_iodev.c b/arch/mips/basler/excite/excite_iodev.c
index 476d20e..a1e3526 100644
--- a/arch/mips/basler/excite/excite_iodev.c
+++ b/arch/mips/basler/excite/excite_iodev.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
+#include <linux/smp_lock.h>
#include "excite_iodev.h"
@@ -110,8 +111,14 @@ static int __exit iodev_remove(struct device *dev)
static int iodev_open(struct inode *i, struct file *f)
{
- return request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
+ int ret;
+
+ lock_kernel();
+ ret = request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
iodev_name, &miscdev);
+ unlock_kernel();
+
+ return ret;
}
static int iodev_release(struct inode *i, struct file *f)
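
The lock_kernel()/unlock_kernel() pair added above is the BKL pushdown: the
VFS no longer takes the Big Kernel Lock around ->open(), so drivers that still
rely on its serialization must take it themselves. A generic sketch of the
pattern (names hypothetical; assumes <linux/smp_lock.h> as on pre-BKL-removal
kernels):

/* Sketch of the open() BKL-pushdown pattern; mydev_do_open() is hypothetical. */
#include <linux/fs.h>
#include <linux/smp_lock.h>

static int mydev_do_open(struct inode *inode, struct file *filp)
{
        return 0;                       /* device-specific setup goes here */
}

static int mydev_open(struct inode *inode, struct file *filp)
{
        int ret;

        lock_kernel();                  /* previously taken by the VFS */
        ret = mydev_do_open(inode, filp);
        unlock_kernel();

        return ret;
}
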
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index ed9febe..b47e461 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args)
static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
{
- on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
+ on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1);
return 0;
}
@@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args)
static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
{
- on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
+ on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1);
}
static struct irq_chip rm9k_irq_controller = {
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index b88f1c1..b556419 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -28,6 +28,7 @@
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
@@ -392,8 +393,12 @@ out:
static int file_open(struct inode *inode, struct file *filp)
{
int minor = iminor(inode);
+ int err;
- return rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
+ lock_kernel();
+ err = rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
+ unlock_kernel();
+ return err;
}
static int file_release(struct inode *inode, struct file *filp)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index cdf87a9..4410f17 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_idle();
}
-DEFINE_SPINLOCK(smp_call_lock);
-
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- * <mask> cpuset_t of all processors to run the function on.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A CPU B
- * Disable interrupts
- * smp_call_function()
- * Take call_lock
- * Send IPIs
- * Wait for all cpus to acknowledge IPI
- * CPU A has not responded, spin waiting
- * for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
- void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct call_data_struct data;
- int cpu = smp_processor_id();
- int cpus;
-
- /*
- * Can die spectacularly if this CPU isn't yet marked online
- */
- BUG_ON(!cpu_online(cpu));
-
- cpu_clear(cpu, mask);
- cpus = cpus_weight(mask);
- if (!cpus)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock(&smp_call_lock);
- call_data = &data;
- smp_mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
- /* Wait for response */
- /* FIXME: lock-up detection, backtrace on lock-up */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- barrier();
- call_data = NULL;
- spin_unlock(&smp_call_lock);
-
- return 0;
}
-int smp_call_function(void (*func) (void *info), void *info, int retry,
- int wait)
+/*
+ * We reuse the same vector for the single IPI
+ */
+void arch_send_call_function_single_ipi(int cpu)
{
- return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+ mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
}
-EXPORT_SYMBOL(smp_call_function);
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
void smp_call_function_interrupt(void)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function.
- */
- smp_mb();
- atomic_inc(&call_data->started);
-
- /*
- * At this point the info structure may be out of scope unless wait==1.
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_single_interrupt();
+ generic_smp_call_function_interrupt();
irq_exit();
-
- if (wait) {
- smp_mb();
- atomic_inc(&call_data->finished);
- }
-}
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int retry, int wait)
-{
- int ret, me;
-
- /*
- * Can die spectacularly if this CPU isn't yet marked online
- */
- if (!cpu_online(cpu))
- return 0;
-
- me = get_cpu();
- BUG_ON(!cpu_online(me));
-
- if (cpu == me) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- put_cpu();
- return 0;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
- wait);
-
- put_cpu();
- return 0;
}
-EXPORT_SYMBOL(smp_call_function_single);
static void stop_this_cpu(void *dummy)
{
@@ -286,7 +167,7 @@ static void stop_this_cpu(void *dummy)
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, NULL, 1, 0);
+ smp_call_function(stop_this_cpu, NULL, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -365,7 +246,7 @@ static void flush_tlb_all_ipi(void *info)
void flush_tlb_all(void)
{
- on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
+ on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
@@ -385,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm)
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
- smp_call_function(func, info, 1, 1);
+ smp_call_function(func, info, 1);
#endif
}
@@ -485,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
.addr2 = end,
};
- on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
static void flush_tlb_page_ipi(void *info)
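
After this conversion the architecture only supplies the two IPI senders
above; kernel/smp.c queues the function/argument pairs, and the shared
SMP_CALL_FUNCTION vector drains both the single-CPU and multi-CPU queues via
the generic_smp_call_function_*_interrupt() calls. Callers just use the
trimmed-down API, e.g. (sketch only, not part of the patch):

/* Sketch only: reading the CP0 count register on another CPU with the
 * four-argument smp_call_function_single() (the retry argument is gone). */
#include <linux/smp.h>
#include <asm/mipsregs.h>

static void read_count(void *info)
{
        *(unsigned int *)info = read_c0_count();  /* runs on the target CPU */
}

static unsigned int count_on_cpu(int cpu)
{
        unsigned int val = 0;

        smp_call_function_single(cpu, read_count, &val, 1);  /* wait == 1 */
        return val;
}
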
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 3e86318..a516286 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -877,7 +877,6 @@ static void ipi_resched_interrupt(void)
/* Return from interrupt should be enough to cause scheduler check */
}
-
static void ipi_call_interrupt(void)
{
/* Invoke generic function invocation code in smp.c */
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
index ebd9db8..5eb4681 100644
--- a/arch/mips/kernel/stacktrace.c
+++ b/arch/mips/kernel/stacktrace.c
@@ -73,3 +73,4 @@ void save_stack_trace(struct stack_trace *trace)
prepare_frametrace(regs);
save_context_stack(trace, regs);
}
+EXPORT_SYMBOL_GPL(save_stack_trace);
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2794501..972b2d2 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -38,6 +38,7 @@
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
@@ -1050,17 +1051,20 @@ static int vpe_open(struct inode *inode, struct file *filp)
enum vpe_state state;
struct vpe_notifications *not;
struct vpe *v;
- int ret;
+ int ret, err = 0;
+ lock_kernel();
if (minor != iminor(inode)) {
/* assume only 1 device at the moment. */
printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
if ((v = get_vpe(tclimit)) == NULL) {
printk(KERN_WARNING "VPE loader: unable to get vpe\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
state = xchg(&v->state, VPE_STATE_INUSE);
@@ -1100,6 +1104,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
v->shared_ptr = NULL;
v->__start = 0;
+out:
+ unlock_kernel();
return 0;
}
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 76935e3..27a5b46 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
static unsigned long icache_size, dcache_size; /* Size in bytes */
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
-unsigned long __init r3k_cache_size(unsigned long ca_flags)
+unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
{
unsigned long flags, status, dummy, size;
volatile unsigned long *p;
@@ -61,7 +61,7 @@ unsigned long __init r3k_cache_size(unsigned long ca_flags)
return size * sizeof(*p);
}
-unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
+unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
{
unsigned long flags, status, lsize, i;
volatile unsigned long *p;
@@ -90,7 +90,7 @@ unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
return lsize * sizeof(*p);
}
-static void __init r3k_probe_cache(void)
+static void __cpuinit r3k_probe_cache(void)
{
dcache_size = r3k_cache_size(ST0_ISC);
if (dcache_size)
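
The __init to __cpuinit changes here (and in sc-rm7k.c below) keep the cache
probing helpers in .cpuinit.text rather than the discard-after-boot
.init.text, so the per-CPU cache setup path, itself __cpuinit, can call them
without a section mismatch when CPU hotplug is configured. Roughly (sketch
under that assumption; both function names below are hypothetical stand-ins):

/* Sketch only: the section rule behind the annotation change.  A __cpuinit
 * function must not call an __init one, because .init.text is freed after
 * boot while .cpuinit.text may be kept for CPU hotplug. */
#include <linux/init.h>

static void __cpuinit probe_caches(void)        /* was __init */
{
        /* sizes probed here may be needed again when a CPU comes online */
}

void __cpuinit per_cpu_cache_init(void)
{
        probe_caches();         /* no section mismatch: both are __cpuinit */
}
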
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 2709675..71df339 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -43,12 +43,12 @@
* primary cache.
*/
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
- int retry, int wait)
+ int wait)
{
preempt_disable();
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
- smp_call_function(func, info, retry, wait);
+ smp_call_function(func, info, wait);
#endif
func(info);
preempt_enable();
@@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args)
static void r4k___flush_cache_all(void)
{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+ r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
}
static inline int has_valid_asid(const struct mm_struct *mm)
@@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
int exec = vma->vm_flags & VM_EXEC;
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
@@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
}
struct flush_cache_page_args {
@@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
- r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
local_r4k_flush_data_cache_page((void *)addr);
else
r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
- 1, 1);
+ 1);
}
struct flush_icache_range_args {
@@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
- r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
instruction_hazard();
}
@@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
}
static void r4k_flush_icache_all(void)
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 1edf0cb..1417c64 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -235,13 +235,12 @@ static void __cpuinit set_prefetch_parameters(void)
}
/*
* Too much unrolling will overflow the available space in
- * clear_space_array / copy_page_array. 8 words sounds generous,
- * but a R4000 with 128 byte L2 line length can exceed even that.
+ * clear_space_array / copy_page_array.
*/
- half_clear_loop_size = min(8 * clear_word_size,
+ half_clear_loop_size = min(16 * clear_word_size,
max(cache_line_size >> 1,
4 * clear_word_size));
- half_copy_loop_size = min(8 * copy_word_size,
+ half_copy_loop_size = min(16 * copy_word_size,
max(cache_line_size >> 1,
4 * copy_word_size));
}
@@ -263,21 +262,23 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
if (pref_bias_clear_store) {
uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
A0);
- } else if (cpu_has_cache_cdex_s) {
- uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
- } else if (cpu_has_cache_cdex_p) {
- if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- }
+ } else if (cache_line_size == (half_clear_loop_size << 1)) {
+ if (cpu_has_cache_cdex_s) {
+ uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+ } else if (cpu_has_cache_cdex_p) {
+ if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ }
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lw(buf, ZERO, ZERO, AT);
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+ uasm_i_lw(buf, ZERO, ZERO, AT);
- uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
- }
+ uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ }
+ }
}
void __cpuinit build_clear_page(void)
@@ -403,20 +404,22 @@ static inline void build_copy_store_pref(u32 **buf, int off)
if (pref_bias_copy_store) {
uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
A0);
- } else if (cpu_has_cache_cdex_s) {
- uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
- } else if (cpu_has_cache_cdex_p) {
- if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- }
+ } else if (cache_line_size == (half_copy_loop_size << 1)) {
+ if (cpu_has_cache_cdex_s) {
+ uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+ } else if (cpu_has_cache_cdex_p) {
+ if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ }
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lw(buf, ZERO, ZERO, AT);
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+ uasm_i_lw(buf, ZERO, ZERO, AT);
- uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ }
}
}
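
The page.c hunks raise the unroll cap from 8 to 16 words and only emit the
Create Dirty Exclusive cache ops when the full clear/copy loop covers exactly
one cache line (cache_line_size == half_loop_size << 1), since those ops dirty
a whole line at once. A worked example of the sizing arithmetic with
illustrative values (32-bit stores, 128-byte L2 line; not taken from the
patch):

/* Worked example only; the macros mirror the kernel's min()/max(). */
#include <stdio.h>

#define MIN(a, b)       ((a) < (b) ? (a) : (b))
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

int main(void)
{
        int clear_word_size = 4;        /* bytes per store instruction */
        int cache_line_size = 128;      /* bytes, e.g. an R4000 L2 line */

        /* old cap of 8 words: half loop = 32, full loop = 64 < 128 bytes */
        int old_half = MIN(8 * clear_word_size,
                           MAX(cache_line_size >> 1, 4 * clear_word_size));
        /* new cap of 16 words: half loop = 64, full loop = 128 bytes */
        int new_half = MIN(16 * clear_word_size,
                           MAX(cache_line_size >> 1, 4 * clear_word_size));

        /* the cache op is only generated when the guard below holds */
        printf("old: half=%d guard=%d\n", old_half,
               cache_line_size == (old_half << 1));    /* 32, 0 -> skipped */
        printf("new: half=%d guard=%d\n", new_half,
               cache_line_size == (new_half << 1));    /* 64, 1 -> emitted */
        return 0;
}
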
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index fc227f3..e3abfb2 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -86,7 +86,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
/*
* This function is executed in uncached address space.
*/
-static __init void __rm7k_sc_enable(void)
+static __cpuinit void __rm7k_sc_enable(void)
{
int i;
@@ -107,7 +107,7 @@ static __init void __rm7k_sc_enable(void)
}
}
-static __init void rm7k_sc_enable(void)
+static __cpuinit void rm7k_sc_enable(void)
{
if (read_c0_config() & RM7K_CONF_SE)
return;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index b5f6f71..dd2fbd6 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -27,7 +27,7 @@ static int op_mips_setup(void)
model->reg_setup(ctr);
/* Configure the registers on all cpus. */
- on_each_cpu(model->cpu_setup, NULL, 0, 1);
+ on_each_cpu(model->cpu_setup, NULL, 1);
return 0;
}
@@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
static int op_mips_start(void)
{
- on_each_cpu(model->cpu_start, NULL, 0, 1);
+ on_each_cpu(model->cpu_start, NULL, 1);
return 0;
}
@@ -66,7 +66,7 @@ static int op_mips_start(void)
static void op_mips_stop(void)
{
/* Disable performance monitoring for all counters. */
- on_each_cpu(model->cpu_stop, NULL, 0, 1);
+ on_each_cpu(model->cpu_stop, NULL, 1);
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index b40df7d..54759f1 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -313,7 +313,7 @@ static int __init mipsxx_init(void)
if (!cpu_has_mipsmt_pertccounters)
counters = counters_total_to_per_cpu(counters);
#endif
- on_each_cpu(reset_counters, (void *)(long)counters, 0, 1);
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
op_model_mipsxx_ops.num_counters = counters;
switch (current_cpu_type()) {
@@ -382,7 +382,7 @@ static void mipsxx_exit(void)
int counters = op_model_mipsxx_ops.num_counters;
counters = counters_per_cpu_to_total(counters);
- on_each_cpu(reset_counters, (void *)(long)counters, 0, 1);
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
perf_irq = save_perf_irq;
}
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 35dc435..cf4c868 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -64,7 +64,7 @@ static void prom_exit(void)
#ifdef CONFIG_SMP
if (smp_processor_id())
/* CPU 1 */
- smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+ smp_call_function(prom_cpu0_exit, NULL, 1);
#endif
prom_cpu0_exit(NULL);
}
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 33fce82..fd9604d 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg)
if (!reboot_smp) {
/* Get CPU 0 to do the cfe_exit */
reboot_smp = 1;
- smp_call_function(cfe_linux_exit, arg, 1, 0);
+ smp_call_function(cfe_linux_exit, arg, 0);
}
} else {
printk("Passing control back to CFE...\n");
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index 63b444e..28b012a 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/errno.h>
@@ -402,18 +403,26 @@ static int sbprof_zbprof_stop(void)
static int sbprof_tb_open(struct inode *inode, struct file *filp)
{
int minor;
+ int err = 0;
+ lock_kernel();
minor = iminor(inode);
- if (minor != 0)
- return -ENODEV;
+ if (minor != 0) {
+ err = -ENODEV;
+ goto out;
+ }
- if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
- return -EBUSY;
+ if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED) {
+ err = -EBUSY;
+ goto out;
+ }
memset(&sbp, 0, sizeof(struct sbprof_tb));
sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
- if (!sbp.sbprof_tbbuf)
- return -ENOMEM;
+ if (!sbp.sbprof_tbbuf) {
+ err = -ENOMEM;
+ goto out;
+ }
memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
init_waitqueue_head(&sbp.tb_sync);
init_waitqueue_head(&sbp.tb_read);
@@ -421,7 +430,9 @@ static int sbprof_tb_open(struct inode *inode, struct file *filp)
sbp.open = SB_OPEN;
- return 0;
+ out:
+ unlock_kernel();
+ return err;
}
static int sbprof_tb_release(struct inode *inode, struct file *filp)
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index cf8f6b3..65b1af6 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -66,7 +66,7 @@ static void prom_linux_exit(void)
{
#ifdef CONFIG_SMP
if (smp_processor_id()) {
- smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+ smp_call_function(prom_cpu0_exit, NULL, 1);
}
#endif
while(1);
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile
index 1775755..255d692 100644
--- a/arch/mips/sibyte/swarm/Makefile
+++ b/arch/mips/sibyte/swarm/Makefile
@@ -1,3 +1,4 @@
obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o
+obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o
obj-$(CONFIG_KGDB) += dbg_io.o
diff --git a/arch/mips/sibyte/swarm/swarm-i2c.c b/arch/mips/sibyte/swarm/swarm-i2c.c
new file mode 100644
index 0000000..4282ac9
--- /dev/null
+++ b/arch/mips/sibyte/swarm/swarm-i2c.c
@@ -0,0 +1,37 @@
+/*
+ * arch/mips/sibyte/swarm/swarm-i2c.c
+ *
+ * Broadcom BCM91250A (SWARM), etc. I2C platform setup.
+ *
+ * Copyright (c) 2008 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+
+static struct i2c_board_info swarm_i2c_info1[] __initdata = {
+ {
+ I2C_BOARD_INFO("m41t81", 0x68),
+ },
+};
+
+static int __init swarm_i2c_init(void)
+{
+ int err;
+
+ err = i2c_register_board_info(1, swarm_i2c_info1,
+ ARRAY_SIZE(swarm_i2c_info1));
+ if (err < 0)
+ printk(KERN_ERR
+ "swarm-i2c: cannot register board I2C devices\n");
+ return err;
+}
+
+arch_initcall(swarm_i2c_init);
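
The new swarm-i2c.c pre-declares the board's M41T81 RTC at address 0x68 on
I2C adapter 1; the i2c core instantiates the client once that adapter
registers. A board needing more pre-declared devices would simply extend the
table, e.g. (sketch only; the second entry is hypothetical and not part of
this patch):

/* Sketch: extending the board-info table; the lm90 entry is made up. */
#include <linux/i2c.h>

static struct i2c_board_info swarm_i2c_info1[] __initdata = {
        {
                I2C_BOARD_INFO("m41t81", 0x68),         /* ST M41T81 RTC */
        },
        {
                I2C_BOARD_INFO("lm90", 0x4c),           /* hypothetical sensor */
        },
};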