Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/c-r4k.c	| 22 +++++++++++++++++++++-
-rw-r--r--	arch/mips/mm/cache.c	|  5 +++--
-rw-r--r--	arch/mips/mm/fault.c	| 21 ++++++---------------
-rw-r--r--	arch/mips/mm/page.c	|  3 ++-
-rw-r--r--	arch/mips/mm/tlbex.c	|  1 +
5 files changed, 33 insertions(+), 19 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6e99665..c43f4b2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -618,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
-		else
+		else {
+			unsigned long lsize = cpu_scache_line_size();
+			unsigned long almask = ~(lsize - 1);
+
+			/*
+			 * There is no clearly documented alignment requirement
+			 * for the cache instruction on MIPS processors, and
+			 * some processors, among them the QED RM5200 and
+			 * RM7000, will throw an address error for cache hit
+			 * ops with insufficient alignment.  Solved by
+			 * aligning the address to cache line size.
+			 */
+			cache_op(Hit_Writeback_Inv_SD, addr & almask);
+			cache_op(Hit_Writeback_Inv_SD,
+				 (addr + size - 1) & almask);
 			blast_inv_scache_range(addr, addr + size);
+		}
 		return;
 	}
 
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
+		unsigned long lsize = cpu_dcache_line_size();
+		unsigned long almask = ~(lsize - 1);
+
 		R4600_HIT_CACHEOP_WAR_IMPL;
+		cache_op(Hit_Writeback_Inv_D, addr & almask);
+		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
 		blast_inv_dcache_range(addr, addr + size);
 	}
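The hunk above touches the first and the last cache line of the range with an explicitly aligned address; the mask arithmetic works because cache line sizes are powers of two. Below is a minimal stand-alone sketch of the same computation, not kernel code: LINE_SIZE is a hypothetical stand-in for cpu_scache_line_size()/cpu_dcache_line_size(), which return the line size in bytes.

#include <stdio.h>

/* Hypothetical stand-in for cpu_*cache_line_size(); a power of two. */
#define LINE_SIZE 32UL

int main(void)
{
	unsigned long addr = 0x8000201dUL;	/* deliberately misaligned */
	unsigned long size = 100;
	unsigned long almask = ~(LINE_SIZE - 1);

	/*
	 * Touching the first and the last cache line of [addr, addr + size)
	 * with a line-aligned address keeps processors that fault on
	 * misaligned cache hit ops (e.g. the QED RM5200/RM7000) safe.
	 */
	printf("first line at %#lx\n", addr & almask);
	printf("last line at  %#lx\n", (addr + size - 1) & almask);
	return 0;
}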
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 98ad0a8..694d51f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/syscalls.h>
 #include <linux/mm.h>
 
 #include <asm/cacheflush.h>
@@ -58,8 +59,8 @@ EXPORT_SYMBOL(_dma_cache_wback_inv);
  * We could optimize the case where the cache argument is not BCACHE but
  * that seems very atypical use ...
  */
-asmlinkage int sys_cacheflush(unsigned long addr,
-	unsigned long bytes, unsigned int cache)
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
+	unsigned int, cache)
 {
 	if (bytes == 0)
 		return 0;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index fa636fc..55767ad 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,7 +97,6 @@ good_area:
 			goto bad_area;
 	}
 
-survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -167,21 +166,13 @@ no_context:
 	       field, regs->regs[31]);
 	die("Oops", regs);
 
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
 out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (is_global_init(tsk)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", tsk->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	/*
+	 * We ran out of memory; call the OOM killer and return to userspace,
+	 * which will retry the fault or kill us if we got oom-killed.
+	 */
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 1417c64..48060c6 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -172,8 +172,9 @@ static void __cpuinit set_prefetch_parameters(void)
 	 */
 	cache_line_size = cpu_dcache_line_size();
 	switch (current_cpu_type()) {
+	case CPU_R5500:
 	case CPU_TX49XX:
-		/* TX49 supports only Pref_Load */
+		/* These processors only support Pref_Load. */
 		pref_bias_copy_load = 256;
 		break;
 
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4294203..f335cf6 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -318,6 +318,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_BCM4710:
 	case CPU_LOONGSON2:
 	case CPU_CAVIUM_OCTEON:
+	case CPU_R5500:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
 		tlbw(p);
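The cache.c hunk changes only how the syscall is defined, not its user-visible ABI, so existing callers of cacheflush(2) are unaffected. For reference, a minimal userspace caller might look like the sketch below; it assumes a MIPS toolchain whose libc exposes the cacheflush() wrapper and the BCACHE constant via <sys/cachectl.h>.

#include <stdio.h>
#include <string.h>
#include <sys/cachectl.h>

int main(void)
{
	static char buf[128];

	memset(buf, 0xa5, sizeof(buf));
	/* Write back and invalidate both I- and D-cache for buf. */
	if (cacheflush(buf, sizeof(buf), BCACHE) != 0) {
		perror("cacheflush");
		return 1;
	}
	puts("range flushed");
	return 0;
}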