author | Atsushi Nemoto <anemo@mba.ocn.ne.jp> | 2006-08-25 17:55:31 +0900 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2006-09-27 13:37:37 +0100 |
commit | f6502791d780b22fc147150137704a07a05ba361 (patch) | |
tree | 687dbfcd20e60c480868eb1b64f784e8084b6e4b /arch/mips/mm/c-sb1.c | |
parent | a94d702049569401c65b579d0751ce282f962b41 (diff) | |
download | op-kernel-dev-f6502791d780b22fc147150137704a07a05ba361.zip op-kernel-dev-f6502791d780b22fc147150137704a07a05ba361.tar.gz |
[MIPS] Do not use drop_mmu_context to flush another task's VIPT I-cache.
c-r4k.c and c-sb1.c use drop_mmu_context() to flush virtually tagged
I-caches, but this does not work when flushing another task's icache.
That case is triggered, for example, by copy_to_user_page() called from
ptrace(2). Use an indexed flush for such cases (a standalone sketch of
the idea follows the diff below).
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/mm/c-sb1.c')
-rw-r--r-- | arch/mips/mm/c-sb1.c | 56 |
1 file changed, 31 insertions, 25 deletions
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 4bd9ad8..16bad7c 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -155,6 +155,26 @@ static inline void __sb1_flush_icache_all(void)
 }
 
 /*
+ * Invalidate a range of the icache. The addresses are virtual, and
+ * the cache is virtually indexed and tagged. However, we don't
+ * necessarily have the right ASID context, so use index ops instead
+ * of hit ops.
+ */
+static inline void __sb1_flush_icache_range(unsigned long start,
+	unsigned long end)
+{
+	start &= ~(icache_line_size - 1);
+	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
+
+	while (start != end) {
+		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
+		start += icache_line_size;
+	}
+	mispredict();
+	sync();
+}
+
+/*
  * Flush the icache for a given physical page. Need to writeback the
  * dcache first, then invalidate the icache. If the page isn't
  * executable, nothing is required.
@@ -173,8 +193,11 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long
 	/*
 	 * Bumping the ASID is probably cheaper than the flush ...
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0)
-		drop_mmu_context(vma->vm_mm, cpu);
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
 }
 
 #ifdef CONFIG_SMP
@@ -210,26 +233,6 @@ void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsign
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
 
-/*
- * Invalidate a range of the icache. The addresses are virtual, and
- * the cache is virtually indexed and tagged. However, we don't
- * necessarily have the right ASID context, so use index ops instead
- * of hit ops.
- */
-static inline void __sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
-{
-	start &= ~(icache_line_size - 1);
-	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
-
-	while (start != end) {
-		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
-		start += icache_line_size;
-	}
-	mispredict();
-	sync();
-}
-
 /*
  * Invalidate all caches on this CPU
  */
@@ -326,9 +329,12 @@ static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
 	 * If there's a context, bump the ASID (cheaper than a flush,
 	 * since we don't know VAs!)
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		drop_mmu_context(vma->vm_mm, cpu);
-	}
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(start, start + PAGE_SIZE);
+
 }
 
 #ifdef CONFIG_SMP
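For illustration only, here is a minimal userspace sketch of the index-type invalidation loop the patch introduces as __sb1_flush_icache_range(). The line size, index mask, and the printf() stub standing in for cache_set_op(Index_Invalidate_I, ...) are invented demo values, not the SB1's real parameters; the sketch only shows why the loop works on another task's addresses: an index op selects cache lines purely by address bits, so no ASID match (and hence no drop_mmu_context()) is needed.

```c
#include <stdio.h>

#define ICACHE_LINE_SIZE   32UL       /* demo value, not probed from hardware */
#define ICACHE_INDEX_MASK  0x7fe0UL   /* demo value standing in for icache_index_mask */

/* Stand-in for the kernel's cache_set_op(Index_Invalidate_I, addr). */
static void index_invalidate_i(unsigned long addr)
{
	printf("invalidate icache index 0x%lx\n", addr);
}

/*
 * Same shape as the patch's __sb1_flush_icache_range(): round the range
 * out to cache-line boundaries, then issue one index-type invalidate per
 * line.  Only the bits selected by the index mask matter, so the loop is
 * independent of which ASID (which task) the virtual addresses belong to.
 */
static void flush_icache_range_by_index(unsigned long start, unsigned long end)
{
	start &= ~(ICACHE_LINE_SIZE - 1);
	end = (end + ICACHE_LINE_SIZE - 1) & ~(ICACHE_LINE_SIZE - 1);

	while (start != end) {
		index_invalidate_i(start & ICACHE_INDEX_MASK);
		start += ICACHE_LINE_SIZE;
	}
}

int main(void)
{
	/* One page (4 KiB here) of another task's text at an arbitrary VA. */
	flush_icache_range_by_index(0x2aaab000UL, 0x2aaab000UL + 4096UL);
	return 0;
}
```

The trade-off is that an index op drops whichever line currently occupies that index, regardless of its tag or ASID, so it over-invalidates slightly; that is harmless for an I-cache, and the patch still takes the cheaper ASID-bump path via drop_mmu_context() when vma->vm_mm is the current mm.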