author     Russell King <rmk+kernel@arm.linux.org.uk>    2009-10-25 10:40:02 +0000
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2009-12-01 18:20:07 +0000
commit     2f0b192633f1fbf253b21c90938733491549edae (patch)
tree       a429eaf88e591ea9674075ac6389e9a751da9806 /arch/arm/include/asm/cacheflush.h
parent     29e553631b2a0d4eebd23db630572e1027a9967a (diff)
ARM: Avoid duplicated implementation for VIVT cache flushing
We had two copies of the wrapper code for VIVT cache flushing - one in
asm/cacheflush.h and one in arch/arm/mm/flush.c. Reduce this down to
one common copy.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
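Condensed, the shape of the change in cacheflush.h is: the VIVT wrappers become unconditionally defined vivt_*() inline helpers, and the generic names are then either #define'd onto them (non-VIPT builds) or declared extern (VIPT-capable builds). A minimal sketch, abbreviated from the hunks shown below:

    /* Always available, regardless of cache model. */
    static inline void vivt_flush_cache_mm(struct mm_struct *mm)
    {
            if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                    __cpuc_flush_user_all();
    }

    #ifndef CONFIG_CPU_CACHE_VIPT
    /* VIVT-only kernels: the generic API maps straight to the helpers. */
    #define flush_cache_mm(mm)	vivt_flush_cache_mm(mm)
    #else
    /* VIPT-capable kernels: out-of-line implementations in arch/arm/mm/flush.c. */
    extern void flush_cache_mm(struct mm_struct *mm);
    #endif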
Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
-rw-r--r--   arch/arm/include/asm/cacheflush.h | 20
1 file changed, 15 insertions, 5 deletions
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3d0cdd2..61ae25c 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()		__cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 }
 
 static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long uaddr, void *kaddr,
 			 unsigned long len, int write)
 {
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		__cpuc_coherent_kern_range(addr, addr + len);
 	}
 }
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+		vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+		vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+		vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
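The diffstat is limited to cacheflush.h, so the consumer side of the cleanup is not shown. Presumably the VIPT-capable implementations in arch/arm/mm/flush.c now fall back to the shared vivt_*() helpers when the running CPU actually has a VIVT cache, instead of carrying a second copy of that logic. A rough sketch of what that might look like, assuming the runtime check is cache_is_vivt() from asm/cachetype.h:

    /* arch/arm/mm/flush.c -- sketch only, not part of this diff. */
    void flush_cache_mm(struct mm_struct *mm)
    {
            if (cache_is_vivt()) {
                    /* Runtime-detected VIVT cache: reuse the inline helper. */
                    vivt_flush_cache_mm(mm);
                    return;
            }

            /* ... VIPT-specific handling continues here ... */
    }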