From ef7cc35b0ee03431731186320b18e5da585341ff Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Mon, 25 Jan 2010 11:42:21 -0600
Subject: parisc: add mm API for DMA to vmalloc/vmap areas

We already have an API to flush a kernel page along an alias address, so
use it.  The TLB purge prevents the CPU from doing speculative moveins on
the flushed address, so we don't need to implement an invalidate.

Acked-by: Kyle McMartin
Signed-off-by: James Bottomley
---
 arch/parisc/include/asm/cacheflush.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'arch')

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 7a73b61..4772777 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -38,6 +38,18 @@ void flush_cache_mm(struct mm_struct *mm);
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
 
+/* vmap range flushes and invalidates.  Architecturally, we don't need
+ * the invalidate, because the CPU should refuse to speculate once an
+ * area has been flushed, so invalidate is left empty */
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	flush_kernel_dcache_range_asm(start, start + size);
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
-- 
cgit v1.1


From 252a9afff76097667429b583e8b5b170b47665a4 Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Mon, 25 Jan 2010 11:42:22 -0600
Subject: arm: add mm API for DMA to vmalloc/vmap areas

ARM cannot prevent cache movein, so this patch implements both the flush
and invalidate pieces of the API.
Signed-off-by: James Bottomley
---
 arch/arm/include/asm/cacheflush.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'arch')

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 730aefc..4ae503c 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -432,6 +432,16 @@ static inline void __flush_icache_all(void)
 		    : "r" (0));
 #endif
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+		__cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+		__cpuc_flush_dcache_area(addr, (size_t)size);
+}
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma,
-- 
cgit v1.1


From c9334f6067dbe0380141fc75b122e0a533878838 Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Mon, 25 Jan 2010 11:42:23 -0600
Subject: sh: add mm API for DMA to vmalloc/vmap areas

Signed-off-by: James Bottomley
---
 arch/sh/include/asm/cacheflush.h | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'arch')

diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index dda96eb..da3ebec 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -63,6 +63,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
 		__flush_anon_page(page, vmaddr);
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	__flush_wback_region(addr, size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	__flush_invalidate_region(addr, size);
+}
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
-- 
cgit v1.1
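
Taken together, the three patches give each aliasing architecture the same
pair of entry points for I/O on memory that the CPU also touches through a
vmalloc/vmap alias.  The sketch below is an illustration only and is not
part of the series: it assumes a caller that builds a vmap alias over a set
of pages and then performs block I/O against the underlying physical pages
(the kind of pattern that motivated the series), and the function name and
the elided I/O submission steps are hypothetical placeholders.

#include <linux/highmem.h>	/* pulls in asm/cacheflush.h for the new calls */
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/*
 * Illustrative only: bracket I/O on the physical pages with the new
 * calls when the data is also accessed through a vmap alias.
 * example_vmap_io() and the "submit ..." steps are hypothetical.
 */
static void example_vmap_io(struct page **pages, unsigned int nr_pages)
{
	void *vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	int size = nr_pages * PAGE_SIZE;

	if (!vaddr)
		return;

	/* CPU dirtied the buffer through the alias: write those cache
	 * lines back so the device reads current data. */
	memset(vaddr, 0xab, size);
	flush_kernel_vmap_range(vaddr, size);
	/* ... submit write I/O against pages[] and wait for it ... */

	/* Device has written the pages: discard anything the CPU may
	 * have speculatively pulled in through the alias meanwhile,
	 * then it is safe to read the new data via vaddr. */
	/* ... submit read I/O against pages[] and wait for it ... */
	invalidate_kernel_vmap_range(vaddr, size);

	vunmap(vaddr);
}

A caller written this way stays portable: on parisc the invalidate compiles
away, while on ARM and sh it maps onto the architecture's own writeback and
invalidate primitives, so issuing both calls unconditionally is the safe
pattern.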