path: root/arch/arm64/mm/cache.S
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 15:01:45 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 15:01:45 -0700
commit     1ce235faa8fefa4eb7199cad890944c1d2ba1b3e (patch)
tree       1ed51ca928c6760d524dc853cd879ab958e66bd9  /arch/arm64/mm/cache.S
parent     e38be1b1066687204b05b6a46a2018806c7281de (diff)
parent     196adf2f3015eacac0567278ba538e3ffdd16d0e (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull ARM64 updates from Catalin Marinas:

 - KGDB support for arm64

 - PCI I/O space extended to 16M (in preparation of PCIe support patches)

 - Dropping ZONE_DMA32 in favour of ZONE_DMA (we only need one for the
   time being), together with swiotlb late initialisation to correctly
   setup the bounce buffer

 - DMA API cache maintenance support (not all ARMv8 platforms have
   hardware cache coherency)

 - Crypto extensions advertising via ELF_HWCAP2 for compat user space

 - Perf support for dwarf unwinding in compat mode

 - asm/tlb.h converted to the generic mmu_gather code

 - asm-generic rwsem implementation

 - Code clean-up

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (42 commits)
  arm64: Remove pgprot_dmacoherent()
  arm64: Support DMA_ATTR_WRITE_COMBINE
  arm64: Implement custom mmap functions for dma mapping
  arm64: Fix __range_ok macro
  arm64: Fix duplicated Kconfig entries
  arm64: mm: Route pmd thp functions through pte equivalents
  arm64: rwsem: use asm-generic rwsem implementation
  asm-generic: rwsem: de-PPCify rwsem.h
  arm64: enable generic CPU feature modalias matching for this architecture
  arm64: smp: make local symbol static
  arm64: debug: make local symbols static
  ARM64: perf: support dwarf unwinding in compat mode
  ARM64: perf: add support for frame pointer unwinding in compat mode
  ARM64: perf: add support for perf registers API
  arm64: Add boot time configuration of Intermediate Physical Address size
  arm64: Do not synchronise I and D caches for special ptes
  arm64: Make DMA coherent and strongly ordered mappings not executable
  arm64: barriers: add dmb barrier
  arm64: topology: Implement basic CPU topology support
  arm64: advertise ARMv8 extensions to 32-bit compat ELF binaries
  ...
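The item in this pull that touches cache.S is the DMA API cache maintenance
support. The routines added in the diff below are not called by drivers
directly; they sit behind the architecture's dma_map_ops. As a rough
illustration only, a minimal hypothetical driver snippet like the following
(not part of this commit) would exercise them through the generic streaming
DMA API on a non-coherent arm64 system:

	#include <linux/dma-mapping.h>

	static int example_receive(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* DMA_FROM_DEVICE: the arch code invalidates the buffer's
		 * D-cache lines (__dma_map_area -> __dma_inv_range) so the
		 * CPU cannot later hit stale data. */
		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... device DMAs data into the buffer ... */

		/* Unmap invalidates again (__dma_unmap_area) so lines
		 * speculatively fetched meanwhile are discarded before the
		 * CPU reads the device's data. */
		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}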
Diffstat (limited to 'arch/arm64/mm/cache.S')
-rw-r--r--  arch/arm64/mm/cache.S | 80
1 file changed, 79 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 1ea9f26..c46f48b 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -30,7 +30,7 @@
*
* Corrupted registers: x0-x7, x9-x11
*/
-ENTRY(__flush_dcache_all)
+__flush_dcache_all:
dsb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
@@ -166,3 +166,81 @@ ENTRY(__flush_dcache_area)
dsb sy
ret
ENDPROC(__flush_dcache_area)
+
+/*
+ * __dma_inv_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__dma_inv_range:
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+ bic x1, x1, x3
+1: dc ivac, x0 // invalidate D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_inv_range)
+
+/*
+ * __dma_clean_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__dma_clean_range:
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc cvac, x0 // clean D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_clean_range)
+
+/*
+ * __dma_flush_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__dma_flush_range)
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc civac, x0 // clean & invalidate D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_flush_range)
+
+/*
+ * __dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(__dma_map_area)
+ add x1, x1, x0
+ cmp w2, #DMA_FROM_DEVICE
+ b.eq __dma_inv_range
+ b __dma_clean_range
+ENDPROC(__dma_map_area)
+
+/*
+ * __dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(__dma_unmap_area)
+ add x1, x1, x0
+ cmp w2, #DMA_TO_DEVICE
+ b.ne __dma_inv_range
+ ret
+ENDPROC(__dma_unmap_area)
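
For reference: __dma_map_area picks invalidate for DMA_FROM_DEVICE (the
device is about to write, so stale CPU lines must be dropped) and clean for
the other directions (CPU writes must reach memory before the device reads),
while __dma_unmap_area invalidates for anything other than DMA_TO_DEVICE.
The three range routines share one pattern: mask the start address down to a
cache-line boundary, then issue one dc operation per line until the end
address is reached. A C sketch of that pattern, with illustrative names only
(__dma_inv_range additionally masks the end address):

	static void cache_range_walk(unsigned long start, unsigned long end,
				     unsigned long line)
	{
		unsigned long mask = line - 1;	/* sub x3, x2, #1 */

		start &= ~mask;			/* bic x0, x0, x3 */
		do {
			/* dc ivac/cvac/civac on the line at 'start' */
			start += line;		/* add x0, x0, x2 */
		} while (start < end);		/* cmp x0, x1; b.lo 1b */
		/* dsb sy: complete all maintenance before returning */
	}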