author     Lars-Peter Clausen <lars@metafoo.de>    2014-12-03 16:07:28 +0100
committer  Michal Simek <michal.simek@xilinx.com>  2014-12-17 12:59:59 +0100
commit     3a8e3265179b7e6394d7aab4d6df5651b49e7243 (patch)
tree       82194a886cb05fe27ba3ddf0dca4ec05c3977e4a /arch/microblaze/kernel
parent     b2776bf7149bddd1f4161f14f79520f17fc1d71d (diff)
microblaze: Fix mmap for cache coherent memory
When running in a non-cache-coherent configuration, memory allocated with dma_alloc_coherent() has a custom mapping, so there is no 1-to-1 relationship between the kernel virtual address and the PFN. This means that virt_to_pfn() will not work correctly for those addresses, and the default mmap implementation in the form of dma_common_mmap() will map some random, but not the requested, memory area.

Fix this by providing a custom mmap implementation that looks up the PFN from the page table rather than using virt_to_pfn().

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
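For context, a minimal sketch of how a driver would exercise this path (the device structure, field names, and mmap handler below are illustrative, not part of this patch): the driver's mmap file operation calls dma_mmap_coherent(), which dispatches to the architecture's dma_map_ops->mmap and therefore, on microblaze, to the dma_direct_mmap_coherent() added here.

/*
 * Hypothetical driver-side usage; my_dev and my_drv_mmap are made up for
 * illustration. The buffer is assumed to come from dma_alloc_coherent().
 */
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct my_dev {
	struct device *dev;
	void *buf_virt;		/* CPU address returned by dma_alloc_coherent() */
	dma_addr_t buf_dma;	/* matching DMA handle */
	size_t buf_size;
};

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *mdev = file->private_data;

	/*
	 * dma_mmap_coherent() ends up in dma_map_ops->mmap, i.e. the
	 * dma_direct_mmap_coherent() added by this patch, so the PFN is
	 * looked up correctly even for the non-cache-coherent mapping.
	 */
	return dma_mmap_coherent(mdev->dev, vma, mdev->buf_virt,
				 mdev->buf_dma, mdev->buf_size);
}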
Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--  arch/microblaze/kernel/dma.c | 27
1 file changed, 27 insertions, 0 deletions
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 4633c36..ed7ba8a 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -154,9 +154,36 @@ dma_direct_sync_sg_for_device(struct device *dev,
__dma_sync(sg->dma_address, sg->length, direction);
}
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long off = vma->vm_pgoff;
+ unsigned long pfn;
+
+ if (off >= count || user_count > (count - off))
+ return -ENXIO;
+
+#ifdef NOT_COHERENT_CACHE
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pfn = consistent_virt_to_pfn(cpu_addr);
+#else
+ pfn = virt_to_pfn(cpu_addr);
+#endif
+ return remap_pfn_range(vma, vma->vm_start, pfn + off,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+#else
+ return -ENXIO;
+#endif
+}
+
struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = dma_direct_map_sg,
.dma_supported = dma_direct_dma_supported,
.map_page = dma_direct_map_page,
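With the new .mmap op wired into dma_direct_ops, a userspace mapping of such a buffer lands on the intended pages. A minimal userspace sketch, assuming a hypothetical character device /dev/mydev whose mmap handler forwards to dma_mmap_coherent() as in the driver sketch above:

/* Userspace sketch; the device node and buffer size are assumptions. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;			/* one page */
	int fd = open("/dev/mydev", O_RDWR);	/* hypothetical device node */
	if (fd < 0)
		return 1;

	/*
	 * The byte offset must be page aligned; it becomes vma->vm_pgoff,
	 * i.e. the 'off' that dma_direct_mmap_coherent() range-checks
	 * before calling remap_pfn_range().
	 */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* ... read/write the coherent buffer ... */

	munmap(p, len);
	close(fd);
	return 0;
}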