author		Thomas Gleixner <tglx@linutronix.de>	2016-11-16 14:19:34 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2016-11-16 14:19:34 +0100
commit		7ce7f35b33eb42b6aa4cf176fa34372b21b8472b (patch)
tree		12f3027003f1d7cce30f838265e4fab2d52c6cef /arch/arc/mm/dma.c
parent		f410770293a1fbc08906474c24104a7a11943eb6 (diff)
parent		47bdf3378d62a627cfb8a54e1180c08d67078b61 (diff)
Merge branch 'x86/cpufeature' into x86/cache

Resolve the cpu/scattered conflict.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/arc/mm/dma.c')
 arch/arc/mm/dma.c | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 20afc65..cd8aad8 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 *   -For coherent data, Read/Write to buffers terminate early in cache
 	 *   (vs. always going to memory - thus are faster)
 	 */
-	if ((is_isa_arcv2() && ioc_exists) ||
+	if ((is_isa_arcv2() && ioc_enable) ||
 	    (attrs & DMA_ATTR_NON_CONSISTENT))
 		need_coh = 0;
 
@@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	int is_non_coh = 1;
 
 	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
-			(is_isa_arcv2() && ioc_exists);
+			(is_isa_arcv2() && ioc_enable);
 
 	if (PageHighMem(page) || !is_non_coh)
 		iounmap((void __force __iomem *)vaddr);
@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+	unsigned long off = vma->vm_pgoff;
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 /*
  * streaming DMA Mapping API...
  * CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 struct dma_map_ops arc_dma_ops = {
 	.alloc			= arc_dma_alloc,
 	.free			= arc_dma_free,
+	.mmap			= arc_dma_mmap,
 	.map_page		= arc_dma_map_page,
 	.map_sg			= arc_dma_map_sg,
 	.sync_single_for_device	= arc_dma_sync_single_for_device,
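
For context: the new .mmap hook is reached through dma_mmap_coherent(), which dispatches via the device's dma_map_ops. Below is a minimal driver-side sketch showing how a char device would exercise it; the my_* names, MY_BUF_SIZE, and the probe-time wiring are illustrative assumptions, not part of this commit.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>

#define MY_BUF_SIZE	SZ_64K		/* assumed buffer size */

static struct device *my_dev;		/* assumed: saved at probe time */
static void *my_cpu_addr;		/* kernel-side view of the buffer */
static dma_addr_t my_dma_handle;	/* bus address handed to the device */

/* Allocate one coherent buffer; on ARC this goes through arc_dma_alloc(). */
static int my_setup(struct device *dev)
{
	my_dev = dev;
	my_cpu_addr = dma_alloc_coherent(dev, MY_BUF_SIZE,
					 &my_dma_handle, GFP_KERNEL);
	return my_cpu_addr ? 0 : -ENOMEM;
}

/*
 * mmap() file operation: dma_mmap_coherent() ends up in
 * get_dma_ops(dev)->mmap, i.e. arc_dma_mmap() above on ARC.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
				 my_dma_handle, MY_BUF_SIZE);
}

static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.mmap	= my_mmap,
};

Inside arc_dma_mmap(), the "off < count && user_count <= (count - off)" test is the usual bounds check: the requested window of vma_pages(vma) pages starting at vm_pgoff must fit within the PAGE_ALIGN(size) pages actually backing the buffer, and remap_pfn_range() then installs those pages with pgprot_noncached() protections.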