author		David S. Miller <davem@davemloft.net>	2015-12-03 21:03:21 -0500
committer	David S. Miller <davem@davemloft.net>	2015-12-03 21:09:12 -0500
commit	f188b951f33a0464338f94f928338f84fc0e4392 (patch)
tree	17ad63719242b1de0266627a1dc92ba869a3ba4e /arch/arm64/mm/dma-mapping.c
parent	6b20da4d8f3f6a3be9f67e3207f435cfaa5f7f97 (diff)
parent	071f5d105a0ae93aeb02197c4ee3557e8cc57a21 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/renesas/ravb_main.c
	kernel/bpf/syscall.c
	net/ipv4/ipmr.c

All three conflicts were cases of overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/arm64/mm/dma-mapping.c')
-rw-r--r--	arch/arm64/mm/dma-mapping.c	35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199..7963aa4 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
  */
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 #include <asm/cacheflush.h>
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
 {
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
 static int __init arm64_dma_init(void)
 {
-	int ret;
-
-	dma_ops = &swiotlb_dma_ops;
-
-	ret = atomic_pool_init();
-
-	return ret;
+	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	size_t iosize = size;
	void *addr;
	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;
+
+	size = PAGE_ALIGN(size);
+
	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
		struct page **pages;
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
-		pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
					flush_page);
		if (!pages)
			return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
-			iommu_dma_free(dev, pages, size, handle);
+			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
		if (!addr)
			return NULL;
-		*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, struct dma_attrs *attrs)
 {
+	size_t iosize = size;
+
+	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)){
		struct vm_struct *area = find_vm_area(cpu_addr);
		if (WARN_ON(!area || !area->pages))
			return;
-		iommu_dma_free(dev, area->pages, size, &handle);
+		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
 }
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
 {
-	if (!acpi_disabled && !dev->archdata.dma_ops)
-		dev->archdata.dma_ops = dma_ops;
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = &swiotlb_dma_ops;
	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
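
Note on the size handling above: the hunks keep the caller's original length in iosize for the iommu_dma_*() calls, while a locally page-aligned size is used for the CPU-side remap, atomic-pool and page-freeing paths. The standalone sketch below only illustrates that split; it is ordinary userspace C, the helper names are hypothetical stand-ins rather than kernel APIs, and it is not part of the change shown in this diff.

/*
 * Minimal sketch, assuming a 4 KiB page size: preserve the requested
 * length ("iosize") for the IOMMU-facing calls, page-align the copy
 * used for CPU-visible mappings. Helper names are made up for the
 * illustration.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Stand-in for the iommu_dma_* layer: gets the caller's exact size. */
static void iommu_layer_map(size_t iosize)
{
	printf("IOMMU layer sees the original size: %zu bytes\n", iosize);
}

/* Stand-in for the CPU remap/free paths: work in whole pages. */
static void cpu_remap(size_t size)
{
	printf("CPU remap uses the page-aligned size: %zu bytes\n", size);
}

static void alloc_attrs_sketch(size_t size)
{
	size_t iosize = size;		/* keep what the caller asked for */

	size = PAGE_ALIGN(size);	/* CPU mappings are page-granular */

	iommu_layer_map(iosize);
	cpu_remap(size);
}

int main(void)
{
	alloc_attrs_sketch(100);	/* 100 -> iosize 100, size 4096 */
	return 0;
}

Built with any C compiler, the sketch reports 100 bytes on the IOMMU side and 4096 bytes on the CPU side for a 100-byte request, mirroring how the diff threads iosize and the aligned size through the two layers.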