author	Stefano Stabellini <stefano.stabellini@eu.citrix.com>	2014-11-21 11:05:39 +0000
committer	David Vrabel <david.vrabel@citrix.com>	2014-12-04 12:41:52 +0000
commit	3567258d281b5b515d5165ed23851d9f84087e7d (patch)
tree	69da5f20e5354507e3219eaeb6e72b7e5e93f58d
parent	a0f2dee0cd651efb5fac6a1d35b0a14460ebcdd4 (diff)
xen/arm: use hypercall to flush caches in map_page
In xen_dma_map_page, if the page is a local page, call the native map_page dma_ops. If the page is foreign, call __xen_dma_map_page, which issues any required cache maintenance operations via hypercall.

The reason for doing this is that the native dma_ops map_page could allocate buffers that need to be freed: if the page is foreign we don't call the native unmap_page dma_ops function, resulting in a memory leak.

Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
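Note on the hypercall path (not shown in this diff): __xen_dma_map_page ends up in __xen_dma_page_cpu_to_dev/dma_cache_maint in arch/arm/xen/mm32.c, which, per the message above, perform the cache maintenance via hypercall, since dom0 cannot reach a foreign page's cache lines through its own virtual mappings. The sketch below shows roughly what such a hypercall-based flush looks like using the GNTTABOP_cache_flush grant-table interface; it is illustrative only (single page, simplified field handling), not the exact in-tree loop.

/* Illustrative sketch of a hypercall-based flush for one page of a foreign
 * buffer.  Assumes the GNTTABOP_cache_flush grant-table operation; the real
 * dma_cache_maint() walks the whole buffer page by page. */
#include <linux/dma-mapping.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

static void sketch_flush_foreign_page(dma_addr_t handle, size_t size,
				      enum dma_data_direction dir)
{
	struct gnttab_cache_flush cflush;

	cflush.a.dev_bus_addr = handle & PAGE_MASK;	/* machine (bus) address */
	cflush.offset = handle & ~PAGE_MASK;
	cflush.length = size;

	/* Map the DMA direction onto the hypercall's clean/invalidate ops. */
	if (dir == DMA_FROM_DEVICE)
		cflush.op = GNTTAB_CACHE_INVAL;
	else
		cflush.op = GNTTAB_CACHE_CLEAN;

	/* The hypervisor performs the maintenance on dom0's behalf, since it
	 * can address the foreign page by machine address. */
	HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
}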
-rw-r--r--	arch/arm/include/asm/xen/page-coherent.h	13
-rw-r--r--	arch/arm/xen/mm32.c	12
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index a309f42..efd5624 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -5,6 +5,9 @@
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
@@ -32,7 +35,15 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
+ /* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise
+ * is a foreign page grant-mapped in dom0. If the page is local we
+ * can safely call the native dma_ops function, otherwise we call
+ * the xen specific function. */
+ if (local)
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ else
+ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
index 3ce9dc1..c86919b 100644
--- a/arch/arm/xen/mm32.c
+++ b/arch/arm/xen/mm32.c
@@ -43,6 +43,18 @@ static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ if (is_device_dma_coherent(hwdev))
+ return;
+ if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ return;
+
+ __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
+}
+
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
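For context on the memory-leak argument in the message: on the unmap side the same local/foreign split is applied, and the native unmap_page is only invoked for local pages, so any state the native map_page had allocated for a foreign page would never be released. The sketch below is not part of this diff; the pfn_valid()-based check and the exact call shape are assumptions, included only to show the intended pairing.

/* Sketch of the matching unmap-side dispatch implied by the commit message
 * (assumed shape, not taken from this patch).  Local pages (pfn == mfn under
 * dom0's 1:1 mapping) go to the native dma_ops; foreign grant-mapped pages
 * only need hypercall-based cache maintenance. */
static inline void sketch_xen_dma_unmap_page(struct device *hwdev,
		dma_addr_t handle, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (pfn_valid(PFN_DOWN(handle))) {
		/* Local page: the native map_page ran, so the native
		 * unmap_page must run too, freeing any bounce buffer or
		 * other state it set up. */
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle,
							     size, dir, attrs);
	} else {
		/* Foreign page: the native unmap_page is never called here,
		 * which is exactly why map_page must not use the native
		 * dma_ops for foreign pages - anything it allocated would
		 * leak. */
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
	}
}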