author	Julien Grall <julien.grall@citrix.com>	2015-09-09 15:18:45 +0100
committer	David Vrabel <david.vrabel@citrix.com>	2015-10-23 14:20:43 +0100
commit	9435cce87950d805e6c8315410f2cb8ff6b2c6a2 (patch)
tree	8d7dbbe5e1ce9ccffcdfda949d0b084ae720997d /arch/arm/xen
parent	291be10fd7511101d44cf98166d049bd31bc7600 (diff)
download	op-kernel-dev-9435cce87950d805e6c8315410f2cb8ff6b2c6a2.zip
op-kernel-dev-9435cce87950d805e6c8315410f2cb8ff6b2c6a2.tar.gz
xen/swiotlb: Add support for 64KB page granularity
Swiotlb is used on ARM64 to support DMA on platforms where devices are not protected by an SMMU. Furthermore, it is only enabled for DOM0.

While Xen always uses 4KB page granularity in the stage-2 page table, Linux ARM64 may use either 4KB or 64KB. This means that a Linux page can span multiple Xen pages.

The swiotlb code has to validate that the buffer used for DMA is physically contiguous in memory. As a Linux page can't be shared between local memory and a foreign page by design (the balloon code always removes an entire Linux page), the changes in the code are very minimal because we only need to check the first Xen PFN.

Note that it may be possible to optimize the function check_page_physically_contiguous to avoid looping over every Xen PFN for local memory, but I will leave that optimization for a follow-up.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
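As a quick illustration of the granularity mismatch described above, here is a minimal, self-contained sketch (not kernel code; the constants and the example address are made up purely for illustration and only mirror the kernel's PAGE_SHIFT/XEN_PAGE_SHIFT) showing how one 64KB Linux page covers sixteen consecutive 4KB Xen pages:

/* Illustration only: one 64KB Linux page covers sixteen 4KB Xen pages. */
#include <stdio.h>

#define LINUX_PAGE_SHIFT	16	/* 64KB pages, as with CONFIG_ARM64_64K_PAGES */
#define XEN_PAGE_SHIFT		12	/* Xen always uses 4KB in the stage-2 tables */
#define XEN_PFNS_PER_PAGE	(1UL << (LINUX_PAGE_SHIFT - XEN_PAGE_SHIFT))

int main(void)
{
	unsigned long phys = 0x80010000UL;	/* start of one Linux page (made-up address) */
	unsigned long linux_pfn = phys >> LINUX_PAGE_SHIFT;
	unsigned long first_xen_pfn = phys >> XEN_PAGE_SHIFT;

	printf("Linux PFN %#lx spans Xen PFNs %#lx..%#lx\n",
	       linux_pfn, first_xen_pfn, first_xen_pfn + XEN_PFNS_PER_PAGE - 1);
	return 0;
}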
Diffstat (limited to 'arch/arm/xen')
-rw-r--r--	arch/arm/xen/mm.c	38
1 files changed, 27 insertions, 11 deletions
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 7b517e91..7c34f71 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -48,22 +48,22 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
struct gnttab_cache_flush cflush;
- unsigned long pfn;
+ unsigned long xen_pfn;
size_t left = size;
- pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
- offset %= PAGE_SIZE;
+ xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
+ offset %= XEN_PAGE_SIZE;
do {
size_t len = left;
/* buffers in highmem or foreign pages cannot cross page
* boundaries */
- if (len + offset > PAGE_SIZE)
- len = PAGE_SIZE - offset;
+ if (len + offset > XEN_PAGE_SIZE)
+ len = XEN_PAGE_SIZE - offset;
cflush.op = 0;
- cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+ cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
cflush.offset = offset;
cflush.length = len;
@@ -79,7 +79,7 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
offset = 0;
- pfn++;
+ xen_pfn++;
left -= len;
} while (left);
}
@@ -141,10 +141,26 @@ bool xen_arch_need_swiotlb(struct device *dev,
phys_addr_t phys,
dma_addr_t dev_addr)
{
- unsigned long pfn = PFN_DOWN(phys);
- unsigned long bfn = PFN_DOWN(dev_addr);
-
- return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
+ unsigned int xen_pfn = XEN_PFN_DOWN(phys);
+ unsigned int bfn = XEN_PFN_DOWN(dev_addr);
+
+ /*
+ * The swiotlb buffer should be used if
+ * - Xen doesn't have the cache flush hypercall
+ * - The Linux page refers to foreign memory
+ * - The device doesn't support coherent DMA requests
+ *
+ * The Linux page may span multiple Xen pages, although
+ * it's not possible to have a mix of local and foreign Xen pages.
+ * Furthermore, range_straddles_page_boundary already checks
+ * whether the buffer is physically contiguous in host RAM.
+ *
+ * Therefore we only need to check the first Xen page to know if we
+ * require a bounce buffer because the device doesn't support coherent
+ * memory and we are not able to flush the cache.
+ */
+ return (!hypercall_cflush && (xen_pfn != bfn) &&
+ !is_device_dma_coherent(dev));
}
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
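For readers following the dma_cache_maint() change above, here is a minimal, self-contained sketch of the same chunking idea: walk a buffer in Xen-page-sized pieces, where each piece would correspond to one GNTTABOP_cache_flush hypercall in the real code. The function name walk_xen_pages, the locally redefined constants, and the example buffer are illustrative assumptions, not part of the patch.

/* Illustration only: walking a DMA buffer in 4KB Xen-page-sized chunks,
 * mirroring the loop structure of dma_cache_maint(). Each printed line
 * stands in for one GNTTABOP_cache_flush hypercall. */
#include <stdio.h>

#define XEN_PAGE_SHIFT	12
#define XEN_PAGE_SIZE	(1UL << XEN_PAGE_SHIFT)

static void walk_xen_pages(unsigned long handle, unsigned long offset, size_t size)
{
	unsigned long xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
	size_t left = size;

	offset %= XEN_PAGE_SIZE;
	do {
		size_t len = left;

		/* A chunk never crosses a Xen page boundary. */
		if (len + offset > XEN_PAGE_SIZE)
			len = XEN_PAGE_SIZE - offset;

		printf("flush xen_pfn=%#lx offset=%lu len=%zu\n",
		       xen_pfn, offset, len);

		offset = 0;
		xen_pfn++;
		left -= len;
	} while (left);
}

int main(void)
{
	/* e.g. a 9KB buffer starting 1KB into a Xen page (made-up values) */
	walk_xen_pages(0x80010000UL, 1024, 9 * 1024);
	return 0;
}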