author    Linus Torvalds <torvalds@linux-foundation.org>  2012-07-30 10:11:31 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-07-30 10:11:31 -0700
commit    6f51f51582e793ea13e7de7ed6b138f71c51784b (patch)
tree      211ecbf88cdf2f183e23da3f8f23153ac6133410 /arch
parent    76159c20c0bcf5b38178fbfb61049eeb6380bb54 (diff)
parent    97ef952a20853fad72087a53fa556fbec45edd8f (diff)
Merge branch 'for-linus-for-3.6-rc1' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA-mapping updates from Marek Szyprowski:
 "These patches are a continuation of my earlier work.  They contain
  extensions to the DMA-mapping framework to remove limitations of the
  current ARM implementation (like the limited total size of DMA
  coherent/write-combine buffers), improve the performance of buffer
  sharing between devices (attributes to skip CPU cache operations or
  the creation of an additional kernel mapping for some specific use
  cases), as well as some unification of the common code for the
  dma_mmap_attrs() and dma_mmap_coherent() functions.  All extensions
  have been implemented and tested for the ARM architecture."

* 'for-linus-for-3.6-rc1' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: dma-mapping: add support for DMA_ATTR_SKIP_CPU_SYNC attribute
  common: DMA-mapping: add DMA_ATTR_SKIP_CPU_SYNC attribute
  ARM: dma-mapping: add support for dma_get_sgtable()
  common: dma-mapping: introduce dma_get_sgtable() function
  ARM: dma-mapping: add support for DMA_ATTR_NO_KERNEL_MAPPING attribute
  common: DMA-mapping: add DMA_ATTR_NO_KERNEL_MAPPING attribute
  common: dma-mapping: add support for generic dma_mmap_* calls
  ARM: dma-mapping: fix error path for memory allocation failure
  ARM: dma-mapping: add more sanity checks in arm_dma_mmap()
  ARM: dma-mapping: remove custom consistent dma region
  mm: vmalloc: use const void * for caller argument
  scatterlist: add sg_alloc_table_from_pages function
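For orientation before the diffstat, here is a minimal, hypothetical driver fragment (not part of this merge) sketching how the attribute-based interface of this era consumes the series' headline feature, DMA_ATTR_NO_KERNEL_MAPPING; the device, buffer size and error handling are illustrative.

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>
	#include <linux/sizes.h>

	static int example_alloc(struct device *dev)
	{
		DEFINE_DMA_ATTRS(attrs);
		dma_addr_t handle;
		void *cookie;

		/* The buffer is only ever touched by the device, so ask the
		 * allocator not to create a kernel mapping for it. */
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

		cookie = dma_alloc_attrs(dev, SZ_64K, &handle, GFP_KERNEL, &attrs);
		if (!cookie)
			return -ENOMEM;

		/* With DMA_ATTR_NO_KERNEL_MAPPING the return value is an
		 * opaque cookie, not a CPU pointer; it may only be passed
		 * back to dma_free_attrs() (or dma_mmap_attrs() and
		 * dma_get_sgtable()) together with the same attrs. */
		dma_free_attrs(dev, SZ_64K, cookie, handle, &attrs);
		return 0;
	}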
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/common/dmabounce.c             |   1
-rw-r--r--  arch/arm/include/asm/dma-mapping.h      |  24
-rw-r--r--  arch/arm/mm/dma-mapping.c               | 561
-rw-r--r--  arch/arm/mm/mm.h                        |   3
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h  |   8
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c         |   1
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c       |   1
-rw-r--r--  arch/powerpc/kernel/dma.c               |  36
-rw-r--r--  arch/powerpc/kernel/vio.c               |   1
9 files changed, 261 insertions(+), 375 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index aa07f59..1143c4d 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -452,6 +452,7 @@ static struct dma_map_ops dmabounce_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
+ .get_sgtable = arm_dma_get_sgtable,
.map_page = dmabounce_map_page,
.unmap_page = dmabounce_unmap_page,
.sync_single_for_cpu = dmabounce_sync_for_cpu,
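The new .get_sgtable slot is reached through a common dispatcher added elsewhere in this series ("common: dma-mapping: introduce dma_get_sgtable() function"). Reconstructed as a sketch (not part of the hunk above, details approximate), that dispatcher looks like:

	static inline int
	dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      struct dma_attrs *attrs)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		BUG_ON(!ops);
		if (ops->get_sgtable)
			return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr,
						size, attrs);
		return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
	}

	#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)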
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index bbef15d..2ae842d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs);
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
@@ -213,20 +202,12 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
/*
* This can be called during boot to increase the size of the consistent
* DMA region above it's default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
*/
-extern void __init init_consistent_dma_size(unsigned long size);
+static inline void init_consistent_dma_size(unsigned long size) { }
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
@@ -280,6 +261,9 @@ extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
+extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
#endif /* __KERNEL__ */
#endif
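The dma_mmap_attrs() and dma_mmap_writecombine() helpers removed above are not lost: the "common: dma-mapping: add support for generic dma_mmap_* calls" patch moves them into shared code. Roughly (a reconstructed sketch, so treat details as approximate):

	static inline int
	dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size,
		       struct dma_attrs *attrs)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		BUG_ON(!ops);
		if (ops->mmap)
			return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
	}

	#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

This is why the ARM header can drop its private copies: every architecture now shares one attrs-aware entry point, and per-arch behaviour lives behind ops->mmap.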
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5cfc989..c2cdf65 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -22,6 +22,7 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
@@ -72,7 +73,7 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(page, offset, size, dir);
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
@@ -95,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
handle & ~PAGE_MASK, size, dir);
}
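With DMA_ATTR_SKIP_CPU_SYNC honoured in map/unmap, a driver that shares a buffer between devices can take over cache maintenance itself and flush only what actually changed. A hypothetical fragment (names and sizes illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	static dma_addr_t example_map(struct device *dev, void *buf,
				      size_t buf_size, size_t dirty_len)
	{
		DEFINE_DMA_ATTRS(attrs);
		dma_addr_t dma;

		/* Map without the implicit cache maintenance... */
		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dma = dma_map_single_attrs(dev, buf, buf_size,
					   DMA_TO_DEVICE, &attrs);

		/* ...then synchronize explicitly, only for the bytes the CPU
		 * actually dirtied before handing the buffer to the device. */
		if (!dma_mapping_error(dev, dma))
			dma_sync_single_for_device(dev, dma, dirty_len,
						   DMA_TO_DEVICE);
		return dma;
	}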
@@ -124,6 +125,7 @@ struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
+ .get_sgtable = arm_dma_get_sgtable,
.map_page = arm_dma_map_page,
.unmap_page = arm_dma_unmap_page,
.map_sg = arm_dma_map_sg,
@@ -217,115 +219,70 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
#ifdef CONFIG_MMU
+#ifdef CONFIG_HUGETLB_PAGE
+#error ARM Coherent DMA allocator does not (yet) support huge TLB
+#endif
-#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
-
-/*
- * These are the page tables (2MB each) covering uncached, DMA consistent allocations
- */
-static pte_t **consistent_pte;
-
-#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page);
-static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ pgprot_t prot, struct page **ret_page,
+ const void *caller);
-void __init init_consistent_dma_size(unsigned long size)
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
{
- unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
+ struct vm_struct *area;
+ unsigned long addr;
- BUG_ON(consistent_pte); /* Check we're called before DMA region init */
- BUG_ON(base < VMALLOC_END);
+ /*
+ * DMA allocation can be mapped to user space, so lets
+ * set VM_USERMAP flags too.
+ */
+ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+ caller);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+ area->phys_addr = __pfn_to_phys(page_to_pfn(page));
- /* Grow region to accommodate specified size */
- if (base < consistent_base)
- consistent_base = base;
+ if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+ return (void *)addr;
}
-#include "vmregion.h"
-
-static struct arm_vmregion_head consistent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
- .vm_end = CONSISTENT_END,
-};
-
-#ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
-#endif
-
-/*
- * Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void)
+static void __dma_free_remap(void *cpu_addr, size_t size)
{
- int ret = 0;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i = 0;
- unsigned long base = consistent_base;
- unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
-
- if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
- return 0;
-
- consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
- if (!consistent_pte) {
- pr_err("%s: no memory\n", __func__);
- return -ENOMEM;
+ unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+ if (!area || (area->flags & flags) != flags) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
}
-
- pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
- consistent_head.vm_start = base;
-
- do {
- pgd = pgd_offset(&init_mm, base);
-
- pud = pud_alloc(&init_mm, pgd, base);
- if (!pud) {
- pr_err("%s: no pud tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
-
- pmd = pmd_alloc(&init_mm, pud, base);
- if (!pmd) {
- pr_err("%s: no pmd tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
- WARN_ON(!pmd_none(*pmd));
-
- pte = pte_alloc_kernel(pmd, base);
- if (!pte) {
- pr_err("%s: no pte tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
-
- consistent_pte[i++] = pte;
- base += PMD_SIZE;
- } while (base < CONSISTENT_END);
-
- return ret;
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
}
-core_initcall(consistent_init);
-static void *__alloc_from_contiguous(struct device *dev, size_t size,
- pgprot_t prot, struct page **ret_page);
-
-static struct arm_vmregion_head coherent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+struct dma_pool {
+ size_t size;
+ spinlock_t lock;
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+ void *vaddr;
+ struct page *page;
};
-static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static struct dma_pool atomic_pool = {
+ .size = SZ_256K,
+};
static int __init early_coherent_pool(char *p)
{
- coherent_pool_size = memparse(p, &p);
+ atomic_pool.size = memparse(p, &p);
return 0;
}
early_param("coherent_pool", early_coherent_pool);
@@ -333,32 +290,45 @@ early_param("coherent_pool", early_coherent_pool);
/*
* Initialise the coherent pool for atomic allocations.
*/
-static int __init coherent_init(void)
+static int __init atomic_pool_init(void)
{
+ struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
- size_t size = coherent_pool_size;
+ unsigned long nr_pages = pool->size >> PAGE_SHIFT;
+ unsigned long *bitmap;
struct page *page;
void *ptr;
+ int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
- if (!IS_ENABLED(CONFIG_CMA))
- return 0;
+ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!bitmap)
+ goto no_bitmap;
- ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+ if (IS_ENABLED(CONFIG_CMA))
+ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+ else
+ ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
+ &page, NULL);
if (ptr) {
- coherent_head.vm_start = (unsigned long) ptr;
- coherent_head.vm_end = (unsigned long) ptr + size;
- printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
- (unsigned)size / 1024);
+ spin_lock_init(&pool->lock);
+ pool->vaddr = ptr;
+ pool->page = page;
+ pool->bitmap = bitmap;
+ pool->nr_pages = nr_pages;
+ pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+ (unsigned)pool->size / 1024);
return 0;
}
- printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
- (unsigned)size / 1024);
+ kfree(bitmap);
+no_bitmap:
+ pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+ (unsigned)pool->size / 1024);
return -ENOMEM;
}
/*
* CMA is activated by core_initcall, so we must be called after it.
*/
-postcore_initcall(coherent_init);
+postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
phys_addr_t base;
@@ -406,112 +376,6 @@ void __init dma_contiguous_remap(void)
}
}
-static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- struct arm_vmregion *c;
- size_t align;
- int bit;
-
- if (!consistent_pte) {
- pr_err("%s: not initialised\n", __func__);
- dump_stack();
- return NULL;
- }
-
- /*
- * Align the virtual region allocation - maximum alignment is
- * a section size, minimum is a page size. This helps reduce
- * fragmentation of the DMA space, and also prevents allocations
- * smaller than a section from crossing a section boundary.
- */
- bit = fls(size - 1);
- if (bit > SECTION_SHIFT)
- bit = SECTION_SHIFT;
- align = 1 << bit;
-
- /*
- * Allocate a virtual address in the consistent mapping region.
- */
- c = arm_vmregion_alloc(&consistent_head, align, size,
- gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
- if (c) {
- pte_t *pte;
- int idx = CONSISTENT_PTE_INDEX(c->vm_start);
- u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-
- pte = consistent_pte[idx] + off;
- c->priv = page;
-
- do {
- BUG_ON(!pte_none(*pte));
-
- set_pte_ext(pte, mk_pte(page, prot), 0);
- page++;
- pte++;
- off++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- pte = consistent_pte[++idx];
- }
- } while (size -= PAGE_SIZE);
-
- dsb();
-
- return (void *)c->vm_start;
- }
- return NULL;
-}
-
-static void __dma_free_remap(void *cpu_addr, size_t size)
-{
- struct arm_vmregion *c;
- unsigned long addr;
- pte_t *ptep;
- int idx;
- u32 off;
-
- c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
- if (!c) {
- pr_err("%s: trying to free invalid coherent area: %p\n",
- __func__, cpu_addr);
- dump_stack();
- return;
- }
-
- if ((c->vm_end - c->vm_start) != size) {
- pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
- __func__, c->vm_end - c->vm_start, size);
- dump_stack();
- size = c->vm_end - c->vm_start;
- }
-
- idx = CONSISTENT_PTE_INDEX(c->vm_start);
- off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
- ptep = consistent_pte[idx] + off;
- addr = c->vm_start;
- do {
- pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-
- ptep++;
- addr += PAGE_SIZE;
- off++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- ptep = consistent_pte[++idx];
- }
-
- if (pte_none(pte) || !pte_present(pte))
- pr_crit("%s: bad page in kernel page table\n",
- __func__);
- } while (size -= PAGE_SIZE);
-
- flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
- arm_vmregion_free(&consistent_head, c);
-}
-
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
void *data)
{
@@ -552,16 +416,17 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
return ptr;
}
-static void *__alloc_from_pool(struct device *dev, size_t size,
- struct page **ret_page, const void *caller)
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
- struct arm_vmregion *c;
+ struct dma_pool *pool = &atomic_pool;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned int pageno;
+ unsigned long flags;
+ void *ptr = NULL;
size_t align;
- if (!coherent_head.vm_start) {
- printk(KERN_ERR "%s: coherent pool not initialised!\n",
- __func__);
- dump_stack();
+ if (!pool->vaddr) {
+ WARN(1, "coherent pool not initialised!\n");
return NULL;
}
@@ -571,35 +436,41 @@ static void *__alloc_from_pool(struct device *dev, size_t size,
* size. This helps reduce fragmentation of the DMA space.
*/
align = PAGE_SIZE << get_order(size);
- c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
- if (c) {
- void *ptr = (void *)c->vm_start;
- struct page *page = virt_to_page(ptr);
- *ret_page = page;
- return ptr;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
+ 0, count, (1 << align) - 1);
+ if (pageno < pool->nr_pages) {
+ bitmap_set(pool->bitmap, pageno, count);
+ ptr = pool->vaddr + PAGE_SIZE * pageno;
+ *ret_page = pool->page + pageno;
}
- return NULL;
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return ptr;
}
-static int __free_from_pool(void *cpu_addr, size_t size)
+static int __free_from_pool(void *start, size_t size)
{
- unsigned long start = (unsigned long)cpu_addr;
- unsigned long end = start + size;
- struct arm_vmregion *c;
+ struct dma_pool *pool = &atomic_pool;
+ unsigned long pageno, count;
+ unsigned long flags;
- if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+ if (start < pool->vaddr || start > pool->vaddr + pool->size)
return 0;
- c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
-
- if ((c->vm_end - c->vm_start) != size) {
- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
- __func__, c->vm_end - c->vm_start, size);
- dump_stack();
- size = c->vm_end - c->vm_start;
+ if (start + size > pool->vaddr + pool->size) {
+ WARN(1, "freeing wrong coherent size from pool\n");
+ return 0;
}
- arm_vmregion_free(&coherent_head, c);
+ pageno = (start - pool->vaddr) >> PAGE_SHIFT;
+ count = size >> PAGE_SHIFT;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ bitmap_clear(pool->bitmap, pageno, count);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
return 1;
}
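A worked example of the pool bookkeeping, assuming PAGE_SIZE is 4096 and the default 256 KiB pool (nr_pages == 64). One editorial caveat: align above is a byte count (PAGE_SIZE << get_order(size)), so the (1 << align) - 1 mask handed to bitmap_find_next_zero_area() looks like a slip for (1 << get_order(size)) - 1; later mainline fixes tightened this up.

	/*
	 * An 8 KiB request needs count = 2 bits in the bitmap. If
	 * bitmap_find_next_zero_area() returns pageno == 6:
	 *
	 *	ptr       = pool->vaddr + PAGE_SIZE * 6;
	 *	*ret_page = pool->page + 6;
	 *
	 * On free, pageno = (start - pool->vaddr) >> PAGE_SHIFT == 6 again,
	 * and bitmap_clear() releases the same 2 bits under pool->lock.
	 */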
@@ -644,7 +515,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
-#define __alloc_from_pool(dev, size, ret_page, c) NULL
+#define __alloc_from_pool(size, ret_page) NULL
#define __alloc_from_contiguous(dev, size, prot, ret) NULL
#define __free_from_pool(cpu_addr, size) 0
#define __free_from_contiguous(dev, page, size) do { } while (0)
@@ -702,10 +573,10 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (arch_is_coherent() || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(size, &page);
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
- else if (gfp & GFP_ATOMIC)
- addr = __alloc_from_pool(dev, size, &page, caller);
else
addr = __alloc_from_contiguous(dev, size, prot, &page);
@@ -741,16 +612,22 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
+ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = dma_to_pfn(dev, dma_addr);
+ unsigned long off = vma->vm_pgoff;
+
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
+ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
#endif /* CONFIG_MMU */
return ret;
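To make the new sanity check concrete, a small worked example (illustrative numbers):

	/*
	 * For a 16 KiB buffer, nr_pages == 4. An mmap() with vm_pgoff == 1
	 * is accepted only if the VMA spans at most nr_pages - off == 3
	 * pages; a larger VMA, or an offset beyond the buffer, now falls
	 * through and returns -ENXIO instead of remapping stray pages.
	 */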
@@ -785,6 +662,21 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
}
}
+int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
+
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
void (*op)(const void *, size_t, int))
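A caller-side sketch of the matching dma_get_sgtable() interface (hypothetical exporter code; the importer side is elided):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int example_export(struct device *dev, void *cpu_addr,
				  dma_addr_t handle, size_t size)
	{
		struct sg_table sgt;
		int ret;

		/* Describe an existing coherent buffer as a scatterlist so
		 * another device (or a framework such as dma-buf) can map it. */
		ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
		if (ret)
			return ret;

		/* ...hand &sgt to the importer, e.g. dma_map_sg() on the
		 * other device, then release the table when done... */
		sg_free_table(&sgt);
		return 0;
	}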
@@ -998,9 +890,6 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
static int __init dma_debug_do_init(void)
{
-#ifdef CONFIG_MMU
- arm_vmregion_create_proc("dma-mappings", &consistent_head);
-#endif
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
@@ -1088,7 +977,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
return pages;
error:
- while (--i)
+ while (i--)
if (pages[i])
__free_pages(pages[i], 0);
if (array_size <= PAGE_SIZE)
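The one-character change above is the "fix error path for memory allocation failure" patch from this pull; why it matters:

	/*
	 * With i pages successfully allocated (indices 0 .. i-1), the old
	 * "while (--i)" freed pages[i-1] .. pages[1] but leaked pages[0],
	 * and when the very first allocation failed (i == 0) it wrapped
	 * and read before the start of the array. "while (i--)" frees
	 * pages[i-1] down to pages[0] and stops cleanly at zero.
	 */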
@@ -1117,61 +1006,32 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
* Create a CPU mapping for a specified pages
*/
static void *
-__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
{
- struct arm_vmregion *c;
- size_t align;
- size_t count = size >> PAGE_SHIFT;
- int bit;
+ unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_struct *area;
+ unsigned long p;
- if (!consistent_pte[0]) {
- pr_err("%s: not initialised\n", __func__);
- dump_stack();
+ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+ caller);
+ if (!area)
return NULL;
- }
-
- /*
- * Align the virtual region allocation - maximum alignment is
- * a section size, minimum is a page size. This helps reduce
- * fragmentation of the DMA space, and also prevents allocations
- * smaller than a section from crossing a section boundary.
- */
- bit = fls(size - 1);
- if (bit > SECTION_SHIFT)
- bit = SECTION_SHIFT;
- align = 1 << bit;
-
- /*
- * Allocate a virtual address in the consistent mapping region.
- */
- c = arm_vmregion_alloc(&consistent_head, align, size,
- gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
- if (c) {
- pte_t *pte;
- int idx = CONSISTENT_PTE_INDEX(c->vm_start);
- int i = 0;
- u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-
- pte = consistent_pte[idx] + off;
- c->priv = pages;
-
- do {
- BUG_ON(!pte_none(*pte));
-
- set_pte_ext(pte, mk_pte(pages[i], prot), 0);
- pte++;
- off++;
- i++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- pte = consistent_pte[++idx];
- }
- } while (i < count);
- dsb();
+ area->pages = pages;
+ area->nr_pages = nr_pages;
+ p = (unsigned long)area->addr;
- return (void *)c->vm_start;
+ for (i = 0; i < nr_pages; i++) {
+ phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
+ if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
+ goto err;
+ p += PAGE_SIZE;
}
+ return area->addr;
+err:
+ unmap_kernel_range((unsigned long)area->addr, size);
+ vunmap(area->addr);
return NULL;
}
@@ -1230,6 +1090,19 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
return 0;
}
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+{
+ struct vm_struct *area;
+
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return cpu_addr;
+
+ area = find_vm_area(cpu_addr);
+ if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
+ return area->pages;
+ return NULL;
+}
+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1248,7 +1121,11 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
- addr = __iommu_alloc_remap(pages, size, gfp, prot);
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return pages;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot,
+ __builtin_return_address(0));
if (!addr)
goto err_mapping;
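Worth spelling out the contract implied here (reconstructed from this diff, not from separate documentation):

	/*
	 * When DMA_ATTR_NO_KERNEL_MAPPING is set, the "CPU address"
	 * returned by arm_iommu_alloc_attrs() is really the struct page **
	 * array itself, used as an opaque cookie. arm_iommu_mmap_attrs(),
	 * arm_iommu_free_attrs() and arm_iommu_get_sgtable() recover the
	 * array through __iommu_get_pages(), so callers must pass the
	 * cookie back unchanged, with the same attrs.
	 */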
@@ -1265,31 +1142,25 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
- struct arm_vmregion *c;
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-
- if (c) {
- struct page **pages = c->priv;
-
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- int i = 0;
- do {
- int ret;
+ if (!pages)
+ return -ENXIO;
- ret = vm_insert_page(vma, uaddr, pages[i++]);
- if (ret) {
- pr_err("Remapping memory, error: %d\n", ret);
- return ret;
- }
+ do {
+ int ret = vm_insert_page(vma, uaddr, *pages++);
+ if (ret) {
+ pr_err("Remapping memory failed: %d\n", ret);
+ return ret;
+ }
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- }
return 0;
}
@@ -1300,16 +1171,35 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs)
{
- struct arm_vmregion *c;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
size = PAGE_ALIGN(size);
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- struct page **pages = c->priv;
- __dma_free_remap(cpu_addr, size);
- __iommu_remove_mapping(dev, handle, size);
- __iommu_free_buffer(dev, pages, size);
+ if (!pages) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
}
+
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+ }
+
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size);
+}
+
+static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+ if (!pages)
+ return -ENXIO;
+
+ return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
+ GFP_KERNEL);
}
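sg_alloc_table_from_pages() is the new scatterlist helper added at the tail of this series ("scatterlist: add sg_alloc_table_from_pages function"); its signature, as introduced there, is approximately:

	int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				      unsigned int n_pages, unsigned long offset,
				      unsigned long size, gfp_t gfp_mask);

Physically contiguous runs of pages are coalesced, so the resulting table can end up with far fewer than n_pages entries, which keeps IOMMU-backed buffers cheap to describe.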
/*
@@ -1317,7 +1207,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
*/
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
dma_addr_t iova, iova_base;
@@ -1336,7 +1226,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
- if (!arch_is_coherent())
+ if (!arch_is_coherent() &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1383,7 +1274,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
if (__map_sg_chunk(dev, start, size, &dma->dma_address,
- dir) < 0)
+ dir, attrs) < 0)
goto bad_mapping;
dma->dma_address += offset;
@@ -1396,7 +1287,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
}
size += s->length;
}
- if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
goto bad_mapping;
dma->dma_address += offset;
@@ -1430,7 +1321,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
if (sg_dma_len(s))
__iommu_remove_mapping(dev, sg_dma_address(s),
sg_dma_len(s));
- if (!arch_is_coherent())
+ if (!arch_is_coherent() &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
@@ -1492,7 +1384,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
dma_addr_t dma_addr;
int ret, len = PAGE_ALIGN(size + offset);
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(page, offset, size, dir);
dma_addr = __alloc_iova(mapping, len);
@@ -1531,7 +1423,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
if (!iova)
return;
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_dev_to_cpu(page, offset, size, dir);
iommu_unmap(mapping->domain, iova, len);
@@ -1571,6 +1463,7 @@ struct dma_map_ops iommu_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs,
+ .get_sgtable = arm_iommu_get_sgtable,
.map_page = arm_iommu_map_page,
.unmap_page = arm_iommu_unmap_page,
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 2e8a1ef..6776160 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -59,6 +59,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
+/* consistent regions used by dma_alloc_attrs() */
+#define VM_ARM_DMA_CONSISTENT 0x20000000
+
#endif
#ifdef CONFIG_ZONE_DMA
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 62678e3..7816087 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -27,7 +27,10 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
extern void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs);
-
+extern int dma_direct_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle,
+ size_t size, struct dma_attrs *attrs);
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
@@ -207,11 +210,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
#define ARCH_HAS_DMA_MMAP_COHERENT
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index bcfdcd2..2d7bb8c 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -109,6 +109,7 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4ab88da..4694365 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -49,6 +49,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
struct dma_map_ops swiotlb_dma_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 289be75..355b9d8 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -67,6 +67,24 @@ void dma_direct_free_coherent(struct device *dev, size_t size,
#endif
}
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+{
+ unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+ return remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
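With ops->mmap wired into the powerpc ops tables below, a driver can expose a coherent buffer to userspace through the generic call. A hypothetical file_operations hook (all names and the state struct are illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/fs.h>

	struct foo_state {			/* hypothetical driver state */
		struct device *dev;
		void *cpu_addr;
		dma_addr_t dma_handle;
		size_t size;
	};

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct foo_state *st = file->private_data;

		return dma_mmap_coherent(st->dev, vma, st->cpu_addr,
					 st->dma_handle, st->size);
	}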
@@ -156,6 +174,7 @@ static inline void dma_direct_sync_single(struct device *dev,
struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
@@ -219,20 +238,3 @@ static int __init dma_init(void)
}
fs_initcall(dma_init);
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t handle, size_t size)
-{
- unsigned long pfn;
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
-#else
- pfn = page_to_pfn(virt_to_page(cpu_addr));
-#endif
- return remap_pfn_range(vma, vma->vm_start,
- pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-}
-EXPORT_SYMBOL_GPL(dma_mmap_coherent);
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 3052a93..02b3221 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -611,6 +611,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,