author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-21 12:32:08 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-21 12:32:08 -0800
commit | eae21770b4fed5597623aad0d618190fa60426ff
tree | 23c59fb7a33e93a79525e2b10d56df54d40049d1 /arch/arc/include/asm/dma-mapping.h
parent | e9f57ebcba563e0cd532926cab83c92bb4d79360
parent | 9f273c24ec5f4a6f785bb83e931b3808a07b459e
Merge branch 'akpm' (patches from Andrew)
Merge third patch-bomb from Andrew Morton:
"I'm pretty much done for -rc1 now:
- the rest of MM, basically
- lib/ updates
- checkpatch, epoll, hfs, fatfs, ptrace, coredump, exit
- cpu_mask simplifications
- kexec, rapidio, MAINTAINERS etc, etc.
- more dma-mapping cleanups/simplifications from hch"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
MAINTAINERS: add/fix git URLs for various subsystems
mm: memcontrol: add "sock" to cgroup2 memory.stat
mm: memcontrol: basic memory statistics in cgroup2 memory controller
mm: memcontrol: do not uncharge old page in page cache replacement
Documentation: cgroup: add memory.swap.{current,max} description
mm: free swap cache aggressively if memcg swap is full
mm: vmscan: do not scan anon pages if memcg swap limit is hit
swap.h: move memcg related stuff to the end of the file
mm: memcontrol: replace mem_cgroup_lruvec_online with mem_cgroup_online
mm: vmscan: pass memcg to get_scan_count()
mm: memcontrol: charge swap to cgroup2
mm: memcontrol: clean up alloc, online, offline, free functions
mm: memcontrol: flatten struct cg_proto
mm: memcontrol: rein in the CONFIG space madness
net: drop tcp_memcontrol.c
mm: memcontrol: introduce CONFIG_MEMCG_LEGACY_KMEM
mm: memcontrol: allow to disable kmem accounting for cgroup2
mm: memcontrol: account "kmem" consumers in cgroup2 memory controller
mm: memcontrol: move kmem accounting code to CONFIG_MEMCG
mm: memcontrol: separate kmem code from legacy tcp accounting code
...
Diffstat (limited to 'arch/arc/include/asm/dma-mapping.h')
-rw-r--r-- | arch/arc/include/asm/dma-mapping.h | 187
1 file changed, 3 insertions(+), 184 deletions(-)
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 2d28ba9..6602054 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -11,192 +11,11 @@
 #ifndef ASM_ARC_DMA_MAPPING_H
 #define ASM_ARC_DMA_MAPPING_H
 
-#include <asm-generic/dma-coherent.h>
-#include <asm/cacheflush.h>
+extern struct dma_map_ops arc_dma_ops;
 
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-			    dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-			  dma_addr_t dma_handle);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
-		       dma_addr_t dma_handle);
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-				  void *cpu_addr, dma_addr_t dma_addr,
-				  size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-/*
- * streaming DMA Mapping API...
- * CPU accesses page via normal paddr, thus needs to explicitly made
- * consistent before each use
- */
-
-static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
-					   enum dma_data_direction dir)
-{
-	switch (dir) {
-	case DMA_FROM_DEVICE:
-		dma_cache_inv(paddr, size);
-		break;
-	case DMA_TO_DEVICE:
-		dma_cache_wback(paddr, size);
-		break;
-	case DMA_BIDIRECTIONAL:
-		dma_cache_wback_inv(paddr, size);
-		break;
-	default:
-		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
-	}
-}
-
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
-			  enum dma_data_direction dir);
-
-#define _dma_cache_sync(addr, sz, dir)			\
-do {							\
-	if (__builtin_constant_p(dir))			\
-		__inline_dma_cache_sync(addr, sz, dir);	\
-	else						\
-		__arc_dma_cache_sync(addr, sz, dir);	\
-}							\
-while (0);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction dir)
-{
-	_dma_cache_sync((unsigned long)cpu_addr, size, dir);
-	return (dma_addr_t)cpu_addr;
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-		 size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction dir)
-{
-	unsigned long paddr = page_to_phys(page) + offset;
-	return dma_map_single(dev, (void *)paddr, size, dir);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
-	       size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg,
-	   int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sg, s, nents, i)
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
-					      s->length, dir);
-
-	return nents;
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-	     int nents, enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sg, s, nents, i)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			size_t size, enum dma_data_direction dir)
-{
-	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-			   size_t size, enum dma_data_direction dir)
-{
-	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-	_dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	_dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
-		    enum dma_data_direction dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-		       int nelems, enum dma_data_direction dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline int dma_supported(struct device *dev, u64 dma_mask)
-{
-	/* Support 32 bit DMA mask exclusively */
-	return dma_mask == DMA_BIT_MASK(32);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
+	return &arc_dma_ops;
 }
 
 #endif
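Taken together, the hunk swaps roughly 180 lines of arch-private inline DMA API for a single ops-table hook. Callers are unaffected because the generic header dispatches every streaming-mapping call through the table returned by get_dma_ops(). The following is a simplified sketch of that dispatch pattern, modeled on the include/linux/dma-mapping.h of this era rather than quoted from it; DMA-debug hooks, attrs handling, and kmemcheck annotations are omitted:

/*
 * Sketch of the generic dispatch that makes the removed arch
 * inlines unnecessary.  Each architecture supplies a struct
 * dma_map_ops and a get_dma_ops() that returns it; the common
 * wrapper then calls through the table instead of open-coding
 * cache maintenance per arch.
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);  /* &arc_dma_ops on ARC */

	/* Cache sync now happens inside the arch's map_page method. */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}

The removed cache-sync helpers do not disappear: they move behind the ops table as the methods of arc_dma_ops in arch/arc/mm/dma.c (outside this diffstat, hence not shown), which is why the header can shrink to the three lines on the + side of the hunk.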