author      Linus Torvalds <torvalds@linux-foundation.org>    2017-02-25 13:45:43 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>    2017-02-25 13:45:43 -0800
commit      ac1820fb286b552b6885d40ab34f1e59b815f1f1 (patch)
tree        b9b4e6dc5df8574e6875e4e2f5f27105addc7812 /arch/tile
parent      edccb59429657b09806146339e2b27594c1d1da0 (diff)
parent      0bbb3b7496eabb6779962a998a8a91f4a8e589ff (diff)
Merge tag 'for-next-dma_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma DMA mapping updates from Doug Ledford:
"Drop IB DMA mapping code and use core DMA code instead.
Bart Van Assche noted that the IB DMA mapping code was similar enough
to the core DMA mapping code that with a few changes it
was possible to remove the IB DMA mapping code entirely and switch the
RDMA stack to use the core DMA mapping code.
This resulted in a nice set of cleanups, but touched the entire tree
and has been kept separate for that reason."
* tag 'for-next-dma_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (37 commits)
IB/rxe, IB/rdmavt: Use dma_virt_ops instead of duplicating it
IB/core: Remove ib_device.dma_device
nvme-rdma: Switch from dma_device to dev.parent
RDS: net: Switch from dma_device to dev.parent
IB/srpt: Modify a debug statement
IB/srp: Switch from dma_device to dev.parent
IB/iser: Switch from dma_device to dev.parent
IB/IPoIB: Switch from dma_device to dev.parent
IB/rxe: Switch from dma_device to dev.parent
IB/vmw_pvrdma: Switch from dma_device to dev.parent
IB/usnic: Switch from dma_device to dev.parent
IB/qib: Switch from dma_device to dev.parent
IB/qedr: Switch from dma_device to dev.parent
IB/ocrdma: Switch from dma_device to dev.parent
IB/nes: Remove a superfluous assignment statement
IB/mthca: Switch from dma_device to dev.parent
IB/mlx5: Switch from dma_device to dev.parent
IB/mlx4: Switch from dma_device to dev.parent
IB/i40iw: Remove a superfluous assignment statement
IB/hns: Switch from dma_device to dev.parent
...
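Most of the per-driver "Switch from dma_device to dev.parent" commits above are mechanical: instead of filling in the ib_device's dedicated dma_device pointer at registration time, the driver points the embedded device's parent at the underlying PCI/platform device, and the core derives the DMA device from that. Below is a minimal sketch of that shape, assuming an illustrative driver structure (struct my_hw_dev and my_setup_dma_device() are hypothetical names, not taken from any real driver).

#include <linux/pci.h>
#include <rdma/ib_verbs.h>

/* Hypothetical driver-private structure used only for this sketch. */
struct my_hw_dev {
	struct ib_device ib_dev;
	struct pci_dev *pdev;
};

static void my_setup_dma_device(struct my_hw_dev *hw)
{
	/* Before this series, drivers typically stored a dedicated DMA device:
	 *	hw->ib_dev.dma_device = &hw->pdev->dev;
	 */

	/* After it, the parent of the embedded struct device serves that role. */
	hw->ib_dev.dev.parent = &hw->pdev->dev;
}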
Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/device.h        3
-rw-r--r--  arch/tile/include/asm/dma-mapping.h  20
-rw-r--r--  arch/tile/kernel/pci-dma.c           24
3 files changed, 18 insertions, 29 deletions
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h
index 6ab8bf1..1cf4542 100644
--- a/arch/tile/include/asm/device.h
+++ b/arch/tile/include/asm/device.h
@@ -17,9 +17,6 @@
 #define _ASM_TILE_DEVICE_H
 
 struct dev_archdata {
-	/* DMA operations on that device */
-	struct dma_map_ops *dma_ops;
-
 	/* Offset of the DMA address from the PA. */
 	dma_addr_t dma_offset;
 
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 01ceb4a..bbc71a2 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -24,17 +24,14 @@
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 #endif
 
-extern struct dma_map_ops *tile_dma_map_ops;
-extern struct dma_map_ops *gx_pci_dma_map_ops;
-extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+extern const struct dma_map_ops *tile_dma_map_ops;
+extern const struct dma_map_ops *gx_pci_dma_map_ops;
+extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
-	else
-		return tile_dma_map_ops;
+	return tile_dma_map_ops;
 }
 
 static inline dma_addr_t get_dma_offset(struct device *dev)
@@ -59,11 +56,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
-	dev->archdata.dma_ops = ops;
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 24e0f8c..569bb6d 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -329,7 +329,7 @@ tile_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static struct dma_map_ops tile_default_dma_map_ops = {
+static const struct dma_map_ops tile_default_dma_map_ops = {
 	.alloc = tile_dma_alloc_coherent,
 	.free = tile_dma_free_coherent,
 	.map_page = tile_dma_map_page,
@@ -344,7 +344,7 @@ static struct dma_map_ops tile_default_dma_map_ops = {
 	.dma_supported = tile_dma_supported
 };
 
-struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
 EXPORT_SYMBOL(tile_dma_map_ops);
 
 /* Generic PCI DMA mapping functions */
@@ -516,7 +516,7 @@ tile_pci_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static struct dma_map_ops tile_pci_default_dma_map_ops = {
+static const struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.alloc = tile_pci_dma_alloc_coherent,
 	.free = tile_pci_dma_free_coherent,
 	.map_page = tile_pci_dma_map_page,
@@ -531,7 +531,7 @@ static struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.dma_supported = tile_pci_dma_supported
 };
 
-struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
 EXPORT_SYMBOL(gx_pci_dma_map_ops);
 
 /* PCI DMA mapping functions for legacy PCI devices */
@@ -552,7 +552,7 @@ static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
 
-static struct dma_map_ops pci_swiotlb_dma_ops = {
+static const struct dma_map_ops pci_swiotlb_dma_ops = {
 	.alloc = tile_swiotlb_alloc_coherent,
 	.free = tile_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
@@ -567,7 +567,7 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
-static struct dma_map_ops pci_hybrid_dma_ops = {
+static const struct dma_map_ops pci_hybrid_dma_ops = {
 	.alloc = tile_swiotlb_alloc_coherent,
 	.free = tile_swiotlb_free_coherent,
 	.map_page = tile_pci_dma_map_page,
@@ -582,18 +582,18 @@ static struct dma_map_ops pci_hybrid_dma_ops = {
 	.dma_supported = tile_pci_dma_supported
 };
 
-struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
 #else
-struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 #endif
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	/*
 	 * For PCI devices with 64-bit DMA addressing capability, promote
@@ -623,7 +623,7 @@ EXPORT_SYMBOL(dma_set_mask);
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	/*
 	 * For PCI devices with 64-bit DMA addressing capability, promote
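The arch/tile changes above mirror the core side of the series: dma_ops moves out of dev_archdata and into struct device, get_dma_ops()/set_dma_ops() leave the architecture header, and the architecture keeps only a get_arch_dma_ops() fallback plus const-qualified ops tables. For context, the sketch below shows roughly what the generic dispatch in include/linux/dma-mapping.h looks like in this kernel generation; it is reproduced from memory as an illustration and is not part of the arch/tile diff.

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* Per-device ops (such as dma_virt_ops used by rxe/rdmavt in this
	 * series) take priority over the architecture default.
	 */
	if (dev && dev->dma_ops)
		return dev->dma_ops;

	/* Otherwise fall back to the architecture hook, which arch/tile now
	 * implements as get_arch_dma_ops() above.
	 */
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}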