author | Christoph Hellwig <hch@lst.de> | 2019-02-13 08:01:29 +0100
committer | Michael Ellerman <mpe@ellerman.id.au> | 2019-02-18 22:41:04 +1100
commit | 461db2bdbf3c978e76dd10a04a63fa06bb29114f (patch)
tree | 40cb4af74482f83549335f9d35f404bb1277b459 /arch/powerpc/include/asm/dma-mapping.h
parent | 31f940afda6add7a7bb182adde97e615e5355c6d (diff)
download | lwn-461db2bdbf3c978e76dd10a04a63fa06bb29114f.tar.gz, lwn-461db2bdbf3c978e76dd10a04a63fa06bb29114f.zip
powerpc/dma: use the dma_direct mapping routines
Switch the streaming DMA mapping and ownership transfer methods to the
functionally identical dma_direct_ versions. Factor the cache
maintenance helpers into the form expected by the common code for that.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
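
The commit message is terse about what "the form expected by the common code" means: the generic dma-direct code performs ownership transfers by calling arch_sync_dma_for_device() and arch_sync_dma_for_cpu() on architectures that select ARCH_HAS_SYNC_DMA_FOR_DEVICE/CPU. Below is a minimal sketch of that hook shape, assuming the kernel-5.0-era signatures from <linux/dma-noncoherent.h>; flush_dcache_range_for_dma() is a hypothetical stand-in for whatever low-level flush/invalidate primitive the architecture already has, not the actual powerpc helper from this series.

#include <linux/dma-noncoherent.h>	/* arch_sync_dma_for_{device,cpu}() */
#include <linux/dma-direction.h>

/* Hypothetical low-level primitive provided elsewhere by the arch. */
extern void flush_dcache_range_for_dma(phys_addr_t paddr, size_t size,
				       enum dma_data_direction dir);

/* Called by the dma-direct code before handing a buffer to the device. */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	flush_dcache_range_for_dma(paddr, size, dir);
}

/* Called by the dma-direct code before the CPU touches the buffer again. */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	flush_dcache_range_for_dma(paddr, size, dir);
}

Once the architecture exposes its cache maintenance in this form, the generic dma_direct_ streaming routines can drive it directly, which is what lets the arch-private declarations in the diff below go away.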
Diffstat (limited to 'arch/powerpc/include/asm/dma-mapping.h')
-rw-r--r-- | arch/powerpc/include/asm/dma-mapping.h | 30
1 file changed, 0 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index cdf70aaeafeb..4de9d4ee23c1 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -25,36 +25,6 @@ extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
 				       void *vaddr, dma_addr_t dma_handle,
 				       unsigned long attrs);
-int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction direction,
-		unsigned long attrs);
-dma_addr_t dma_nommu_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs);
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * DMA-consistent mapping functions for PowerPCs that don't support
- * cache snooping. These allocate/free a region of uncached mapped
- * memory space for use with DMA devices. Alternatively, you could
- * allocate the space "normally" and use the cache management functions
- * to ensure it is consistent.
- */
-struct device;
-extern void __dma_sync(void *vaddr, size_t size, int direction);
-extern void __dma_sync_page(struct page *page, unsigned long offset,
-			    size_t size, int direction);
-extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
-
-#else /* ! CONFIG_NOT_COHERENT_CACHE */
-/*
- * Cache coherent cores.
- */
-
-#define __dma_sync(addr, size, rw)	((void)0)
-#define __dma_sync_page(pg, off, sz, rw)	((void)0)
-
-#endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
 static inline unsigned long device_to_mask(struct device *dev)
 {
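
For context, here is a simplified sketch of the generic streaming map path that replaces dma_nommu_map_page(): the real dma_direct_map_page() lives in kernel/dma/direct.c and additionally handles addressing limits and swiotlb bounce buffering, which are omitted here. The point is that the translation is a plain physical-to-DMA address conversion followed by the cache-maintenance hook on non-coherent devices, which is why the two paths are functionally identical for this platform. The sketch_ prefix marks this as an illustration, not the kernel's actual function body.

#include <linux/dma-direct.h>		/* phys_to_dma() */
#include <linux/dma-noncoherent.h>	/* dev_is_dma_coherent(), arch_sync_dma_for_device() */

static dma_addr_t sketch_dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	/*
	 * Non-coherent platforms flush/invalidate CPU caches before the
	 * device sees the buffer; cache-coherent ones skip this entirely.
	 */
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, phys, size, dir);

	return dma_addr;
}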