From dff8d6c1ed584de65aac40494d3e7468c50980c3 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 16 Aug 2018 15:30:39 +0300
Subject: swiotlb: remove the overflow buffer

Like all other DMA mapping drivers, just return an error code instead
of an actual memory buffer.  The reason for the overflow buffer was
that at the time swiotlb was invented there was no way to check for
DMA mapping errors, but this has long been fixed.

Signed-off-by: Christoph Hellwig
Acked-by: Catalin Marinas
Reviewed-by: Robin Murphy
Reviewed-by: Konrad Rzeszutek Wilk
---
 arch/arm64/mm/dma-mapping.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/mm/dma-mapping.c')

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 072c51fb07d7..8d91b927e09e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -324,7 +324,7 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 {
         if (swiotlb)
-                return swiotlb_dma_mapping_error(hwdev, addr);
+                return dma_direct_mapping_error(hwdev, addr);
         return 0;
 }
 
--
cgit v1.2.3
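The change above works because every driver is already expected to check the
returned DMA address before using it. A minimal sketch of that driver-side
pattern (the example_* names are hypothetical, not part of this patch):

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver fragment: map a buffer for device DMA and fail
 * cleanly when no mapping (e.g. no bounce-buffer slot) is available.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM;
        return 0;
}

With that check in place, a failed bounce-buffer allocation can simply
propagate as an error instead of silently landing in an overflow buffer.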
From fafadcd16595c1df82df399f62421718ec9bf70a Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sun, 30 Sep 2018 16:13:33 -0700
Subject: swiotlb: don't dip into swiotlb pool for coherent allocations

All architectures that support swiotlb also have a memory zone that
backs up these less-than-full-addressing allocations (usually
ZONE_DMA32).  Because of that it is rather pointless to fall back to
the global swiotlb buffer when the normal DMA direct allocation fails -
the only thing this does is eat up bounce buffers that would be more
useful for serving streaming mappings.

Signed-off-by: Christoph Hellwig
Acked-by: Catalin Marinas
Acked-by: Konrad Rzeszutek Wilk
---
 arch/arm64/mm/dma-mapping.c |   6 +--
 include/linux/swiotlb.h     |   5 ---
 kernel/dma/swiotlb.c        | 105 +-------------------------------------------
 3 files changed, 5 insertions(+), 111 deletions(-)

(limited to 'arch/arm64/mm/dma-mapping.c')

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 8d91b927e09e..eee6cfcfde9e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -112,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
                 return addr;
         }
 
-        ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
+        ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
         if (!ptr)
                 goto no_mem;
 
@@ -133,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
         return coherent_ptr;
 
 no_map:
-        swiotlb_free(dev, size, ptr, *dma_handle, attrs);
+        dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
 no_mem:
         return NULL;
 }
@@ -151,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
                         return;
                 vunmap(vaddr);
         }
-        swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
+        dma_direct_free_pages(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index f847c1b265c4..a387b59640a4 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -67,11 +67,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 
 /* Accessory functions. */
 
-void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
-                gfp_t flags, unsigned long attrs);
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-                dma_addr_t dma_addr, unsigned long attrs);
-
 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction dir,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 57507b18caa4..1a01b0ac0a5e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -622,78 +622,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
         }
 }
 
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
-                size_t size)
-{
-        u64 mask = DMA_BIT_MASK(32);
-
-        if (dev && dev->coherent_dma_mask)
-                mask = dev->coherent_dma_mask;
-        return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                unsigned long attrs)
-{
-        phys_addr_t phys_addr;
-
-        if (swiotlb_force == SWIOTLB_NO_FORCE)
-                goto out_warn;
-
-        phys_addr = swiotlb_tbl_map_single(dev,
-                        __phys_to_dma(dev, io_tlb_start),
-                        0, size, DMA_FROM_DEVICE, attrs);
-        if (phys_addr == SWIOTLB_MAP_ERROR)
-                goto out_warn;
-
-        *dma_handle = __phys_to_dma(dev, phys_addr);
-        if (!dma_coherent_ok(dev, *dma_handle, size))
-                goto out_unmap;
-
-        memset(phys_to_virt(phys_addr), 0, size);
-        return phys_to_virt(phys_addr);
-
-out_unmap:
-        dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-                (unsigned long long)dev->coherent_dma_mask,
-                (unsigned long long)*dma_handle);
-
-        /*
-         * DMA_TO_DEVICE to avoid memcpy in unmap_single.
-         * DMA_ATTR_SKIP_CPU_SYNC is optional.
-         */
-        swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-                        DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
-        if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
-                dev_warn(dev,
-                        "swiotlb: coherent allocation failed, size=%zu\n",
-                        size);
-                dump_stack();
-        }
-        return NULL;
-}
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
-                dma_addr_t dma_addr)
-{
-        phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
-
-        WARN_ON_ONCE(irqs_disabled());
-
-        if (!is_swiotlb_buffer(phys_addr))
-                return false;
-
-        /*
-         * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
-         * DMA_ATTR_SKIP_CPU_SYNC is optional.
-         */
-        swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-                        DMA_ATTR_SKIP_CPU_SYNC);
-        return true;
-}
-
 static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
                 size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
@@ -926,39 +854,10 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
         return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                gfp_t gfp, unsigned long attrs)
-{
-        void *vaddr;
-
-        /* temporary workaround: */
-        if (gfp & __GFP_NOWARN)
-                attrs |= DMA_ATTR_NO_WARN;
-
-        /*
-         * Don't print a warning when the first allocation attempt fails.
-         * swiotlb_alloc_coherent() will print a warning when the DMA memory
-         * allocation ultimately failed.
-         */
-        gfp |= __GFP_NOWARN;
-
-        vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-        if (!vaddr)
-                vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
-        return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-                dma_addr_t dma_addr, unsigned long attrs)
-{
-        if (!swiotlb_free_buffer(dev, size, dma_addr))
-                dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
 const struct dma_map_ops swiotlb_dma_ops = {
         .mapping_error          = dma_direct_mapping_error,
-        .alloc                  = swiotlb_alloc,
-        .free                   = swiotlb_free,
+        .alloc                  = dma_direct_alloc,
+        .free                   = dma_direct_free,
         .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
         .sync_single_for_device = swiotlb_sync_single_for_device,
         .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
--
cgit v1.2.3
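Whether a coherent allocation is usable by a device is a pure addressing
check, which a low memory zone can satisfy without touching the bounce pool.
A rough sketch of that logic, loosely following the dma_coherent_ok() helper
removed above (the example_* names are hypothetical, and this simplifies what
the dma-direct code actually does):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Addressing check: the whole buffer must sit below the coherent mask. */
static bool example_coherent_ok(struct device *dev, dma_addr_t addr,
                                size_t size)
{
        u64 mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);

        return addr + size - 1 <= mask;
}

/*
 * If an ordinary allocation would fail the check, retry from a low zone
 * (ZONE_DMA32) instead of consuming swiotlb bounce slots.
 */
static struct page *example_alloc_low(struct device *dev, size_t size,
                                      gfp_t gfp)
{
        if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                gfp |= GFP_DMA32;
        return alloc_pages(gfp, get_order(size));
}

When the first allocation attempt lands too high, retrying from ZONE_DMA32
preserves the scarce swiotlb slots for streaming mappings that genuinely
need bouncing.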
From 886643b766321f15f63bd950be618cbb6dd22bbc Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 8 Oct 2018 09:12:01 +0200
Subject: arm64: use the generic swiotlb_dma_ops

Now that the generic swiotlb code supports non-coherent DMA we can
switch to it for arm64.  For that we need to refactor the existing
alloc/free/mmap/pgprot helpers to be used as the architecture hooks,
and implement the standard arch_sync_dma_for_{device,cpu} hooks for
cache maintenance in the streaming DMA paths, which also implies using
the generic dma_coherent flag in struct device.

Note that we need to keep the old is_device_dma_coherent function
around for now, so that the shared arm/arm64 Xen code keeps working.

Signed-off-by: Christoph Hellwig
Acked-by: Catalin Marinas
---
 arch/arm64/Kconfig                   |   4 +
 arch/arm64/include/asm/device.h      |   1 -
 arch/arm64/include/asm/dma-mapping.h |   7 +-
 arch/arm64/mm/dma-mapping.c          | 253 +++++++----------------------------
 4 files changed, 55 insertions(+), 210 deletions(-)

(limited to 'arch/arm64/mm/dma-mapping.c')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b1a0e95c751..c4db5131d837 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -11,6 +11,8 @@ config ARM64
         select ARCH_CLOCKSOURCE_DATA
         select ARCH_HAS_DEBUG_VIRTUAL
         select ARCH_HAS_DEVMEM_IS_ALLOWED
+        select ARCH_HAS_DMA_COHERENT_TO_PFN
+        select ARCH_HAS_DMA_MMAP_PGPROT
         select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
         select ARCH_HAS_ELF_RANDOMIZE
         select ARCH_HAS_FAST_MULTIPLIER
@@ -24,6 +26,8 @@ config ARM64
         select ARCH_HAS_SG_CHAIN
         select ARCH_HAS_STRICT_KERNEL_RWX
         select ARCH_HAS_STRICT_MODULE_RWX
+        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+        select ARCH_HAS_SYNC_DMA_FOR_CPU
         select ARCH_HAS_SYSCALL_WRAPPER
         select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
         select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 5a5fa47a6b18..3dd3d664c5c5 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -23,7 +23,6 @@ struct dev_archdata {
 #ifdef CONFIG_XEN
         const struct dma_map_ops *dev_dma_ops;
 #endif
-        bool dma_coherent;
 };
 
 struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index b7847eb8a7bb..c41f3fb1446c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -44,10 +44,13 @@ void arch_teardown_dma_ops(struct device *dev);
 #define arch_teardown_dma_ops   arch_teardown_dma_ops
 #endif
 
-/* do not use this function in a driver */
+/*
+ * Do not use this function in a driver, it is only provided for
+ * arch/arm/mm/xen.c, which is used by arm64 as well.
+ */
 static inline bool is_device_dma_coherent(struct device *dev)
 {
-        return dev->archdata.dma_coherent;
+        return dev->dma_coherent;
 }
 
 #endif  /* __KERNEL__ */
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index eee6cfcfde9e..5a5d7990e980 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/genalloc.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
@@ -32,16 +33,6 @@
 #include <asm/cacheflush.h>
 
-static int swiotlb __ro_after_init;
-
-static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
-                                 bool coherent)
-{
-        if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
-                return pgprot_writecombine(prot);
-        return prot;
-}
-
 static struct gen_pool *atomic_pool __ro_after_init;
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
@@ -91,18 +82,16 @@ static int __free_from_pool(void *start, size_t size)
         return 1;
 }
 
-static void *__dma_alloc(struct device *dev, size_t size,
-                         dma_addr_t *dma_handle, gfp_t flags,
-                         unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+                gfp_t flags, unsigned long attrs)
 {
         struct page *page;
         void *ptr, *coherent_ptr;
-        bool coherent = is_device_dma_coherent(dev);
-        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
+        pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
 
         size = PAGE_ALIGN(size);
 
-        if (!coherent && !gfpflags_allow_blocking(flags)) {
+        if (!gfpflags_allow_blocking(flags)) {
                 struct page *page = NULL;
                 void *addr = __alloc_from_pool(size, &page, flags);
 
@@ -116,10 +105,6 @@ static void *__dma_alloc(struct device *dev, size_t size,
         if (!ptr)
                 goto no_mem;
 
-        /* no need for non-cacheable mapping if coherent */
-        if (coherent)
-                return ptr;
-
         /* remove any dirty cache lines on the kernel alias */
         __dma_flush_area(ptr, size);
 
@@ -138,125 +123,52 @@ no_mem:
         return NULL;
 }
 
-static void __dma_free(struct device *dev, size_t size,
-                       void *vaddr, dma_addr_t dma_handle,
-                       unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+                dma_addr_t dma_handle, unsigned long attrs)
 {
-        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+        if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
+                void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
-        size = PAGE_ALIGN(size);
-
-        if (!is_device_dma_coherent(dev)) {
-                if (__free_from_pool(vaddr, size))
-                        return;
                 vunmap(vaddr);
+                dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
         }
-        dma_direct_free_pages(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
-static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
-                                     unsigned long offset, size_t size,
-                                     enum dma_data_direction dir,
-                                     unsigned long attrs)
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+                dma_addr_t dma_addr)
 {
-        dma_addr_t dev_addr;
-
-        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-        if (!is_device_dma_coherent(dev) &&
-            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
-
-        return dev_addr;
+        return __phys_to_pfn(dma_to_phys(dev, dma_addr));
 }
 
-static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
-                                 size_t size, enum dma_data_direction dir,
-                                 unsigned long attrs)
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+                unsigned long attrs)
 {
-        if (!is_device_dma_coherent(dev) &&
-            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
-        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
-}
-
-static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
-                                  int nelems, enum dma_data_direction dir,
-                                  unsigned long attrs)
-{
-        struct scatterlist *sg;
-        int i, ret;
-
-        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-        if (!is_device_dma_coherent(dev) &&
-            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-                for_each_sg(sgl, sg, ret, i)
-                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-                                       sg->length, dir);
-
-        return ret;
-}
-
-static void __swiotlb_unmap_sg_attrs(struct device *dev,
-                                     struct scatterlist *sgl, int nelems,
-                                     enum dma_data_direction dir,
-                                     unsigned long attrs)
-{
-        struct scatterlist *sg;
-        int i;
-
-        if (!is_device_dma_coherent(dev) &&
-            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-                for_each_sg(sgl, sg, nelems, i)
-                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-                                         sg->length, dir);
-        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
+        if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
+                return pgprot_writecombine(prot);
+        return prot;
 }
 
-static void __swiotlb_sync_single_for_cpu(struct device *dev,
-                                          dma_addr_t dev_addr, size_t size,
-                                          enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+                size_t size, enum dma_data_direction dir)
 {
-        if (!is_device_dma_coherent(dev))
-                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
-        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
+        __dma_map_area(phys_to_virt(paddr), size, dir);
 }
 
-static void __swiotlb_sync_single_for_device(struct device *dev,
-                                             dma_addr_t dev_addr, size_t size,
-                                             enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
 {
-        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
-        if (!is_device_dma_coherent(dev))
-                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+        __dma_unmap_area(phys_to_virt(paddr), size, dir);
 }
 
-static void __swiotlb_sync_sg_for_cpu(struct device *dev,
-                                      struct scatterlist *sgl, int nelems,
-                                      enum dma_data_direction dir)
+static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
+                                      struct page *page, size_t size)
 {
-        struct scatterlist *sg;
-        int i;
-
-        if (!is_device_dma_coherent(dev))
-                for_each_sg(sgl, sg, nelems, i)
-                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-                                         sg->length, dir);
-        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
-}
+        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 
-static void __swiotlb_sync_sg_for_device(struct device *dev,
-                                         struct scatterlist *sgl, int nelems,
-                                         enum dma_data_direction dir)
-{
-        struct scatterlist *sg;
-        int i;
+        if (!ret)
+                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
 
-        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
-        if (!is_device_dma_coherent(dev))
-                for_each_sg(sgl, sg, nelems, i)
-                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-                                       sg->length, dir);
+        return ret;
 }
 
 static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
@@ -277,74 +189,6 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
         return ret;
 }
 
-static int __swiotlb_mmap(struct device *dev,
-                          struct vm_area_struct *vma,
-                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                          unsigned long attrs)
-{
-        int ret;
-        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
-
-        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-                                             is_device_dma_coherent(dev));
-
-        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-                return ret;
-
-        return __swiotlb_mmap_pfn(vma, pfn, size);
-}
-
-static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
-                                      struct page *page, size_t size)
-{
-        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-
-        if (!ret)
-                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-
-        return ret;
-}
-
-static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t handle, size_t size,
-                                 unsigned long attrs)
-{
-        struct page *page = phys_to_page(dma_to_phys(dev, handle));
-
-        return __swiotlb_get_sgtable_page(sgt, page, size);
-}
-
-static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
-{
-        if (swiotlb)
-                return swiotlb_dma_supported(hwdev, mask);
-        return 1;
-}
-
-static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
-{
-        if (swiotlb)
-                return dma_direct_mapping_error(hwdev, addr);
-        return 0;
-}
-
-static const struct dma_map_ops arm64_swiotlb_dma_ops = {
-        .alloc = __dma_alloc,
-        .free = __dma_free,
-        .mmap = __swiotlb_mmap,
-        .get_sgtable = __swiotlb_get_sgtable,
-        .map_page = __swiotlb_map_page,
-        .unmap_page = __swiotlb_unmap_page,
-        .map_sg = __swiotlb_map_sg_attrs,
-        .unmap_sg = __swiotlb_unmap_sg_attrs,
-        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
-        .sync_single_for_device = __swiotlb_sync_single_for_device,
-        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
-        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
-        .dma_supported = __swiotlb_dma_supported,
-        .mapping_error = __swiotlb_dma_mapping_error,
-};
-
 static int __init atomic_pool_init(void)
 {
         pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -500,10 +344,6 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-        if (swiotlb_force == SWIOTLB_FORCE ||
-            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
-                swiotlb = 1;
-
         WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
                    TAINT_CPU_OUT_OF_SPEC,
                    "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
@@ -528,7 +368,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                  dma_addr_t *handle, gfp_t gfp,
                                  unsigned long attrs)
 {
-        bool coherent = is_device_dma_coherent(dev);
+        bool coherent = dev_is_dma_coherent(dev);
         int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
         size_t iosize = size;
         void *addr;
@@ -569,7 +409,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                         addr = NULL;
                 }
         } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+                pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
                 struct page *page;
 
                 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -596,7 +436,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 size >> PAGE_SHIFT);
                 }
         } else {
-                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+                pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
                 struct page **pages;
 
                 pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
@@ -658,8 +498,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
         struct vm_struct *area;
         int ret;
 
-        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-                                             is_device_dma_coherent(dev));
+        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
 
         if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                 return ret;
@@ -709,11 +548,11 @@ static void __iommu_sync_single_for_cpu(struct device *dev,
 {
         phys_addr_t phys;
 
-        if (is_device_dma_coherent(dev))
+        if (dev_is_dma_coherent(dev))
                 return;
 
         phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
-        __dma_unmap_area(phys_to_virt(phys), size, dir);
+        arch_sync_dma_for_cpu(dev, phys, size, dir);
 }
 
 static void __iommu_sync_single_for_device(struct device *dev,
@@ -722,11 +561,11 @@ static void __iommu_sync_single_for_device(struct device *dev,
 {
         phys_addr_t phys;
 
-        if (is_device_dma_coherent(dev))
+        if (dev_is_dma_coherent(dev))
                 return;
 
         phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
-        __dma_map_area(phys_to_virt(phys), size, dir);
+        arch_sync_dma_for_device(dev, phys, size, dir);
 }
 
 static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
@@ -734,7 +573,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                            enum dma_data_direction dir,
                            unsigned long attrs)
 {
-        bool coherent = is_device_dma_coherent(dev);
+        bool coherent = dev_is_dma_coherent(dev);
         int prot = dma_info_to_prot(dir, coherent, attrs);
         dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
@@ -762,11 +601,11 @@ static void __iommu_sync_sg_for_cpu(struct device *dev,
         struct scatterlist *sg;
         int i;
 
-        if (is_device_dma_coherent(dev))
+        if (dev_is_dma_coherent(dev))
                 return;
 
         for_each_sg(sgl, sg, nelems, i)
-                __dma_unmap_area(sg_virt(sg), sg->length, dir);
+                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
 }
 
 static void __iommu_sync_sg_for_device(struct device *dev,
@@ -776,18 +615,18 @@ static void __iommu_sync_sg_for_device(struct device *dev,
         struct scatterlist *sg;
         int i;
 
-        if (is_device_dma_coherent(dev))
+        if (dev_is_dma_coherent(dev))
                 return;
 
         for_each_sg(sgl, sg, nelems, i)
-                __dma_map_area(sg_virt(sg), sg->length, dir);
+                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
 }
 
 static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                 int nelems, enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-        bool coherent = is_device_dma_coherent(dev);
+        bool coherent = dev_is_dma_coherent(dev);
 
         if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                 __iommu_sync_sg_for_device(dev, sgl, nelems, dir);
@@ -879,9 +718,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                         const struct iommu_ops *iommu, bool coherent)
 {
         if (!dev->dma_ops)
-                dev->dma_ops = &arm64_swiotlb_dma_ops;
+                dev->dma_ops = &swiotlb_dma_ops;
 
-        dev->archdata.dma_coherent = coherent;
+        dev->dma_coherent = coherent;
         __iommu_setup_dma_ops(dev, dma_base, size, iommu);
 
 #ifdef CONFIG_XEN
--
cgit v1.2.3
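After this series the split is: the generic code owns mapping and bouncing,
and the architecture only supplies cache maintenance through the new
arch_sync_dma_for_{device,cpu} hooks plus the dev->dma_coherent flag. A
simplified sketch of how a generic streaming map operation drives those
hooks (illustrative only; example_map_page is hypothetical and omits the
swiotlb bounce path that the real generic code also handles):

#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>

/*
 * Map a page for streaming DMA, deferring cache maintenance to the
 * architecture hook when the device is not cache-coherent.
 */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir)
{
        phys_addr_t phys = page_to_phys(page) + offset;

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_device(dev, phys, size, dir);
        return phys_to_dma(dev, phys);
}

Because the hooks take a physical address, the same two arm64 functions
serve both the swiotlb path and the IOMMU path above, which is what lets
the per-architecture dma_map_ops go away.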