summary | refs | log | tree | commit | diff
path: root/kernel/dma
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2019-10-29 09:57:09 +0100
committerChristoph Hellwig <hch@lst.de>2019-11-07 17:25:40 +0100
commitacaade1af3587132e7ea585f470a95261e14f60c (patch)
tree032d046986cd66197cd24e63e252774886467424 /kernel/dma
parentb3d53f5fce5d07b4981f0f4f93e579e389a99b07 (diff)
downloadlwn-acaade1af3587132e7ea585f470a95261e14f60c.tar.gz
lwn-acaade1af3587132e7ea585f470a95261e14f60c.zip
dma-direct: remove __dma_direct_free_pages
We can just call dma_free_contiguous directly instead of wrapping it. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
Diffstat (limited to 'kernel/dma')
-rw-r--r--kernel/dma/direct.c11
-rw-r--r--kernel/dma/remap.c4
2 files changed, 5 insertions, 10 deletions
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8402b29c280f..a7a2739fb747 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -153,7 +153,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
* so log an error and fail.
*/
dev_info(dev, "Rejecting highmem page from CMA.\n");
- __dma_direct_free_pages(dev, size, page);
+ dma_free_contiguous(dev, page, size);
return NULL;
}
@@ -175,11 +175,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
return ret;
}
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
-{
- dma_free_contiguous(dev, page, size);
-}
-
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
@@ -188,7 +183,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
- __dma_direct_free_pages(dev, size, cpu_addr);
+ dma_free_contiguous(dev, cpu_addr, size);
return;
}
@@ -198,7 +193,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
dma_alloc_need_uncached(dev, attrs))
cpu_addr = cached_kernel_address(cpu_addr);
- __dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
+ dma_free_contiguous(dev, virt_to_page(cpu_addr), size);
}
void *dma_direct_alloc(struct device *dev, size_t size,
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index c00b9258fa6a..fb1e50c2d48a 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -238,7 +238,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
dma_pgprot(dev, PAGE_KERNEL, attrs),
__builtin_return_address(0));
if (!ret) {
- __dma_direct_free_pages(dev, size, page);
+ dma_free_contiguous(dev, page, size);
return ret;
}
@@ -256,7 +256,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
struct page *page = pfn_to_page(__phys_to_pfn(phys));
vunmap(vaddr);
- __dma_direct_free_pages(dev, size, page);
+ dma_free_contiguous(dev, page, size);
}
}