author		Christoph Hellwig <hch@lst.de>	2020-08-17 16:51:47 +0200
committer	Christoph Hellwig <hch@lst.de>	2020-09-11 08:14:57 +0200
commit		170780be324da2c32ec0f7e9de2ebd0c4370ade5 (patch)
tree		b306de992ac6d8621c7426b72f2bb8008f32265d /arch/mips/jazz/jazzdma.c
parent		a4877c44a48e25f98f767a0fd54acacac55d1aac (diff)
MIPS/jazzdma: decouple from dma-direct
The jazzdma ops implement support for a very basic IOMMU. Thus we
really should not use the dma-direct code that takes physical address
limits into account. This survived through the great MIPS DMA ops
cleanup mostly because I was lazy, but now it is time to fully split
the implementations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
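The heart of the change is easiest to see in isolation: behind the VDMA
IOMMU, the bus address the device sees comes from vdma_alloc(), not from
the physical address of the backing pages, so alloc_pages() needs no zone
restriction derived from the device's DMA mask. A condensed sketch of the
new allocation path follows (jazz_alloc_sketch is an illustrative name,
not in the patch; error unwinding and the DMA_ATTR_* special cases are
trimmed — see the full hunk below):

/* Condensed sketch only -- the real jazz_dma_alloc() in the hunk below
 * also handles DMA_ATTR_NO_WARN and DMA_ATTR_NON_CONSISTENT. */
static void *jazz_alloc_sketch(size_t size, gfp_t gfp, dma_addr_t *dma_handle)
{
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	page = alloc_pages(gfp, get_order(size));	/* no DMA-mask zone limit */
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* The VDMA page tables supply the device-visible address. */
	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
	if (*dma_handle == DMA_MAPPING_ERROR) {
		__free_pages(page, get_order(size));
		return NULL;
	}

	/* Flush the cached alias, then hand out the uncached one. */
	arch_dma_prep_coherent(page, size);
	return (void *)(UNCAC_BASE + __pa(ret));
}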
Diffstat (limited to 'arch/mips/jazz/jazzdma.c')
-rw-r--r--	arch/mips/jazz/jazzdma.c	33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index fe40dbed04c1..dab4d058cea9 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -16,7 +16,6 @@
 #include <linux/memblock.h>
 #include <linux/spinlock.h>
 #include <linux/gfp.h>
-#include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <asm/mipsregs.h>
 #include <asm/jazz.h>
@@ -492,26 +491,39 @@ int vdma_get_enable(int channel)
 static void *jazz_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+	struct page *page;
 	void *ret;
 
-	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
-	if (!ret)
-		return NULL;
+	if (attrs & DMA_ATTR_NO_WARN)
+		gfp |= __GFP_NOWARN;
 
-	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
-	if (*dma_handle == DMA_MAPPING_ERROR) {
-		dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
+	size = PAGE_ALIGN(size);
+	page = alloc_pages(gfp, get_order(size));
+	if (!page)
 		return NULL;
-	}
+	ret = page_address(page);
+	memset(ret, 0, size);
+	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
+	if (*dma_handle == DMA_MAPPING_ERROR)
+		goto out_free_pages;
+
+	if (attrs & DMA_ATTR_NON_CONSISTENT)
+		return ret;
+	arch_dma_prep_coherent(page, size);
+	return (void *)(UNCAC_BASE + __pa(ret));
 
-	return ret;
+out_free_pages:
+	__free_pages(page, get_order(size));
+	return NULL;
 }
 
 static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	vdma_free(dma_handle);
-	dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
+	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+		vaddr = __va(vaddr - UNCAC_BASE);
+	__free_pages(virt_to_page(vaddr), get_order(size));
 }
 
 static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
@@ -608,7 +620,6 @@ const struct dma_map_ops jazz_dma_ops = {
 	.sync_single_for_device	= jazz_dma_sync_single_for_device,
 	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
-	.dma_supported		= dma_direct_supported,
 	.cache_sync		= arch_dma_cache_sync,
 	.mmap			= dma_common_mmap,
 	.get_sgtable		= dma_common_get_sgtable,
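
A subtlety worth spelling out is the address round-trip between the two
paths above: jazz_dma_alloc() returns a KSEG1 (uncached) alias of the
buffer, and jazz_dma_free() converts that alias back to the cached linear
address before virt_to_page()/__free_pages(). A tiny standalone model of
the arithmetic, using the conventional 32-bit MIPS segment bases purely
for illustration:

/* Standalone model of the UNCAC_BASE round-trip; the constants are the
 * conventional MIPS32 KSEG0/KSEG1 bases, used here only to illustrate. */
#include <stdio.h>
#include <stdint.h>

#define KSEG0_BASE 0x80000000ul	/* cached kernel window (PAGE_OFFSET) */
#define KSEG1_BASE 0xa0000000ul	/* uncached alias (UNCAC_BASE) */

static uintptr_t model_pa(uintptr_t va) { return va - KSEG0_BASE; }	/* ~__pa() */
static uintptr_t model_va(uintptr_t pa) { return pa + KSEG0_BASE; }	/* ~__va() */

int main(void)
{
	uintptr_t cached   = 0x80123000ul;			/* page_address() result */
	uintptr_t uncached = KSEG1_BASE + model_pa(cached);	/* what alloc returns */
	uintptr_t back     = model_va(uncached - KSEG1_BASE);	/* what free recovers */

	printf("cached %#lx -> uncached %#lx -> back %#lx\n",
	       (unsigned long)cached, (unsigned long)uncached,
	       (unsigned long)back);
	return back == cached ? 0 : 1;
}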