author     Christoph Hellwig <hch@lst.de>    2020-09-01 13:59:45 +0200
committer  Christoph Hellwig <hch@lst.de>    2020-09-25 06:20:47 +0200
commit     de7cf917768f438aae6d2f4e9bced3739f15f5b6 (patch)
tree       ded43ac607a6825a0e1d73e1b199568b005f7905
parent     efa70f2fdc842e63a0a13223e0e83cedcc2117f1 (diff)
dma-mapping: add new {alloc,free}_noncoherent dma_map_ops methods
This will allow IOMMU drivers to allocate non-contiguous memory and
return a vmapped virtual address.
Signed-off-by: Christoph Hellwig <hch@lst.de>
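
[Editor's note: as context, not part of the commit, here is a minimal sketch of how a driver could consume this pair of calls. The "mydev" names below are invented for illustration; dma_alloc_noncoherent(), dma_free_noncoherent(), and dma_sync_single_for_{device,cpu}() are the real DMA API entry points. Because the memory is non-coherent, the caller owns cache maintenance and must transfer ownership explicitly with the sync calls.]

/* Hypothetical consumer -- "mydev" is invented; the DMA API calls are real. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/string.h>

struct mydev_ring {
	void		*vaddr;	/* kernel virtual address (may be vmapped) */
	dma_addr_t	dma;	/* bus address to program into the device */
	size_t		size;
};

static int mydev_setup_ring(struct device *dev, struct mydev_ring *ring)
{
	ring->size = SZ_64K;
	ring->vaddr = dma_alloc_noncoherent(dev, ring->size, &ring->dma,
					    DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!ring->vaddr)
		return -ENOMEM;

	/* CPU initializes the ring, then hands ownership to the device. */
	memset(ring->vaddr, 0, ring->size);
	dma_sync_single_for_device(dev, ring->dma, ring->size,
				   DMA_BIDIRECTIONAL);
	return 0;
}

static void mydev_teardown_ring(struct device *dev, struct mydev_ring *ring)
{
	/* Reclaim CPU ownership before reading or freeing the buffer. */
	dma_sync_single_for_cpu(dev, ring->dma, ring->size, DMA_BIDIRECTIONAL);
	dma_free_noncoherent(dev, ring->size, ring->vaddr, ring->dma,
			     DMA_BIDIRECTIONAL);
}
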
-rw-r--r--  include/linux/dma-mapping.h |  5
-rw-r--r--  kernel/dma/mapping.c        | 33
2 files changed, 32 insertions, 6 deletions
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4b9b1d64f5ec..7c77cd6f3604 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -74,6 +74,11 @@ struct dma_map_ops {
 			gfp_t gfp);
 	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
 			dma_addr_t dma_handle, enum dma_data_direction dir);
+	void* (*alloc_noncoherent)(struct device *dev, size_t size,
+			dma_addr_t *dma_handle, enum dma_data_direction dir,
+			gfp_t gfp);
+	void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
+			dma_addr_t dma_handle, enum dma_data_direction dir);
 	int (*mmap)(struct device *, struct vm_area_struct *,
 			void *, dma_addr_t, size_t,
 			unsigned long attrs);
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 06115f59f4ff..9669550656a0 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -513,19 +513,40 @@ EXPORT_SYMBOL_GPL(dma_free_pages);
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
 {
-	struct page *page;
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	void *vaddr;
 
-	page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
-	if (!page)
-		return NULL;
-	return page_address(page);
+	if (!ops || !ops->alloc_noncoherent) {
+		struct page *page;
+
+		page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+		if (!page)
+			return NULL;
+		return page_address(page);
+	}
+
+	size = PAGE_ALIGN(size);
+	vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp);
+	if (vaddr)
+		debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir,
+				*dma_handle);
+	return vaddr;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, enum dma_data_direction dir)
 {
-	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops || !ops->free_noncoherent) {
+		dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
+		return;
+	}
+
+	size = PAGE_ALIGN(size);
+	debug_dma_unmap_page(dev, dma_handle, size, dir);
+	ops->free_noncoherent(dev, size, vaddr, dma_handle, dir);
 }
 EXPORT_SYMBOL_GPL(dma_free_noncoherent);
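
[Editor's note: on the provider side, a hedged sketch of how an IOMMU backend might wire up the two new dma_map_ops methods. my_iommu_alloc_remap() and my_iommu_free_remap() are invented placeholders for the backend's page-gathering and vmap logic; only the dma_map_ops plumbing mirrors what the patch adds.]

/* Hypothetical provider -- the my_iommu_* helpers are invented placeholders. */
static void *my_iommu_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	/* Gather discontiguous pages, map them as one contiguous IOVA
	 * range, and return a vmap()ed kernel address covering them. */
	return my_iommu_alloc_remap(dev, size, dma_handle, dir, gfp);
}

static void my_iommu_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	/* Unmap the IOVA range, vunmap() the kernel address, free the pages. */
	my_iommu_free_remap(dev, size, vaddr, dma_handle, dir);
}

static const struct dma_map_ops my_iommu_dma_ops = {
	/* .alloc, .map_page, etc. omitted for brevity */
	.alloc_noncoherent	= my_iommu_alloc_noncoherent,
	.free_noncoherent	= my_iommu_free_noncoherent,
};

Because dma_alloc_noncoherent() and dma_free_noncoherent() fall back to dma_alloc_pages()/dma_free_pages() when the methods are unset, backends can adopt the hooks incrementally.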