author	Robin Murphy <robin.murphy@arm.com>	2016-04-13 17:29:10 +0100
committer	Joerg Roedel <jroedel@suse.de>	2016-05-09 15:33:29 +0200
commit	3b6b7e19e31a816ee02a8d4372cbea9ad7db3784 (patch)
tree	28196392e6972feaf5ead20f9ecaf8e9af7ca140 /include/linux/dma-iommu.h
parent	d16e0faab911cc0e100a1e8e93635b432566608e (diff)
iommu/dma: Finish optimising higher-order allocations
Now that we know exactly which page sizes our caller wants to use in the
given domain, we can restrict higher-order allocation attempts to just
those sizes, if any, and avoid wasting any time or effort on other sizes
which offer no benefit. In the same vein, this also lets us accommodate
a minimum order greater than 0 for special cases.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Tested-by: Yong Wu <yong.wu@mediatek.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
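To illustrate the idea, here is a minimal, self-contained C sketch (not the
kernel implementation; fls_u32(), alloc_order() and alloc_pages_masked() are
hypothetical stand-ins): an order mask derived from the domain's supported
page sizes limits which block sizes the allocator even attempts, falling back
through progressively smaller supported orders until the request is covered.
Clearing bit 0 of the mask would likewise express a minimum order greater
than 0.

#include <stdio.h>
#include <stdbool.h>

/* Index of the most significant set bit (x must be nonzero),
 * analogous to the kernel's __fls(). */
static unsigned int fls_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* Pretend allocator: succeeds only up to order 4, mimicking a system
 * where very large contiguous blocks are unavailable. */
static bool alloc_order(unsigned int order)
{
	return order <= 4;
}

/*
 * "Allocate" count pages, attempting only the orders set in order_mask
 * (bit n => order-n blocks, i.e. 2^n pages), largest first. Because the
 * mask comes from the page sizes the IOMMU domain can actually map as a
 * single entry, orders that offer no benefit are never even tried.
 */
static unsigned int alloc_pages_masked(unsigned int count,
				       unsigned int order_mask)
{
	unsigned int allocated = 0;

	while (count) {
		/* Never try an order larger than the remaining count. */
		unsigned int mask = order_mask & ((2U << fls_u32(count)) - 1);
		unsigned int order = 0, size = 0;
		bool ok = false;

		/* Walk the permitted orders from largest to smallest. */
		for (; mask; mask &= ~size) {
			order = fls_u32(mask);
			size = 1U << order;
			if (alloc_order(order)) {
				ok = true;
				break;
			}
		}
		if (!ok)
			return 0;	/* real code would free what it got */

		printf("allocated order-%u block (%u pages)\n", order, size);
		allocated += size;
		count -= size;
	}
	return allocated;
}

int main(void)
{
	/* E.g. supported page sizes of 4K, 64K and 1M (with 4K base
	 * pages) correspond to orders 0, 4 and 8. */
	unsigned int order_mask = (1U << 0) | (1U << 4) | (1U << 8);

	alloc_pages_masked(21, order_mask);	/* 16 + 5x1 pages */
	return 0;
}

A real implementation would additionally unwind partial allocations on
failure, and could reduce allocation effort (for instance, by not retrying
hard) whenever smaller fallback orders remain in the mask.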
Diffstat (limited to 'include/linux/dma-iommu.h')
-rw-r--r--	include/linux/dma-iommu.h	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index fc481037478a..8443bbb5c071 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
* These implement the bulk of the relevant DMA mapping callbacks, but require
* the arch code to take care of attributes and cache maintenance
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
- gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+ struct dma_attrs *attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);