author | Christoph Hellwig <hch@lst.de> | 2019-08-20 11:45:49 +0900 |
---|---|---|
committer | Christoph Hellwig <hch@lst.de> | 2019-08-21 07:14:10 +0900 |
commit | 90ae409f9eb3bcaf38688f9ec22375816053a08e (patch) | |
tree | 9ab3713cac2e657fc87fa8108a9dfa2b67f4d439 /kernel/dma/contiguous.c | |
parent | 936376f88ff1845b384b3a82b9cd167e53039229 (diff) | |
download | lwn-90ae409f9eb3bcaf38688f9ec22375816053a08e.tar.gz lwn-90ae409f9eb3bcaf38688f9ec22375816053a08e.zip |
dma-direct: fix zone selection after an unaddressable CMA allocation
The new dma_alloc_contiguous hides whether we allocate CMA or regular
pages, and thus fails to retry a ZONE_NORMAL allocation if the CMA
allocation succeeds but isn't addressable. That means we either fail
outright or dip into a small zone that might not succeed either.
Thanks to Hillf Danton for debugging this issue.
Fixes: b1d2dc009dec ("dma-contiguous: add dma_{alloc,free}_contiguous() helpers")
Reported-by: Tobias Klausmann <tobias.johannes.klausmann@mni.thm.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Tobias Klausmann <tobias.johannes.klausmann@mni.thm.de>
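The change only makes sense together with the caller-side retry in dma-direct, which is outside this diffstat (it is limited to kernel/dma/contiguous.c). Below is a minimal, illustrative sketch of that retry logic, not the actual kernel/dma/direct.c hunk: it assumes the dma_coherent_ok() helper that dma-direct keeps private, plus the existing dma_alloc_contiguous()/dma_free_contiguous() and page-allocator interfaces.

```c
/*
 * Illustrative sketch only -- the real retry lives in kernel/dma/direct.c
 * and is not part of this diffstat.  dma_coherent_ok() is a helper local
 * to dma-direct; the other calls are the existing contiguous/page
 * allocator interfaces.
 */
static struct page *sketch_dma_direct_alloc_pages(struct device *dev,
		size_t size, gfp_t gfp)
{
	size_t alloc_size = PAGE_ALIGN(size);
	struct page *page;

	/* Try CMA first; dma_alloc_contiguous() no longer falls back itself. */
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		/* CMA handed back memory the device cannot address: undo it. */
		dma_free_contiguous(dev, page, alloc_size);
		page = NULL;
	}

	/*
	 * Fall back to the normal page allocator; because the CMA attempt
	 * was undone above, the caller is still free to restrict the zone
	 * (GFP_DMA32/GFP_DMA) if this allocation is not addressable either.
	 */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp,
					get_order(alloc_size));
	return page;
}
```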
Diffstat (limited to 'kernel/dma/contiguous.c')
-rw-r--r-- | kernel/dma/contiguous.c | 8 |
1 file changed, 2 insertions, 6 deletions
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 2bd410f934b3..69cfb4345388 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
  */
 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 {
-	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
-	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	size_t align = get_order(PAGE_ALIGN(size));
+	size_t count = size >> PAGE_SHIFT;
 	struct page *page = NULL;
 	struct cma *cma = NULL;
 
@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 	/* CMA can be used only in the context which permits sleeping */
 	if (cma && gfpflags_allow_blocking(gfp)) {
+		size_t align = get_order(size);
 		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
 
 		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
 	}
 
-	/* Fallback allocation of normal pages */
-	if (!page)
-		page = alloc_pages_node(node, gfp, align);
 	return page;
 }
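Pieced together from the two hunks above, dma_alloc_contiguous() after this patch reduces to roughly the following; the CMA-area selection in the middle of the function is unchanged and elided here:

```c
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
	size_t count = size >> PAGE_SHIFT;
	struct page *page = NULL;
	struct cma *cma = NULL;

	/* ... unchanged selection of the per-device or default CMA area ... */

	/* CMA can be used only in the context which permits sleeping */
	if (cma && gfpflags_allow_blocking(gfp)) {
		size_t align = get_order(size);
		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);

		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
	}

	/*
	 * No silent fallback to alloc_pages_node() any more: a NULL return
	 * now tells the caller that CMA was not used, so the caller can pick
	 * an addressable zone itself.
	 */
	return page;
}
```

Note that the PAGE_ALIGN() calls are gone as well, presumably because callers such as dma-direct are now expected to pass a page-aligned size.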