author		Barry Song <song.bao.hua@hisilicon.com>		2020-07-23 21:15:30 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-07-24 12:42:41 -0700
commit		dbda8feadfa46b3d8dd7a2304f84ccbc036effe9 (patch)
tree		cd57ca585afbf0aed49f65effb9738e542630414
parent		d38a2b7a9c939e6d7329ab92b96559ccebf7b135 (diff)
download	lwn-dbda8feadfa46b3d8dd7a2304f84ccbc036effe9.tar.gz
		lwn-dbda8feadfa46b3d8dd7a2304f84ccbc036effe9.zip
mm/hugetlb: avoid hardcoding while checking if cma is enabled
hugetlb_cma[0] can be NULL for various reasons; for example, node 0 may
have no memory. So a NULL hugetlb_cma[0] doesn't necessarily mean CMA
is not enabled: gigantic pages might still have been reserved on other
nodes. This patch keys the check to hugetlb_cma_size instead, fixing a
possible double reservation and CMA leak.
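
To make the failure mode concrete, here is a minimal userspace sketch
(not kernel code; the array and size variable are stand-ins for the
kernel's hugetlb_cma[] and hugetlb_cma_size) showing how a memoryless
node 0 fools the old check while the size-based check stays correct:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NUMNODES 4

static void *hugetlb_cma[MAX_NUMNODES];	/* per-node CMA areas (stand-in) */
static unsigned long hugetlb_cma_size;		/* total size requested at boot */

int main(void)
{
	/* suppose node 0 is memoryless: CMA was reserved on node 1 only */
	hugetlb_cma[1] = (void *)0x1000;
	hugetlb_cma_size = 512UL << 20;

	/* old check: misreports CMA as disabled because slot 0 is NULL */
	bool old_check = hugetlb_cma[0] != NULL;

	/* new check: keyed to the requested size, independent of node 0 */
	bool new_check = hugetlb_cma_size != 0;

	/* prints "old: 0, new: 1" */
	printf("old: %d, new: %d\n", old_check, new_check);
	return 0;
}

With the old check, boot-time allocation would proceed even though CMA
had already been reserved on node 1, hence the possible double
reservation noted above.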
[akpm@linux-foundation.org: fix CONFIG_CMA=n warning]
[sfr@canb.auug.org.au: better checks before using hugetlb_cma]
Link: http://lkml.kernel.org/r/20200721205716.6dbaa56b@canb.auug.org.au
Fixes: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
Signed-off-by: Barry Song <song.bao.hua@hisilicon.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Jonathan Cameron <jonathan.cameron@huawei.com>
Cc: <stable@vger.kernel.org>
Link: http://lkml.kernel.org/r/20200710005726.36068-1-song.bao.hua@hisilicon.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/hugetlb.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fab4485b9e52..590111ea6975 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -45,7 +45,10 @@ int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
+#ifdef CONFIG_CMA
 static struct cma *hugetlb_cma[MAX_NUMNODES];
+#endif
+static unsigned long hugetlb_cma_size __initdata;
 
 /*
  * Minimum page order among possible hugepage sizes, set to a proper value
@@ -1235,9 +1238,10 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 	 * If the page isn't allocated using the cma allocator,
 	 * cma_release() returns false.
 	 */
-	if (IS_ENABLED(CONFIG_CMA) &&
-	    cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+#ifdef CONFIG_CMA
+	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
 		return;
+#endif
 
 	free_contig_range(page_to_pfn(page), 1 << order);
 }
@@ -1248,7 +1252,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	unsigned long nr_pages = 1UL << huge_page_order(h);
 
-	if (IS_ENABLED(CONFIG_CMA)) {
+#ifdef CONFIG_CMA
+	{
 		struct page *page;
 		int node;
 
@@ -1262,6 +1267,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 				return page;
 		}
 	}
+#endif
 
 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
 }
@@ -2571,7 +2577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 
 	for (i = 0; i < h->max_huge_pages; ++i) {
 		if (hstate_is_gigantic(h)) {
-			if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) {
+			if (hugetlb_cma_size) {
 				pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
 				break;
 			}
@@ -5654,7 +5660,6 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
 }
 
 #ifdef CONFIG_CMA
-static unsigned long hugetlb_cma_size __initdata;
 static bool cma_reserve_called __initdata;
 
 static int __init cmdline_parse_hugetlb_cma(char *p)
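
One design note on the switch from IS_ENABLED(CONFIG_CMA) to #ifdef
CONFIG_CMA in the hunks above: code inside an IS_ENABLED() branch is
still parsed and must compile even when the option is off, so every
symbol it touches (here hugetlb_cma[]) has to exist in CONFIG_CMA=n
builds; with #ifdef, the array definition itself can be compiled out.
A minimal sketch of that trade-off, using hypothetical CONFIG_FOO and
foo_table stand-ins rather than kernel symbols:

#define CONFIG_FOO 0		/* pretend the option is compiled out */

#if CONFIG_FOO
static int foo_table[16];	/* with #if/#ifdef, the symbol can vanish */
#endif

int probe(void)
{
	int n = 0;
#if CONFIG_FOO
	n = foo_table[0];	/* never parsed when the option is off */
#endif
	/*
	 * An IS_ENABLED()-style guard, i.e.
	 *	if (CONFIG_FOO)
	 *		n = foo_table[0];
	 * would not compile here: the branch is dead but still parsed,
	 * so foo_table would have to be defined unconditionally.
	 */
	return n;
}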