author    | Matthew Wilcox (Oracle) <willy@infradead.org> | 2021-04-29 23:01:10 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-04-30 11:20:42 -0700
commit    | 8e6a930bb3ea6aa4b623eececc25465d09ee7b13 (patch)
tree      | 93020fa4dfe0a12ef841f2a732b323e02663d707 /mm/page_alloc.c
parent    | 1587db62d8c0dbd943752f657b452213e1c4d8d4 (diff)
download  | lwn-8e6a930bb3ea6aa4b623eececc25465d09ee7b13.tar.gz
          | lwn-8e6a930bb3ea6aa4b623eececc25465d09ee7b13.zip
mm/page_alloc: rename alloc_mask to alloc_gfp
Patch series "Rationalise __alloc_pages wrappers", v3.
I was poking around the __alloc_pages variants trying to understand why
they each exist, and couldn't really find a good justification for keeping
__alloc_pages and __alloc_pages_nodemask as separate functions. That led
to getting rid of alloc_pages_current(); along the way I noticed that the
documentation was poor and that the mempolicy documentation wasn't being
included.
Anyway, this is all cleanups and doc fixes.
This patch (of 7):
We have two masks involved -- the nodemask and the gfp mask -- so
alloc_mask is an unclear name.
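For readers less familiar with the allocator's entry points, here is a
minimal sketch (not part of this patch; the helper example_alloc_on_node()
is hypothetical) of how a caller hands both masks to
__alloc_pages_nodemask() as it exists at this point in the series:

#include <linux/gfp.h>
#include <linux/nodemask.h>

/* Hypothetical caller: both "masks" travel into the allocator together.
 * The gfp_t governs allocation behaviour (reclaim, I/O, zone selection);
 * the nodemask_t restricts which NUMA nodes may satisfy the request. */
static struct page *example_alloc_on_node(int nid)
{
	nodemask_t nodes = NODE_MASK_NONE;

	node_set(nid, &nodes);	/* allow allocation from this node only */

	return __alloc_pages_nodemask(GFP_KERNEL, 0, nid, &nodes);
}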
Link: https://lkml.kernel.org/r/20210225150642.2582252-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c73557bb8c58..5932a95830dd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4966,7 +4966,7 @@ got_pg:
 
 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 		int preferred_nid, nodemask_t *nodemask,
-		struct alloc_context *ac, gfp_t *alloc_mask,
+		struct alloc_context *ac, gfp_t *alloc_gfp,
 		unsigned int *alloc_flags)
 {
 	ac->highest_zoneidx = gfp_zone(gfp_mask);
@@ -4975,7 +4975,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	ac->migratetype = gfp_migratetype(gfp_mask);
 
 	if (cpusets_enabled()) {
-		*alloc_mask |= __GFP_HARDWALL;
+		*alloc_gfp |= __GFP_HARDWALL;
 		/*
 		 * When we are in the interrupt context, it is irrelevant
 		 * to the current task context. It means that any node ok.
@@ -5019,7 +5019,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
-	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
+	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
 
 	/*
@@ -5032,8 +5032,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	}
 
 	gfp_mask &= gfp_allowed_mask;
-	alloc_mask = gfp_mask;
-	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
+	alloc_gfp = gfp_mask;
+	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac,
+			&alloc_gfp, &alloc_flags))
 		return NULL;
 
 	/*
@@ -5043,7 +5044,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
 
 	/* First allocation attempt */
-	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
+	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
 	if (likely(page))
 		goto out;
 
@@ -5053,7 +5054,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * from a particular context which has been marked by
 	 * memalloc_no{fs,io}_{save,restore}.
 	 */
-	alloc_mask = current_gfp_context(gfp_mask);
+	alloc_gfp = current_gfp_context(gfp_mask);
 	ac.spread_dirty_pages = false;
 
 	/*
@@ -5062,7 +5063,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 */
 	ac.nodemask = nodemask;
 
-	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
+	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
 
 out:
 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -5071,7 +5072,7 @@ out:
 		page = NULL;
 	}
 
-	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
 
 	return page;
 }
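A usage note, not part of the patch: the renamed alloc_gfp can
legitimately differ from the caller's gfp_mask. In the slow path it is
refreshed via current_gfp_context(), which strips flags such as __GFP_FS
or __GFP_IO while a memalloc_no{fs,io} scope is active. A minimal sketch
of such a scope (the function example_nofs_alloc() is hypothetical):

#include <linux/gfp.h>
#include <linux/sched/mm.h>

/* While the NOFS scope is in force, current_gfp_context() masks
 * __GFP_FS out of gfp_mask, so the slow path's alloc_gfp becomes
 * GFP_KERNEL & ~__GFP_FS even though the caller passed GFP_KERNEL. */
static struct page *example_nofs_alloc(void)
{
	unsigned int flags = memalloc_nofs_save();
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	memalloc_nofs_restore(flags);
	return page;
}

This is also why trace_mm_page_alloc() reports alloc_gfp rather than the
caller's original mask: it records the gfp_t that was actually used for
the allocation.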