author		Oscar Salvador <osalvador@suse.de>	2021-05-04 18:35:23 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-05-05 11:27:22 -0700
commit		d3d99fcc4e28f1a613744608c289d4f18b60b12f
tree		7744457271b430c9abbc2d30e5d5c04a00b4e61f /mm/hugetlb.c
parent		9f27b34f234da7a185b4f1a2aa2cea2c47c458bf
mm,hugetlb: split prep_new_huge_page functionality
Currently, prep_new_huge_page() performs two functions. It sets the right
state for a new hugetlb page, and increases the hstate's counters to
account for the new page.

Let us split its functionality into two separate functions, decoupling the
handling of the counters from the initialization of the page. The outcome
is __prep_new_huge_page(), which only initializes the page, and
__prep_account_new_huge_page(), which adds the new page to the hstate's
counters.

This allows us to set up a hugetlb page without having to worry about the
counters or the locking. It will prove useful in the next patch.
prep_new_huge_page() still calls both functions.

Link: https://lkml.kernel.org/r/20210419075413.1064-5-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c1539fb95f51..63760be2688e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1484,16 +1484,30 @@ void free_huge_page(struct page *page)
 	}
 }
 
-static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+/*
+ * Must be called with the hugetlb lock held
+ */
+static void __prep_account_new_huge_page(struct hstate *h, int nid)
+{
+	lockdep_assert_held(&hugetlb_lock);
+	h->nr_huge_pages++;
+	h->nr_huge_pages_node[nid]++;
+}
+
+static void __prep_new_huge_page(struct page *page)
 {
 	INIT_LIST_HEAD(&page->lru);
 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
 	hugetlb_set_page_subpool(page, NULL);
 	set_hugetlb_cgroup(page, NULL);
 	set_hugetlb_cgroup_rsvd(page, NULL);
+}
+
+static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+{
+	__prep_new_huge_page(page);
 	spin_lock_irq(&hugetlb_lock);
-	h->nr_huge_pages++;
-	h->nr_huge_pages_node[nid]++;
+	__prep_account_new_huge_page(h, nid);
 	spin_unlock_irq(&hugetlb_lock);
 }
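
To see why the decoupling matters, here is a condensed sketch of the kind of
caller it enables, loosely modeled on the follow-up patch in this series. It
is illustrative, not code from this commit: the function name is invented,
error handling and state checks are omitted, and remove_hugetlb_page() and
enqueue_huge_page() are existing hugetlb helpers used only for context.

/*
 * Illustrative only -- swap a free huge page for a freshly allocated
 * one.  The page setup runs with no lock held; only the accounting
 * and list manipulation need hugetlb_lock.  The old page is
 * unaccounted and the new one accounted within the same critical
 * section, so the hstate counters never look skewed.
 */
static int demo_replace_free_huge_page(struct hstate *h,
				       struct page *old_page,
				       struct page *new_page)
{
	int nid = page_to_nid(old_page);

	/* Safe outside the lock thanks to the split. */
	__prep_new_huge_page(new_page);

	spin_lock_irq(&hugetlb_lock);
	remove_hugetlb_page(h, old_page, false);
	__prep_account_new_huge_page(h, nid);
	enqueue_huge_page(h, new_page);
	spin_unlock_irq(&hugetlb_lock);

	return 0;
}

The lockdep_assert_held() in __prep_account_new_huge_page() makes the locking
contract explicit: a caller that updates the counters without holding
hugetlb_lock trips lockdep instead of silently racing.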