| author | Kefeng Wang <wangkefeng.wang@huawei.com> | 2024-08-27 19:47:28 +0800 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2024-09-03 21:15:59 -0700 |
| commit | 6f1833b8208c3b9e59eff10792667b6639365146 (patch) | |
| tree | b8dd951bc278866c543d81efb06448dcead7b378 /mm/memory_hotplug.c | |
| parent | f1264e9531b0fbadf88e0b9c8143c546343b481c (diff) | |
| download | lwn-6f1833b8208c3b9e59eff10792667b6639365146.tar.gz, lwn-6f1833b8208c3b9e59eff10792667b6639365146.zip | |
mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation
Use isolate_folio_to_list() to unify hugetlb/LRU/non-LRU folio
isolation, which cleans up the code a bit and saves a few calls to
compound_head().
[wangkefeng.wang@huawei.com: various fixes]
Link: https://lkml.kernel.org/r/20240829150500.2599549-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240827114728.3212578-6-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
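
For reference, isolate_folio_to_list() itself is introduced by an earlier patch in this series; it folds the hugetlb, LRU, and non-LRU movable cases behind one call. The sketch below is reconstructed from that series rather than quoted from it, so treat the exact naming and statistics handling as assumptions to be checked against mm/migrate.c:

```c
/*
 * Sketch of the unified isolation helper (mm/migrate.c), reconstructed
 * from this patch series -- not copied verbatim from the tree.
 */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	bool isolated, lru;

	/* hugetlb folios have their own isolation path */
	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	/* LRU folios and non-LRU movable folios are isolated differently */
	lru = !__folio_test_movable(folio);
	if (lru)
		isolated = folio_isolate_lru(folio);
	else
		isolated = isolate_movable_page(&folio->page,
						ISOLATE_UNEVICTABLE);
	if (!isolated)
		return false;

	list_add(&folio->lru, list);
	/* only LRU folios are accounted in the NR_ISOLATED_* counters */
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	return true;
}
```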
Diffstat (limited to 'mm/memory_hotplug.c')
| -rw-r--r-- | mm/memory_hotplug.c | 43 |
|---|---|---|

1 file changed, 15 insertions(+), 28 deletions(-)
```diff
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6bdac4cddd9f..4c6e3b181204 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1772,15 +1772,14 @@ found:
 static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
+	struct folio *folio;
 	unsigned long pfn;
-	struct page *page;
 	LIST_HEAD(source);
 	static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		struct folio *folio;
-		bool isolated;
+		struct page *page;
 
 		if (!pfn_valid(pfn))
 			continue;
 
@@ -1811,34 +1810,21 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			continue;
 		}
 
-		if (folio_test_hugetlb(folio)) {
-			isolate_hugetlb(folio, &source);
+		if (!folio_try_get(folio))
 			continue;
-		}
 
-		if (!get_page_unless_zero(page))
-			continue;
-		/*
-		 * We can skip free pages. And we can deal with pages on
-		 * LRU and non-lru movable pages.
-		 */
-		if (PageLRU(page))
-			isolated = isolate_lru_page(page);
-		else
-			isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-		if (isolated) {
-			list_add_tail(&page->lru, &source);
-			if (!__PageMovable(page))
-				inc_node_page_state(page, NR_ISOLATED_ANON +
-						    page_is_file_lru(page));
+		if (unlikely(page_folio(page) != folio))
+			goto put_folio;
 
-		} else {
+		if (!isolate_folio_to_list(folio, &source)) {
 			if (__ratelimit(&migrate_rs)) {
-				pr_warn("failed to isolate pfn %lx\n", pfn);
+				pr_warn("failed to isolate pfn %lx\n",
+					page_to_pfn(page));
 				dump_page(page, "isolation failed");
 			}
 		}
-		put_page(page);
+put_folio:
+		folio_put(folio);
 	}
 	if (!list_empty(&source)) {
 		nodemask_t nmask = node_states[N_MEMORY];
@@ -1853,7 +1839,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		 * We have checked that migration range is on a single zone so
 		 * we can use the nid of the first page to all the others.
 		 */
-		mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+		mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
 
 		/*
 		 * try to allocate from a different node but reuse this node
@@ -1866,11 +1852,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		ret = migrate_pages(&source, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
 		if (ret) {
-			list_for_each_entry(page, &source, lru) {
+			list_for_each_entry(folio, &source, lru) {
 				if (__ratelimit(&migrate_rs)) {
 					pr_warn("migrating pfn %lx failed ret:%d\n",
-						page_to_pfn(page), ret);
-					dump_page(page, "migration failure");
+						folio_pfn(folio), ret);
+					dump_page(&folio->page,
+						  "migration failure");
 				}
 			}
 			putback_movable_pages(&source);
```
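
One detail worth noting in the new loop is the speculative-reference idiom it now relies on: folio_try_get() pins the folio only if its refcount is still non-zero, and the subsequent page_folio() re-check catches the case where the folio was split, or freed and reused, between the lookup and the pin. A minimal sketch of that idiom follows; grab_stable_folio() is a hypothetical name used for illustration, not a kernel API:

```c
#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Hypothetical illustration of the folio-stabilization idiom used in
 * do_migrate_range() above; not an actual kernel helper.
 */
static bool grab_stable_folio(struct page *page, struct folio **foliop)
{
	struct folio *folio = page_folio(page);

	/* Speculative pin: fails if the refcount already hit zero. */
	if (!folio_try_get(folio))
		return false;	/* page is free or being freed, skip it */

	/*
	 * The folio may have been split, or freed and reused, between
	 * page_folio() and folio_try_get(); verify the page still
	 * belongs to the folio we pinned, and drop the pin if not.
	 */
	if (unlikely(page_folio(page) != folio)) {
		folio_put(folio);
		return false;
	}

	*foliop = folio;
	return true;
}
```

Because every decision after the pin is made against the folio (not the raw page), the function no longer needs compound_head() on each use, which is where the savings mentioned in the changelog come from.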