author	David Hildenbrand <david@redhat.com>	2022-05-09 18:20:44 -0700
committer	akpm <akpm@linux-foundation.org>	2022-05-09 18:20:44 -0700
commit	7f5abe609b3dcbc62a36e18b1437f4f3521ecb75 (patch)
tree	37cb472bda88c5b0d11248ea5f66564fc332a4c6 /mm
parent	6c287605fd56466e645693eff3ae7c08fba56e0a (diff)
mm/rmap: fail try_to_migrate() early when setting a PMD migration entry fails
Let's fail right away in case we cannot clear PG_anon_exclusive because the
anon THP may be pinned.  Right now, we continue trying to install migration
entries and the caller of try_to_migrate() will realize that the page is
still mapped and has to restore the migration entries.  Let's just fail fast,
just like for PTE migration entries.

Link: https://lkml.kernel.org/r/20220428083441.37290-14-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Liang Zhang <zhangliang5@huawei.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Oded Gabbay <oded.gabbay@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
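[Editorial note] To illustrate the caller-side cost the commit message describes, here is a minimal, hypothetical sketch of the unmap-for-migration pattern. It is not code from this commit; the helper name sketch_unmap_for_migration() and the -EAGAIN handling are illustrative assumptions. try_to_migrate() replaces a folio's mappings with migration entries, and only a later folio_mapped() check reveals that some mapping (e.g. a possibly pinned anon THP whose PG_anon_exclusive bit could not be cleared) was left in place, at which point all entries installed so far must be restored:

/*
 * Illustrative sketch only -- not taken from mm/migrate.c.  It shows why
 * failing inside try_to_migrate_one() early is cheaper than noticing the
 * failure only after the whole rmap walk has finished.
 */
static int sketch_unmap_for_migration(struct folio *folio)
{
	/* Replace the folio's page table mappings with migration entries. */
	try_to_migrate(folio, 0);

	if (folio_mapped(folio)) {
		/*
		 * A mapping is still present, e.g. a possibly pinned anon
		 * THP whose PG_anon_exclusive flag could not be cleared.
		 * Every migration entry installed before the failure now
		 * has to be torn down again.
		 */
		remove_migration_ptes(folio, folio, false);
		return -EAGAIN;	/* hypothetical error handling */
	}

	return 0;	/* fully unmapped, migration can proceed */
}

Failing fast in set_pmd_migration_entry() lets try_to_migrate_one() stop the walk as soon as the PMD migration entry cannot be installed, instead of paying for the restore pass above.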
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	8
-rw-r--r--	mm/rmap.c	6
2 files changed, 10 insertions, 4 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 231911b7bf9d..14d7fa6dc793 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3066,7 +3066,7 @@ late_initcall(split_huge_pages_debugfs);
 #endif
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 		struct page *page)
 {
 	struct vm_area_struct *vma = pvmw->vma;
@@ -3078,7 +3078,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	pmd_t pmdswp;
 
 	if (!(pvmw->pmd && !pvmw->pte))
-		return;
+		return 0;
 
 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
@@ -3086,7 +3086,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
 	if (anon_exclusive && page_try_share_anon_rmap(page)) {
 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
-		return;
+		return -EBUSY;
 	}
 
 	if (pmd_dirty(pmdval))
@@ -3104,6 +3104,8 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	page_remove_rmap(page, vma, true);
 	put_page(page);
 	trace_set_migration_pmd(address, pmd_val(pmdswp));
+
+	return 0;
 }
 
 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
diff --git a/mm/rmap.c b/mm/rmap.c
index 0d63e7ce35cc..f96cc7eb23ec 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1856,7 +1856,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
 					!folio_test_pmd_mappable(folio), folio);
 
-			set_pmd_migration_entry(&pvmw, subpage);
+			if (set_pmd_migration_entry(&pvmw, subpage)) {
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
+			}
 			continue;
 		}
 #endif
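[Editorial note] The diffstat above is limited to 'mm'. Since set_pmd_migration_entry() now returns int, its declaration elsewhere (presumably include/linux/swapops.h, not shown in this mm-only view) has to follow the same void -> int change. A hedged sketch of what that matching prototype would look like:

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
/*
 * Sketch of the corresponding declaration change, assumed to live in
 * include/linux/swapops.h; it is not part of the hunks shown above.
 */
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);
#endif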