diff options
| author | Lorenzo Stoakes (Oracle) <ljs@kernel.org> | 2026-03-20 18:07:27 +0000 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2026-04-05 13:53:46 -0700 |
| commit | f87854c9091014207ecb5bc108810ff3e4dbb08f (patch) | |
| tree | 186726afcadc43ba55bd5f04ab619bc8460fc45d /mm | |
| parent | 1c6b7ff60bd477bb73b737e2955c0ad49cffd7ca (diff) | |
| download | lwn-f87854c9091014207ecb5bc108810ff3e4dbb08f.tar.gz lwn-f87854c9091014207ecb5bc108810ff3e4dbb08f.zip | |
mm/huge_memory: separate out the folio part of zap_huge_pmd()
Place the part of the logic that manipulates counters and possibly updates
the accessed bit of the folio into its own function to make zap_huge_pmd()
more readable.
Also rename flush_needed to is_present, as a flush is only required for
present entries.
Additionally, add comments explaining why softleaf entries are handled
the way they are.
This also lays the groundwork for further refactoring.
Link: https://lkml.kernel.org/r/6c4db67952f5529da4db102a6149b9050b5dda4e.1774029655.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nico Pache <npache@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/huge_memory.c | 61 |
1 file changed, 35 insertions, 26 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c515e293ed48..d1e66df05c86 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2402,6 +2402,37 @@ static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) mm_dec_nr_ptes(mm); } +static void zap_huge_pmd_folio(struct mm_struct *mm, struct vm_area_struct *vma, + pmd_t pmdval, struct folio *folio, bool is_present, + bool *has_deposit) +{ + const bool is_device_private = folio_is_device_private(folio); + + /* Present and device private folios are rmappable. */ + if (is_present || is_device_private) + folio_remove_rmap_pmd(folio, &folio->page, vma); + + if (folio_test_anon(folio)) { + *has_deposit = true; + add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR); + } else { + add_mm_counter(mm, mm_counter_file(folio), + -HPAGE_PMD_NR); + + /* + * Use is_present to indicate whether the PMD entry + * is present, instead of checking pmd_present() again. + */ + if (is_present && pmd_young(pmdval) && + likely(vma_has_recency(vma))) + folio_mark_accessed(folio); + } + + /* Device private folios are pinned. */ + if (is_device_private) + folio_put(folio); +} + /** * zap_huge_pmd - Zap a huge THP which is of PMD size. * @tlb: The MMU gather TLB state associated with the operation. 
@@ -2417,7 +2448,7 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, bool has_deposit = arch_needs_pgtable_deposit(); struct mm_struct *mm = tlb->mm; struct folio *folio = NULL; - bool flush_needed = false; + bool is_present = false; spinlock_t *ptl; pmd_t orig_pmd; @@ -2446,14 +2477,11 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (pmd_present(orig_pmd)) { folio = pmd_folio(orig_pmd); - - flush_needed = true; - folio_remove_rmap_pmd(folio, &folio->page, vma); + is_present = true; } else if (pmd_is_valid_softleaf(orig_pmd)) { const softleaf_t entry = softleaf_from_pmd(orig_pmd); folio = softleaf_to_folio(entry); - if (!thp_migration_supported()) WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); } else { @@ -2461,33 +2489,14 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, goto out; } - if (folio_test_anon(folio)) { - has_deposit = true; - add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR); - } else { - add_mm_counter(mm, mm_counter_file(folio), - -HPAGE_PMD_NR); - - /* - * Use flush_needed to indicate whether the PMD entry - * is present, instead of checking pmd_present() again. - */ - if (flush_needed && pmd_young(orig_pmd) && - likely(vma_has_recency(vma))) - folio_mark_accessed(folio); - } - - if (folio_is_device_private(folio)) { - folio_remove_rmap_pmd(folio, &folio->page, vma); - folio_put(folio); - } + zap_huge_pmd_folio(mm, vma, orig_pmd, folio, is_present, &has_deposit); out: if (has_deposit) zap_deposited_table(mm, pmd); spin_unlock(ptl); - if (flush_needed) + if (is_present) tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE); return true; } |
