summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
author	Lorenzo Stoakes (Oracle) <ljs@kernel.org>	2026-03-20 18:07:19 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2026-04-05 13:53:45 -0700
commit	6886f93790b3c1935bfb9e668a7c3f68d7eff510 (patch)
tree	fe8047ccdb204ff5d0cbf7e3f332b144ba689f13 /mm
parent	c0ea52c18c78c33c68c350eb9d3dcdf8c513254d (diff)
downloadlwn-6886f93790b3c1935bfb9e668a7c3f68d7eff510.tar.gz
lwn-6886f93790b3c1935bfb9e668a7c3f68d7eff510.zip
mm/huge: avoid big else branch in zap_huge_pmd()
We don't need to have an extra level of indentation, we can simply exit early in the first two branches. No functional change intended. Link: https://lkml.kernel.org/r/6b4d5efdbf5554b8fe788f677d0b50f355eec999.1774029655.git.ljs@kernel.org Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com> Acked-by: Qi Zheng <zhengqi.arch@bytedance.com> Reviewed-by: Suren Baghdasaryan <surenb@google.com> Cc: Barry Song <baohua@kernel.org> Cc: David Hildenbrand <david@kernel.org> Cc: Dev Jain <dev.jain@arm.com> Cc: Lance Yang <lance.yang@linux.dev> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Nico Pache <npache@redhat.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Zi Yan <ziy@nvidia.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	87
1 file changed, 45 insertions(+), 42 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index db390b0098d9..4dfffd6a1bbe 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2405,8 +2405,10 @@ static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr)
{
- pmd_t orig_pmd;
+ struct folio *folio = NULL;
+ int flush_needed = 1;
spinlock_t *ptl;
+ pmd_t orig_pmd;
tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
@@ -2427,59 +2429,60 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
- } else if (is_huge_zero_pmd(orig_pmd)) {
+ return 1;
+ }
+ if (is_huge_zero_pmd(orig_pmd)) {
if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
- } else {
- struct folio *folio = NULL;
- int flush_needed = 1;
+ return 1;
+ }
- if (pmd_present(orig_pmd)) {
- struct page *page = pmd_page(orig_pmd);
+ if (pmd_present(orig_pmd)) {
+ struct page *page = pmd_page(orig_pmd);
- folio = page_folio(page);
- folio_remove_rmap_pmd(folio, page, vma);
- WARN_ON_ONCE(folio_mapcount(folio) < 0);
- VM_BUG_ON_PAGE(!PageHead(page), page);
- } else if (pmd_is_valid_softleaf(orig_pmd)) {
- const softleaf_t entry = softleaf_from_pmd(orig_pmd);
+ folio = page_folio(page);
+ folio_remove_rmap_pmd(folio, page, vma);
+ WARN_ON_ONCE(folio_mapcount(folio) < 0);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ } else if (pmd_is_valid_softleaf(orig_pmd)) {
+ const softleaf_t entry = softleaf_from_pmd(orig_pmd);
- folio = softleaf_to_folio(entry);
- flush_needed = 0;
+ folio = softleaf_to_folio(entry);
+ flush_needed = 0;
- if (!thp_migration_supported())
- WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
- }
+ if (!thp_migration_supported())
+ WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+ }
- if (folio_test_anon(folio)) {
+ if (folio_test_anon(folio)) {
+ zap_deposited_table(tlb->mm, pmd);
+ add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+ } else {
+ if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
- add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
- } else {
- if (arch_needs_pgtable_deposit())
- zap_deposited_table(tlb->mm, pmd);
- add_mm_counter(tlb->mm, mm_counter_file(folio),
- -HPAGE_PMD_NR);
-
- /*
- * Use flush_needed to indicate whether the PMD entry
- * is present, instead of checking pmd_present() again.
- */
- if (flush_needed && pmd_young(orig_pmd) &&
- likely(vma_has_recency(vma)))
- folio_mark_accessed(folio);
- }
+ add_mm_counter(tlb->mm, mm_counter_file(folio),
+ -HPAGE_PMD_NR);
- if (folio_is_device_private(folio)) {
- folio_remove_rmap_pmd(folio, &folio->page, vma);
- WARN_ON_ONCE(folio_mapcount(folio) < 0);
- folio_put(folio);
- }
+ /*
+ * Use flush_needed to indicate whether the PMD entry
+ * is present, instead of checking pmd_present() again.
+ */
+ if (flush_needed && pmd_young(orig_pmd) &&
+ likely(vma_has_recency(vma)))
+ folio_mark_accessed(folio);
+ }
- spin_unlock(ptl);
- if (flush_needed)
- tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
+ if (folio_is_device_private(folio)) {
+ folio_remove_rmap_pmd(folio, &folio->page, vma);
+ WARN_ON_ONCE(folio_mapcount(folio) < 0);
+ folio_put(folio);
}
+
+ spin_unlock(ptl);
+ if (flush_needed)
+ tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
+
return 1;
}