summaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorBaolin Wang <baolin.wang@linux.alibaba.com>2026-02-09 22:07:25 +0800
committerAndrew Morton <akpm@linux-foundation.org>2026-02-12 15:43:00 -0800
commit67d59bddfc26a2f7d5740d4d7ce9ff45274322d5 (patch)
treedc9b8a707f34d2d410de9c5d803cc2753c721d9b /arch
parent52e054f7184097bea009963e033cdd54af7bf8a2 (diff)
downloadlwn-67d59bddfc26a2f7d5740d4d7ce9ff45274322d5.tar.gz
lwn-67d59bddfc26a2f7d5740d4d7ce9ff45274322d5.zip
arm64: mm: factor out the address and ptep alignment into a new helper
Factor out the contpte block's address and ptep alignment into a new helper, and will be reused in the following patch. No functional changes. Link: https://lkml.kernel.org/r/8076d12cb244b2d9e91119b44dc6d5e4ad9c00af.1770645603.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com> Reviewed-by: David Hildenbrand (Arm) <david@kernel.org> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com> Cc: Barry Song <baohua@kernel.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Harry Yoo <harry.yoo@oracle.com> Cc: Jann Horn <jannh@google.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Rik van Riel <riel@surriel.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/arm64/mm/contpte.c29
1 file changed, 21 insertions, 8 deletions
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 589bcf878938..e4ddeb46f25d 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -26,6 +26,26 @@ static inline pte_t *contpte_align_down(pte_t *ptep)
return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
}
+static inline pte_t *contpte_align_addr_ptep(unsigned long *start,
+ unsigned long *end, pte_t *ptep,
+ unsigned int nr)
+{
+ /*
+ * Note: caller must ensure these nr PTEs are consecutive (present)
+ * PTEs that map consecutive pages of the same large folio within a
+ * single VMA and a single page table.
+ */
+ if (pte_cont(__ptep_get(ptep + nr - 1)))
+ *end = ALIGN(*end, CONT_PTE_SIZE);
+
+ if (pte_cont(__ptep_get(ptep))) {
+ *start = ALIGN_DOWN(*start, CONT_PTE_SIZE);
+ ptep = contpte_align_down(ptep);
+ }
+
+ return ptep;
+}
+
static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr)
{
@@ -569,14 +589,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
unsigned long start = addr;
unsigned long end = start + nr * PAGE_SIZE;
- if (pte_cont(__ptep_get(ptep + nr - 1)))
- end = ALIGN(end, CONT_PTE_SIZE);
-
- if (pte_cont(__ptep_get(ptep))) {
- start = ALIGN_DOWN(start, CONT_PTE_SIZE);
- ptep = contpte_align_down(ptep);
- }
-
+ ptep = contpte_align_addr_ptep(&start, &end, ptep, nr);
__clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
}
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);