author    Christophe Leroy <christophe.leroy@csgroup.eu>  2024-07-02 15:51:20 +0200
committer Andrew Morton <akpm@linux-foundation.org>  2024-07-12 15:52:15 -0700
commit    e6c0c03245b14d6c205814aee67128257d0bea84 (patch)
tree      7d814f43ae1356b12e3762f8b9d572b91adf8e9a /mm
parent    18d095b2556e5e1292003c8e9f5d845ed42ef89b (diff)
mm: provide mm_struct and address to huge_ptep_get()
On powerpc 8xx, huge_ptep_get() will need to know whether the given ptep is
a PTE entry or a PMD entry.  This cannot be deduced from the entry itself,
because there is no easy way to tell from its content.  So huge_ptep_get()
will need either the page size or a way to reach the pmd.

In order to be consistent with huge_ptep_get_and_clear(), give mm and
address to huge_ptep_get().

Link: https://lkml.kernel.org/r/cc00c70dd384298796a4e1b25d6c4eb306d3af85.1719928057.git.christophe.leroy@csgroup.eu
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
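For reference, the generic fallback for huge_ptep_get() lives in
include/asm-generic/hugetlb.h and is therefore outside this mm/-limited
diffstat; after this patch it accepts the new arguments but ignores them,
leaving architectures that need them (such as powerpc 8xx) to override it.
A minimal sketch of the post-patch generic definition:

    static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
    {
            /* Generic fallback: mm and addr are unused here. */
            return ptep_get(ptep);
    }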
Diffstat (limited to 'mm')
-rw-r--r--  mm/damon/vaddr.c       6
-rw-r--r--  mm/gup.c               2
-rw-r--r--  mm/hmm.c               2
-rw-r--r--  mm/hugetlb.c          44
-rw-r--r--  mm/memory-failure.c    2
-rw-r--r--  mm/mempolicy.c         2
-rw-r--r--  mm/migrate.c           4
-rw-r--r--  mm/mincore.c           2
-rw-r--r--  mm/userfaultfd.c       2
9 files changed, 33 insertions, 33 deletions
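The caller-side conversion below is mechanical: each call site passes the
mm_struct and address it already has on hand (walk->mm and addr in
page-table walkers, vma->vm_mm in VMA-based paths, dst_mm and dst_addr in
the userfaultfd paths).  As an illustration only (this callback is
hypothetical, not part of the patch), a hugetlb walker entry after this
change would look like:

    static int example_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                     unsigned long addr, unsigned long end,
                                     struct mm_walk *walk)
    {
            /* Hypothetical walker callback; mm and addr come from the walk. */
            pte_t entry = huge_ptep_get(walk->mm, addr, pte);

            if (!pte_present(entry))
                    return 0;
            /* ... operate on the huge page mapped at addr ... */
            return 0;
    }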
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 381559e4a1fa..58829baf8b5d 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -339,7 +339,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long addr)
{
bool referenced = false;
- pte_t entry = huge_ptep_get(pte);
+ pte_t entry = huge_ptep_get(mm, addr, pte);
struct folio *folio = pfn_folio(pte_pfn(entry));
unsigned long psize = huge_page_size(hstate_vma(vma));
@@ -373,7 +373,7 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
pte_t entry;
ptl = huge_pte_lock(h, walk->mm, pte);
- entry = huge_ptep_get(pte);
+ entry = huge_ptep_get(walk->mm, addr, pte);
if (!pte_present(entry))
goto out;
@@ -509,7 +509,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
pte_t entry;
ptl = huge_pte_lock(h, walk->mm, pte);
- entry = huge_ptep_get(pte);
+ entry = huge_ptep_get(walk->mm, addr, pte);
if (!pte_present(entry))
goto out;
diff --git a/mm/gup.c b/mm/gup.c
index 85d45ec57f7c..cdb81db7e366 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -604,7 +604,7 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
if (pte_end < end)
end = pte_end;
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(vma->vm_mm, addr, ptep);
if (!pte_access_permitted(pte, flags & FOLL_WRITE))
return 0;
diff --git a/mm/hmm.c b/mm/hmm.c
index 93aebd9cc130..7e0229ae4a5a 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -480,7 +480,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
pte_t entry;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
- entry = huge_ptep_get(pte);
+ entry = huge_ptep_get(walk->mm, addr, pte);
i = (start - range->start) >> PAGE_SHIFT;
pfn_req_flags = range->hmm_pfns[i];
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f88ff0333945..fd2050934b13 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5287,7 +5287,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
{
pte_t entry;
- entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
+ entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
update_mmu_cache(vma, address, ptep);
}
@@ -5395,7 +5395,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
dst_ptl = huge_pte_lock(h, dst, dst_pte);
src_ptl = huge_pte_lockptr(h, src, src_pte);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
- entry = huge_ptep_get(src_pte);
+ entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
again:
if (huge_pte_none(entry)) {
/*
@@ -5433,7 +5433,7 @@ again:
set_huge_pte_at(dst, addr, dst_pte,
make_pte_marker(marker), sz);
} else {
- entry = huge_ptep_get(src_pte);
+ entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
pte_folio = page_folio(pte_page(entry));
folio_get(pte_folio);
@@ -5474,7 +5474,7 @@ again:
dst_ptl = huge_pte_lock(h, dst, dst_pte);
src_ptl = huge_pte_lockptr(h, src, src_pte);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
- entry = huge_ptep_get(src_pte);
+ entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
if (!pte_same(src_pte_old, entry)) {
restore_reserve_on_error(h, dst_vma, addr,
new_folio);
@@ -5584,7 +5584,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
new_addr |= last_addr_mask;
continue;
}
- if (huge_pte_none(huge_ptep_get(src_pte)))
+ if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
continue;
if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
@@ -5657,7 +5657,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
continue;
}
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(mm, address, ptep);
if (huge_pte_none(pte)) {
spin_unlock(ptl);
continue;
@@ -5906,7 +5906,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *mm = vma->vm_mm;
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
- pte_t pte = huge_ptep_get(vmf->pte);
+ pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
struct hstate *h = hstate_vma(vma);
struct folio *old_folio;
struct folio *new_folio;
@@ -6027,7 +6027,7 @@ retry_avoidcopy:
vmf->pte = hugetlb_walk(vma, vmf->address,
huge_page_size(h));
if (likely(vmf->pte &&
- pte_same(huge_ptep_get(vmf->pte), pte)))
+ pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
goto retry_avoidcopy;
/*
* race occurs while re-acquiring page table
@@ -6065,7 +6065,7 @@ retry_avoidcopy:
*/
spin_lock(vmf->ptl);
vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
- if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
+ if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
/* Break COW or unshare */
@@ -6166,14 +6166,14 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
* Recheck pte with pgtable lock. Returns true if pte didn't change, or
* false if pte changed or is changing.
*/
-static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
+static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t old_pte)
{
spinlock_t *ptl;
bool same;
ptl = huge_pte_lock(h, mm, ptep);
- same = pte_same(huge_ptep_get(ptep), old_pte);
+ same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
spin_unlock(ptl);
return same;
@@ -6234,7 +6234,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* never happen on the page after UFFDIO_COPY has
* correctly installed the page and returned.
*/
- if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
+ if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
ret = 0;
goto out;
}
@@ -6263,7 +6263,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* here. Before returning error, get ptl and make
* sure there really is no pte entry.
*/
- if (hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte))
+ if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
ret = vmf_error(PTR_ERR(folio));
else
ret = 0;
@@ -6312,7 +6312,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
folio_unlock(folio);
folio_put(folio);
/* See comment in userfaultfd_missing() block above */
- if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
+ if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
ret = 0;
goto out;
}
@@ -6339,7 +6339,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
ret = 0;
/* If pte changed from under us, retry */
- if (!pte_same(huge_ptep_get(vmf->pte), vmf->orig_pte))
+ if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
goto backout;
if (anon_rmap)
@@ -6460,7 +6460,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
}
- vmf.orig_pte = huge_ptep_get(vmf.pte);
+ vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
if (huge_pte_none_mostly(vmf.orig_pte)) {
if (is_pte_marker(vmf.orig_pte)) {
pte_marker marker =
@@ -6501,7 +6501,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* be released there.
*/
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- migration_entry_wait_huge(vma, vmf.pte);
+ migration_entry_wait_huge(vma, vmf.address, vmf.pte);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte)))
ret = VM_FAULT_HWPOISON_LARGE |
@@ -6534,11 +6534,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
/* Check for a racing update before calling hugetlb_wp() */
- if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(vmf.pte))))
+ if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
goto out_ptl;
/* Handle userfault-wp first, before trying to lock more pages */
- if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(vmf.pte)) &&
+ if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
(flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
if (!userfaultfd_wp_async(vma)) {
spin_unlock(vmf.ptl);
@@ -6666,7 +6666,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
ptl = huge_pte_lock(h, dst_mm, dst_pte);
/* Don't overwrite any existing PTEs (even markers) */
- if (!huge_pte_none(huge_ptep_get(dst_pte))) {
+ if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
spin_unlock(ptl);
return -EEXIST;
}
@@ -6802,7 +6802,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
* page backing it, then access the page.
*/
ret = -EEXIST;
- if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
+ if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte)))
goto out_release_unlock;
if (folio_in_pagecache)
@@ -6923,7 +6923,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
address |= last_addr_mask;
continue;
}
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(mm, address, ptep);
if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
/* Nothing to do. */
} else if (unlikely(is_hugetlb_entry_migration(pte))) {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 0276cc299b03..16f8651436d5 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -835,7 +835,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
struct mm_walk *walk)
{
struct hwpoison_walk *hwp = walk->private;
- pte_t pte = huge_ptep_get(ptep);
+ pte_t pte = huge_ptep_get(walk->mm, addr, ptep);
struct hstate *h = hstate_vma(walk->vma);
return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f73acb01ad45..f8703feb68b7 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -624,7 +624,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
pte_t entry;
ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
- entry = huge_ptep_get(pte);
+ entry = huge_ptep_get(walk->mm, addr, pte);
if (!pte_present(entry)) {
if (unlikely(is_hugetlb_entry_migration(entry)))
qp->nr_failed++;
diff --git a/mm/migrate.c b/mm/migrate.c
index abb3aa45bed9..ff512c43fecb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -338,14 +338,14 @@ out:
*
* This function will release the vma lock before returning.
*/
-void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
+void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
pte_t pte;
hugetlb_vma_assert_locked(vma);
spin_lock(ptl);
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(vma->vm_mm, addr, ptep);
if (unlikely(!is_hugetlb_entry_migration(pte))) {
spin_unlock(ptl);
diff --git a/mm/mincore.c b/mm/mincore.c
index e31cf1bde614..d6bd19e520fc 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -33,7 +33,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
* Hugepages under user process are always in RAM and never
* swapped out, but theoretically it needs to be checked.
*/
- present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
+ present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
for (; addr != end; vec++, addr += PAGE_SIZE)
*vec = present;
walk->private = vec;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 8dedaec00486..e54e5c8907fa 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -587,7 +587,7 @@ retry:
}
if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
- !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
+ !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
err = -EEXIST;
hugetlb_vma_unlock_read(dst_vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);