author    Will Deacon <will@kernel.org>            2021-01-27 23:53:45 +0000
committer Peter Zijlstra <peterz@infradead.org>    2021-01-29 20:02:29 +0100
commit    a72afd873089c697053e9daa85ff343b3140d2e7 (patch)
tree      d9bbe462cc43d6e5f8caa74cc752671e795315a8 /mm/hugetlb.c
parent    d8b450530b90f8845ab962af18b8a10ed77fc889 (diff)
tlb: mmu_gather: Remove start/end arguments from tlb_gather_mmu()
The 'start' and 'end' arguments to tlb_gather_mmu() are no longer needed
now that there is a separate function for 'fullmm' flushing.

Remove the unused arguments and update all callers.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Yu Zhao <yuzhao@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/CAHk-=wjQWa14_4UpfDf=fiineNP+RH74kZeDMo_f1D35xNzq9w@mail.gmail.com
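[Editor's illustration, not part of the commit: a minimal sketch of the caller-side change, mirroring the hunk below; 'mm', 'vma', 'start', 'end' and 'ref_page' stand for the caller's existing variables.]

	struct mmu_gather tlb;

	/* Before this commit: the caller supplied a flush range up front. */
	tlb_gather_mmu(&tlb, mm, start, end);

	/* After this commit: only the mm is passed; the range to flush is
	 * accumulated while the unmap walk runs, and tlb_finish_mmu()
	 * flushes whatever range accrued. */
	tlb_gather_mmu(&tlb, mm);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb);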
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c | 16 +---------------
1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 33db4fa62c7b..89635f407232 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3967,23 +3967,9 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	struct mm_struct *mm;
 	struct mmu_gather tlb;
-	unsigned long tlb_start = start;
-	unsigned long tlb_end = end;
-
-	/*
-	 * If shared PMDs were possibly used within this vma range, adjust
-	 * start/end for worst case tlb flushing.
-	 * Note that we can not be sure if PMDs are shared until we try to
-	 * unmap pages. However, we want to make sure TLB flushing covers
-	 * the largest possible range.
-	 */
-	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
-
-	mm = vma->vm_mm;
 
-	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
 	tlb_finish_mmu(&tlb);
 }