author     SeongJae Park <sj@kernel.org>    2023-01-09 21:33:30 +0000
committer  Andrew Morton <akpm@linux-foundation.org>    2023-01-18 17:12:58 -0800
commit     fc8c7d2380ab7d6aa1ddef1f69169ef9a15596eb (patch)
tree       f2d15cdd9f8d15fd109019476d9d9856ebd6b157 /mm/damon
parent     61d3d5108eb621d2a097c41f6cc83bf63b1b6c03 (diff)
mm/damon/vaddr: rename 'damon_young_walk_private->page_sz' to 'folio_sz'
Patch series "mm/damon/{v,p}addr: misc fixups for folio usage".

DAMON's monitoring operations sets for the virtual and the physical address spaces use folios now, but some of the code does not yet reflect that fact. Further clean up the code for folio usage.

This patch (of 6):

DAMON's virtual address space monitoring operations set is using folios now. Rename 'damon_young_walk_private->page_sz' to 'folio_sz' to reflect the fact.

Link: https://lkml.kernel.org/r/20230109213335.62525-1-sj@kernel.org
Link: https://lkml.kernel.org/r/20230109213335.62525-2-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
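As background for the rename, here is a minimal sketch (not part of this patch; the helper name mapped_folio_sz() is hypothetical) of why the recorded size is the folio's size rather than a fixed PAGE_SIZE:

	/* Illustration only: the size recorded during the young-walk depends
	 * on the backing folio, not on the base page size. */
	static unsigned long mapped_folio_sz(struct folio *folio)
	{
		/*
		 * folio_size() is PAGE_SIZE << folio_order(folio): PAGE_SIZE
		 * for an order-0 folio, HPAGE_PMD_SIZE for a PMD-mapped THP.
		 */
		return folio_size(folio);
	}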
Diffstat (limited to 'mm/damon')
-rw-r--r--  mm/damon/vaddr.c  21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 9d92c5eb3a1f..d6cb1fca1769 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -422,7 +422,8 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
}
struct damon_young_walk_private {
- unsigned long *page_sz;
+ /* size of the folio for the access checked virtual memory address */
+ unsigned long *folio_sz;
bool young;
};
@@ -452,7 +453,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
if (pmd_young(*pmd) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm,
addr)) {
- *priv->page_sz = HPAGE_PMD_SIZE;
+ *priv->folio_sz = HPAGE_PMD_SIZE;
priv->young = true;
}
folio_put(folio);
@@ -474,7 +475,7 @@ regular_page:
goto out;
if (pte_young(*pte) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = PAGE_SIZE;
+ *priv->folio_sz = PAGE_SIZE;
priv->young = true;
}
folio_put(folio);
@@ -504,7 +505,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
if (pte_young(entry) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = huge_page_size(h);
+ *priv->folio_sz = huge_page_size(h);
priv->young = true;
}
@@ -524,10 +525,10 @@ static const struct mm_walk_ops damon_young_ops = {
};
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
- unsigned long *page_sz)
+ unsigned long *folio_sz)
{
struct damon_young_walk_private arg = {
- .page_sz = page_sz,
+ .folio_sz = folio_sz,
.young = false,
};
@@ -547,18 +548,18 @@ static void __damon_va_check_access(struct mm_struct *mm,
struct damon_region *r, bool same_target)
{
static unsigned long last_addr;
- static unsigned long last_page_sz = PAGE_SIZE;
+ static unsigned long last_folio_sz = PAGE_SIZE;
static bool last_accessed;
/* If the region is in the last checked page, reuse the result */
- if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
- ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
+ if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
+ ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
if (last_accessed)
r->nr_accesses++;
return;
}
- last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
+ last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
if (last_accessed)
r->nr_accesses++;
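To make the reuse condition above concrete, here is a worked example with illustrative numbers (assuming the last check hit a 2 MiB PMD-mapped THP on x86-64):

	/*
	 * last_addr        = 0x201000;
	 * last_folio_sz    = 0x200000;	// 2 MiB, i.e. HPAGE_PMD_SIZE
	 * r->sampling_addr = 0x208000;
	 *
	 * ALIGN_DOWN(0x201000, 0x200000) == ALIGN_DOWN(0x208000, 0x200000)
	 *                                == 0x200000
	 *
	 * Both addresses fall inside the same last-checked folio, so the
	 * cached 'last_accessed' result is reused instead of walking the
	 * page table again via damon_va_young().
	 */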