author	Jingxiang Zeng <linuszeng@tencent.com>	2024-08-30 16:22:44 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2024-11-05 16:56:21 -0800
commit	15ff4d409e1a6f939d94d2005ae275c26b2b0d9d (patch)
tree	c53b63f2652fd4d70dabb3efd5fa66ff15f75bce /mm/page_io.c
parent	ba7196e566516f798635e26e976ae44f708d9d54 (diff)
mm/memcontrol: add per-memcg pgpgin/pswpin counter
In proactive memory reclamation scenarios, it is necessary to estimate the pswpin and pswpout metrics of the cgroup to determine whether to continue reclaiming anonymous pages in the current batch. This patch collects these metrics and exposes them.

[linuszeng@tencent.com: v2]
Link: https://lkml.kernel.org/r/20240830082244.156923-1-jingxiangzeng.cas@gmail.com
Link: https://lkml.kernel.org/r/20240913084453.3605621-1-jingxiangzeng.cas@gmail.com
Link: https://lkml.kernel.org/r/20240830082244.156923-1-jingxiangzeng.cas@gmail.com
Signed-off-by: Jingxiang Zeng <linuszeng@tencent.com>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
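For context, the mm/page_io.c hunks below only add the per-memcg accounting calls; the counters themselves are exposed through each cgroup's memory.stat by the memcontrol part of the patch, which is not in this diffstat. The following userspace sketch shows how a proactive-reclaim daemon might read them, assuming the fields appear as "pswpin"/"pswpout" and using a hypothetical cgroup path /sys/fs/cgroup/example:

/* Sketch only: read the assumed pswpin/pswpout fields from a cgroup's
 * memory.stat. The cgroup path and field names are illustrative, not
 * taken from this diff. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/example/memory.stat", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* memory.stat is "name value" per line; keep only the swap counters */
		if (!strncmp(line, "pswpin ", 7) || !strncmp(line, "pswpout ", 8))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}

Comparing two such readings taken before and after a reclaim pass gives the per-cgroup swap-in/swap-out activity the commit message refers to.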
Diffstat (limited to 'mm/page_io.c')
-rw-r--r--	mm/page_io.c	4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index 69536a2b3c13..40392782cdcb 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -290,6 +290,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
 	}
 	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
 #endif
+	count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio));
 	count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
 
@@ -485,6 +486,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
 		for (p = 0; p < sio->pages; p++) {
 			struct folio *folio = page_folio(sio->bvec[p].bv_page);
 
+			count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
 			folio_mark_uptodate(folio);
 			folio_unlock(folio);
 		}
@@ -570,6 +572,7 @@ static void swap_read_folio_bdev_sync(struct folio *folio,
 	 * attempt to access it in the page fault retry time check.
 	 */
	get_task_struct(current);
+	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
 	count_vm_events(PSWPIN, folio_nr_pages(folio));
 	submit_bio_wait(&bio);
 	__end_swap_bio_read(&bio);
@@ -585,6 +588,7 @@ static void swap_read_folio_bdev_async(struct folio *folio,
 	bio->bi_iter.bi_sector = swap_folio_sector(folio);
 	bio->bi_end_io = end_swap_bio_read;
 	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
+	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
 	count_vm_events(PSWPIN, folio_nr_pages(folio));
 	submit_bio(bio);
 }
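All four hunks call the existing count_memcg_folio_events() helper from include/linux/memcontrol.h, which charges a vm_event to the memory cgroup that owns the folio, so PSWPIN/PSWPOUT are accounted to the same memcg that the swapped folio is charged to. A simplified sketch of that helper (from memory, not verbatim kernel source) looks like:

/* Simplified sketch: charge a VM event such as PSWPIN/PSWPOUT to the
 * memory cgroup the folio is charged to; folios without a memcg (e.g.
 * with memcg accounting disabled) are simply skipped. */
static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

Because the helper is a no-op when no memcg is attached, the added calls sit safely next to the existing global count_vm_events() accounting without changing behavior on non-memcg configurations.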