path: root/mm/rmap.c
author		Hugh Dickins <hugh@veritas.com>		2008-08-20 14:09:04 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-20 15:40:30 -0700
commit		16f8c5b2e64dec7faa5d3c7e9bdf0765e864e481 (patch)
tree		e0b8c713b7f246ac93953904f2dd8e488f9c276e /mm/rmap.c
parent		d0fd93781c49cbe127f9e7a5b402e9b167c105a6 (diff)
mm: page_remove_rmap comments on PageAnon
Add a comment to s390's page_test_dirty/page_clear_dirty/page_set_dirty dance in page_remove_rmap(): I was wrong to think the PageSwapCache test could be avoided, and would like a comment in there to remind me.  And mention s390, to help us remember that this block is not really common.

Also move down the "It would be tidy to reset PageAnon" comment: it does not belong to s390's block, and it would be unwise to reset PageAnon before we're done with testing it.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
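The "It would be tidy to reset PageAnon" comment being moved describes a real ordering hazard.  A minimal sketch of that race, assuming the rmap helpers of the 2.6.27-era code (atomic_add_negative(), atomic_inc_and_test() and __page_set_anon_rmap() are the functions used there; the interleaving itself is illustrative, not part of the patch):

	CPU A: page_remove_rmap()               CPU B: page_add_anon_rmap()
	-------------------------               ---------------------------
	atomic_add_negative() drops
	page->_mapcount to -1 (last unmap)
	                                        atomic_inc_and_test() raises
	                                        _mapcount back to 0
	                                        __page_set_anon_rmap() stores
	                                        the anon_vma in page->mapping
	page->mapping = NULL;	/* hypothetical reset: would overwrite CPU B's store */

So the reset is left to free_hot_cold_page(), once the page is truly free and no new mapping can appear; and while the page is mapped, page->mapping stays reliable, which also lets swapoff reinstate ptes faster for pages still in swapcache.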
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	25
1 files changed, 16 insertions, 9 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 1ea4e6fcee77..059774712c08 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -659,23 +659,30 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 		}
 		/*
-		 * It would be tidy to reset the PageAnon mapping here,
-		 * but that might overwrite a racing page_add_anon_rmap
-		 * which increments mapcount after us but sets mapping
-		 * before us: so leave the reset to free_hot_cold_page,
-		 * and remember that it's only reliable while mapped.
-		 * Leaving it set also helps swapoff to reinstate ptes
-		 * faster for those pages still in swapcache.
+		 * Now that the last pte has gone, s390 must transfer dirty
+		 * flag from storage key to struct page.  We can usually skip
+		 * this if the page is anon, so about to be freed; but perhaps
+		 * not if it's in swapcache - there might be another pte slot
+		 * containing the swap entry, but page not yet written to swap.
 		 */
 		if ((!PageAnon(page) || PageSwapCache(page)) &&
 		    page_test_dirty(page)) {
 			page_clear_dirty(page);
 			set_page_dirty(page);
 		}
-		mem_cgroup_uncharge_page(page);
+		mem_cgroup_uncharge_page(page);
 		__dec_zone_page_state(page,
-				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
+			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
+		/*
+		 * It would be tidy to reset the PageAnon mapping here,
+		 * but that might overwrite a racing page_add_anon_rmap
+		 * which increments mapcount after us but sets mapping
+		 * before us: so leave the reset to free_hot_cold_page,
+		 * and remember that it's only reliable while mapped.
+		 * Leaving it set also helps swapoff to reinstate ptes
+		 * faster for those pages still in swapcache.
+		 */
 	}
 }
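To make the new s390 comment concrete, here is an illustrative-only expansion of the condition in the hunk above into its separate cases; the real code keeps it as one test, and the helpers shown are the same ones the hunk uses (at the time, page_test_dirty() was defined to 0 on architectures other than s390, so the whole block compiled away there):

	if (PageAnon(page) && !PageSwapCache(page)) {
		/* Anon and not in swapcache: the page is about to be freed,
		 * so the s390 storage-key dirty bit can simply be dropped. */
	} else if (page_test_dirty(page)) {
		/* Either file-backed (its dirty state still matters to the
		 * filesystem), or anon but still in swapcache (another pte
		 * slot may hold the swap entry while the page has not yet
		 * been written to swap): transfer the storage-key dirty bit
		 * into struct page before the last rmap reference is gone. */
		page_clear_dirty(page);
		set_page_dirty(page);
	}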