author     Matthew Wilcox <willy@infradead.org>  2017-11-29 08:32:39 -0500
committer  Matthew Wilcox <willy@infradead.org>  2018-10-21 10:46:37 -0400
commit     4e17ec250fce0eba9b70a91c9622da2748a3ec50 (patch)
tree       20e5811ad93f57e28afc506dcba223227866ee73 /mm
parent     8d93b41c09d1b74f376e3b9779eb3badeb550301 (diff)
mm: Convert delete_from_swap_cache to XArray
Both callers of __delete_from_swap_cache have the swp_entry_t already, so pass
that in to make constructing the XA_STATE easier.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
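The core of the conversion is the loop in __delete_from_swap_cache(): rather than calling radix_tree_delete() once per subpage, an XArray cursor is built once from the swap entry's offset and then walked across the nr slots while the caller holds the i_pages lock. A commented sketch of that pattern, drawn from the hunk below and shown only as an illustration (the old entry returned by xas_store() is renamed to "old" here purely for clarity):

	/* Cursor starts at the entry's offset; the caller already holds xa_lock. */
	XA_STATE(xas, &address_space->i_pages, swp_offset(entry));

	for (i = 0; i < nr; i++) {
		/* Clear the slot; xas_store() returns the previous entry,
		 * which must be the subpage we expect to find there. */
		void *old = xas_store(&xas, NULL);
		VM_BUG_ON_PAGE(old != page + i, old);
		set_page_private(page + i, 0);
		/* Advance the cursor to the next index for the next subpage. */
		xas_next(&xas);
	}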
Diffstat (limited to 'mm')
-rw-r--r--  mm/swap_state.c  24
-rw-r--r--  mm/vmscan.c       2
2 files changed, 11 insertions, 15 deletions
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 850314c2c3ab..f393c994cc60 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -154,23 +154,22 @@ unlock:
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
-void __delete_from_swap_cache(struct page *page)
+void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
{
-	struct address_space *address_space;
+	struct address_space *address_space = swap_address_space(entry);
	int i, nr = hpage_nr_pages(page);
-	swp_entry_t entry;
-	pgoff_t idx;
+	pgoff_t idx = swp_offset(entry);
+	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

-	entry.val = page_private(page);
-	address_space = swap_address_space(entry);
-	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
-		radix_tree_delete(&address_space->i_pages, idx + i);
+		void *entry = xas_store(&xas, NULL);
+		VM_BUG_ON_PAGE(entry != page + i, entry);
		set_page_private(page + i, 0);
+		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
@@ -243,14 +242,11 @@ fail:
 */
void delete_from_swap_cache(struct page *page)
{
-	swp_entry_t entry;
-	struct address_space *address_space;
-
-	entry.val = page_private(page);
+	swp_entry_t entry = { .val = page_private(page) };
+	struct address_space *address_space = swap_address_space(entry);

-	address_space = swap_address_space(entry);
	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(page);
+	__delete_from_swap_cache(page, entry);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c7ce2c161225..80f731cf974e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -923,7 +923,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		mem_cgroup_swapout(page, swap);
-		__delete_from_swap_cache(page);
+		__delete_from_swap_cache(page, swap);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	} else {