author	Mel Gorman <mgorman@techsingularity.net>	2017-11-15 17:37:59 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 18:21:06 -0800
commit	2d4894b5d2ae0fe1725ea7abd57b33bfbbe45492
tree	ab55401d2d6181491fa9e767561275c2992680e6
parent	c6f92f9fbe7dbcc8903a67229aa88b4077ae4422
mm: remove cold parameter from free_hot_cold_page*
Most callers of free_hot_cold_page claim the pages being released are cache hot. The exception is the page reclaim paths, where it is likely that enough pages will be freed in the near future that the per-cpu lists are going to be recycled and the cache-hotness information is lost. As no one really cares about the hotness of pages being released to the allocator, just ditch the parameter.

The APIs are renamed to indicate that it's no longer about hot/cold pages. It should also be less confusing, as there are subtle differences between them. __free_pages drops a reference and frees a page when the refcount reaches zero. free_hot_cold_page handled pages whose refcount was already zero, which is non-obvious from the name. free_unref_page should be more obvious.

No performance impact is expected as the overhead is marginal. The parameter is removed simply because it is a bit stupid to have a useless parameter copied everywhere.

[mgorman@techsingularity.net: add pages to head, not tail]
Link: http://lkml.kernel.org/r/20171019154321.qtpzaeftoyyw4iey@techsingularity.net
Link: http://lkml.kernel.org/r/20171018075952.10627-8-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
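For reference, a minimal sketch (not part of this patch; the helper names are hypothetical) of the caller pattern the rename is meant to make obvious. put_page_testzero() drops the caller's reference and returns true once the refcount reaches zero, at which point the page may be handed to free_unref_page(); __free_pages(), by contrast, drops the reference itself:

static void example_release_page(struct page *page)
{
	/* hypothetical helper: the refcount must reach zero before freeing */
	if (put_page_testzero(page))
		free_unref_page(page);	/* order-0 page via the per-cpu lists */
}

static void example_release_higher_order(struct page *page, unsigned int order)
{
	/* __free_pages() drops a reference and frees once the refcount is zero */
	__free_pages(page, order);
}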
-rw-r--r--	arch/powerpc/mm/mmu_context_book3s64.c	2
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	2
-rw-r--r--	arch/sparc/mm/init_64.c	2
-rw-r--r--	arch/tile/mm/homecache.c	2
-rw-r--r--	include/linux/gfp.h	4
-rw-r--r--	include/trace/events/kmem.h	11
-rw-r--r--	mm/page_alloc.c	29
-rw-r--r--	mm/rmap.c	2
-rw-r--r--	mm/swap.c	4
-rw-r--r--	mm/vmscan.c	6
10 files changed, 28 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 05e15386d4cb..a7e998158f37 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -200,7 +200,7 @@ static void destroy_pagetable_page(struct mm_struct *mm)
/* We allow PTE_FRAG_NR fragments from a PTE page */
if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
pgtable_page_dtor(page);
- free_hot_cold_page(page, 0);
+ free_unref_page(page);
}
}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac0717a90ca6..1ec3aee43624 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -404,7 +404,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
if (put_page_testzero(page)) {
if (!kernel)
pgtable_page_dtor(page);
- free_hot_cold_page(page, 0);
+ free_unref_page(page);
}
}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 051f73401793..55ba62957e64 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2939,7 +2939,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm,
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
- free_hot_cold_page(page, 0);
+ free_unref_page(page);
return NULL;
}
return (pte_t *) page_address(page);
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index b51cc28acd0a..4432f31e8479 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -409,7 +409,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
if (put_page_testzero(page)) {
homecache_change_page_home(page, order, PAGE_HOME_HASH);
if (order == 0) {
- free_hot_cold_page(page, false);
+ free_unref_page(page);
} else {
init_page_count(page);
__free_pages(page, order);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b041f94678de..f7e62d9096fe 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -530,8 +530,8 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_hot_cold_page(struct page *page, bool cold);
-extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+extern void free_unref_page(struct page *page);
+extern void free_unref_page_list(struct list_head *list);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 285feeadac39..eb57e3037deb 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -172,24 +172,21 @@ TRACE_EVENT(mm_page_free,
TRACE_EVENT(mm_page_free_batched,
- TP_PROTO(struct page *page, int cold),
+ TP_PROTO(struct page *page),
- TP_ARGS(page, cold),
+ TP_ARGS(page),
TP_STRUCT__entry(
__field( unsigned long, pfn )
- __field( int, cold )
),
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->cold = cold;
),
- TP_printk("page=%p pfn=%lu order=0 cold=%d",
+ TP_printk("page=%p pfn=%lu order=0",
pfn_to_page(__entry->pfn),
- __entry->pfn,
- __entry->cold)
+ __entry->pfn)
);
TRACE_EVENT(mm_page_alloc,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6a3c4a1d513f..f265d37b3152 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2611,7 +2611,7 @@ void mark_free_pages(struct zone *zone)
}
#endif /* CONFIG_PM */
-static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn)
+static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
{
int migratetype;
@@ -2623,8 +2623,7 @@ static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn)
return true;
}
-static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
- bool cold)
+static void free_unref_page_commit(struct page *page, unsigned long pfn)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
@@ -2649,10 +2648,7 @@ static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
}
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- if (!cold)
- list_add(&page->lru, &pcp->lists[migratetype]);
- else
- list_add_tail(&page->lru, &pcp->lists[migratetype]);
+ list_add(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -2663,25 +2659,24 @@ static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
/*
* Free a 0-order page
- * cold == true ? free a cold page : free a hot page
*/
-void free_hot_cold_page(struct page *page, bool cold)
+void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
- if (!free_hot_cold_page_prepare(page, pfn))
+ if (!free_unref_page_prepare(page, pfn))
return;
local_irq_save(flags);
- free_hot_cold_page_commit(page, pfn, cold);
+ free_unref_page_commit(page, pfn);
local_irq_restore(flags);
}
/*
* Free a list of 0-order pages
*/
-void free_hot_cold_page_list(struct list_head *list, bool cold)
+void free_unref_page_list(struct list_head *list)
{
struct page *page, *next;
unsigned long flags, pfn;
@@ -2689,7 +2684,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
pfn = page_to_pfn(page);
- if (!free_hot_cold_page_prepare(page, pfn))
+ if (!free_unref_page_prepare(page, pfn))
list_del(&page->lru);
set_page_private(page, pfn);
}
@@ -2699,8 +2694,8 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
unsigned long pfn = page_private(page);
set_page_private(page, 0);
- trace_mm_page_free_batched(page, cold);
- free_hot_cold_page_commit(page, pfn, cold);
+ trace_mm_page_free_batched(page);
+ free_unref_page_commit(page, pfn);
}
local_irq_restore(flags);
}
@@ -4301,7 +4296,7 @@ void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
if (order == 0)
- free_hot_cold_page(page, false);
+ free_unref_page(page);
else
__free_pages_ok(page, order);
}
@@ -4359,7 +4354,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
unsigned int order = compound_order(page);
if (order == 0)
- free_hot_cold_page(page, false);
+ free_unref_page(page);
else
__free_pages_ok(page, order);
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 6b5a0f219ac0..47db27f8049e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1321,7 +1321,7 @@ void page_remove_rmap(struct page *page, bool compound)
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
* which increments mapcount after us but sets mapping
- * before us: so leave the reset to free_hot_cold_page,
+ * before us: so leave the reset to free_unref_page,
* and remember that it's only reliable while mapped.
* Leaving it set also helps swapoff to reinstate ptes
* faster for those pages still in swapcache.
diff --git a/mm/swap.c b/mm/swap.c
index 29cf75f1a860..b480279c760c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -76,7 +76,7 @@ static void __page_cache_release(struct page *page)
static void __put_single_page(struct page *page)
{
__page_cache_release(page);
- free_hot_cold_page(page, false);
+ free_unref_page(page);
}
static void __put_compound_page(struct page *page)
@@ -817,7 +817,7 @@ void release_pages(struct page **pages, int nr)
spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
mem_cgroup_uncharge_list(&pages_to_free);
- free_hot_cold_page_list(&pages_to_free, 0);
+ free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2852b8c5a917..c02c850ea349 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1349,7 +1349,7 @@ keep:
mem_cgroup_uncharge_list(&free_pages);
try_to_unmap_flush();
- free_hot_cold_page_list(&free_pages, true);
+ free_unref_page_list(&free_pages);
list_splice(&ret_pages, page_list);
count_vm_events(PGACTIVATE, pgactivate);
@@ -1824,7 +1824,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
spin_unlock_irq(&pgdat->lru_lock);
mem_cgroup_uncharge_list(&page_list);
- free_hot_cold_page_list(&page_list, true);
+ free_unref_page_list(&page_list);
/*
* If reclaim is isolating dirty pages under writeback, it implies
@@ -2063,7 +2063,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
spin_unlock_irq(&pgdat->lru_lock);
mem_cgroup_uncharge_list(&l_hold);
- free_hot_cold_page_list(&l_hold, true);
+ free_unref_page_list(&l_hold);
trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
nr_deactivate, nr_rotated, sc->priority, file);
}