author     Mel Gorman <mgorman@techsingularity.net>  2019-03-05 15:45:31 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-05 21:07:17 -0800
commit     dbe2d4e4f12e07c6a2215e3603a5f77056323081 (patch)
tree       03ac02c91b3900b20c999bbc48c064f5c584afd6 /mm/compaction.c
parent     d097a6f63522547dfc7c75c7084a05b6a7f9e838 (diff)
mm, compaction: round-robin the order while searching the free lists for a target
As compaction proceeds and creates high-order blocks, the free list search gets less efficient as the larger blocks are used as compaction targets. Eventually, the larger blocks will be behind the migration scanner for partially migrated pageblocks and the search fails. This patch round-robins which orders are searched so that larger blocks can be skipped in favour of smaller blocks that can be used as migration targets.

The overall impact was small on 1-socket machines, but it avoids corner cases where the migration/free scanners meet prematurely, or situations where many of the pageblocks encountered by the free scanner are almost full instead of being properly packed. Previous testing had indicated that, without this patch, there were occasional large spikes in the free scanner.

[dan.carpenter@oracle.com: fix static checker warning]
Link: http://lkml.kernel.org/r/20190118175136.31341-20-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 452beef0541e..b3055983a80f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1147,6 +1147,24 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
set_pageblock_skip(page);
}
+/* Search orders in round-robin fashion */
+static int next_search_order(struct compact_control *cc, int order)
+{
+ order--;
+ if (order < 0)
+ order = cc->order - 1;
+
+ /* Search wrapped around? */
+ if (order == cc->search_order) {
+ cc->search_order--;
+ if (cc->search_order < 0)
+ cc->search_order = cc->order - 1;
+ return -1;
+ }
+
+ return order;
+}
+
static unsigned long
fast_isolate_freepages(struct compact_control *cc)
{
@@ -1183,9 +1201,15 @@ fast_isolate_freepages(struct compact_control *cc)
if (WARN_ON_ONCE(min_pfn > low_pfn))
low_pfn = min_pfn;
- for (order = cc->order - 1;
- order >= 0 && !page;
- order--) {
+ /*
+ * Search starts from the last successful isolation order or the next
+ * order to search after a previous failure
+ */
+ cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
+
+ for (order = cc->search_order;
+ !page && order >= 0;
+ order = next_search_order(cc, order)) {
struct free_area *area = &cc->zone->free_area[order];
struct list_head *freelist;
struct page *freepage;
@@ -1209,6 +1233,7 @@ fast_isolate_freepages(struct compact_control *cc)
if (pfn >= low_pfn) {
cc->fast_search_fail = 0;
+ cc->search_order = order;
page = freepage;
break;
}
@@ -2138,6 +2163,7 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.total_migrate_scanned = 0,
.total_free_scanned = 0,
.order = order,
+ .search_order = order,
.gfp_mask = gfp_mask,
.zone = zone,
.mode = (prio == COMPACT_PRIO_ASYNC) ?
@@ -2369,6 +2395,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
struct zone *zone;
struct compact_control cc = {
.order = pgdat->kcompactd_max_order,
+ .search_order = pgdat->kcompactd_max_order,
.total_migrate_scanned = 0,
.total_free_scanned = 0,
.classzone_idx = pgdat->kcompactd_classzone_idx,
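
For readers who want to see the rotation in isolation, below is a minimal userspace sketch. next_search_order() is copied from the patch; the two-field compact_control, the demo_search() harness, and the order-4 starting values are hypothetical stand-ins for the real structure and for the free-list scan in fast_isolate_freepages().

#include <stdio.h>

struct compact_control {
	int order;		/* requested allocation order */
	int search_order;	/* order to start the next search from */
};

/*
 * Copied from the patch: walk the orders downwards, wrapping to
 * cc->order - 1, and return -1 once the walk arrives back at the
 * starting order, advancing cc->search_order for the next attempt.
 */
static int next_search_order(struct compact_control *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	/* Search wrapped around? */
	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return order;
}

/*
 * Hypothetical stand-in for the scan loop in fast_isolate_freepages():
 * no free page is ever "found", so every search wraps fully around.
 */
static void demo_search(struct compact_control *cc)
{
	int order;

	printf("start %d, visit:", cc->search_order);
	for (order = cc->search_order; order >= 0;
	     order = next_search_order(cc, order))
		printf(" %d", order);
	printf(" (next start: %d)\n", cc->search_order);
}

int main(void)
{
	/* Hypothetical order-4 request; 3 is the highest searchable order */
	struct compact_control cc = { .order = 4, .search_order = 3 };

	demo_search(&cc);	/* visit: 3 2 1 0, next start 2 */
	demo_search(&cc);	/* visit: 2 1 0 3, next start 1 */
	return 0;
}

Each fully failed search leaves cc->search_order one lower for the next attempt, so repeated failures stop the scan from always probing the largest orders first; in the real code a successful isolation instead records the order that worked as the new starting point.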