author		Mel Gorman <mgorman@techsingularity.net>	2016-07-28 15:47:31 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 16:07:41 -0700
commit		5a1c84b404a7176b8b36e2a0041b6f0adb3151a3 (patch)
tree		ff98e242c5d4d3a24ca49f6ddc707028aeb938f9 /mm/page_alloc.c
parent		bb4cc2bea6df7854d629bff114ca03237cc718d6 (diff)
mm: remove reclaim and compaction retry approximations
If per-zone LRU accounting is available then there is no point approximating whether reclaim and compaction should retry based on pgdat statistics. This is effectively a revert of "mm, vmstat: remove zone and node double accounting by approximating retries" with the difference that inactive/active stats are still available. This preserves the history of why the approximation was retried and why it had to be reverted to handle OOM kills on 32-bit systems.

Link: http://lkml.kernel.org/r/1469110261-7365-4-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
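The core of the change is easiest to see outside the allocator. The sketch below is a hypothetical userspace model of the per-zone retry estimate that the patch restores, not kernel code: the zone's reclaimable pages are decayed linearly by the number of reclaim rounds that made no progress, the zone's free pages are added back, and a retry is only worthwhile while that optimistic total could still clear the minimum watermark (collapsed here to a plain comparison, where the kernel uses __zone_watermark_ok()). MAX_RECLAIM_RETRIES and DIV_ROUND_UP mirror the kernel definitions; every other name and number is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Mirror the kernel definitions; the rest of this file is illustrative only. */
#define MAX_RECLAIM_RETRIES 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Hypothetical model of the per-zone retry estimate: start from the zone's
 * reclaimable pages, shrink that budget linearly with the number of reclaim
 * rounds that made no progress, add the zone's free pages, and keep retrying
 * only while the optimistic total would still exceed the min watermark
 * (the kernel performs the real check with __zone_watermark_ok()).
 */
static bool worth_retrying(unsigned long reclaimable, unsigned long free_pages,
                           unsigned long min_wmark, int no_progress_loops)
{
        unsigned long available = reclaimable;

        available -= DIV_ROUND_UP(no_progress_loops * available,
                                  MAX_RECLAIM_RETRIES);
        available += free_pages;

        return available > min_wmark;
}

int main(void)
{
        /* Invented example zone: 10000 reclaimable pages, 200 free, watermark 500. */
        for (int loops = 0; loops <= MAX_RECLAIM_RETRIES; loops++)
                printf("no_progress_loops=%2d -> retry=%d\n", loops,
                       worth_retrying(10000, 200, 500, loops));
        return 0;
}

With those example numbers the estimate keeps justifying a retry until no_progress_loops approaches MAX_RECLAIM_RETRIES, which is the convergence-to-OOM behaviour the surrounding code depends on.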
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	49
1 file changed, 10 insertions(+), 39 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 759cfa8cbbeb..dfdb608f7b3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3402,7 +3402,6 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
 {
         struct zone *zone;
         struct zoneref *z;
-        pg_data_t *current_pgdat = NULL;

         /*
          * Make sure we converge to OOM if we cannot make any progress
@@ -3412,15 +3411,6 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
                 return false;

         /*
-         * Blindly retry lowmem allocation requests that are often ignored by
-         * the OOM killer up to MAX_RECLAIM_RETRIES as we not have a reliable
-         * and fast means of calculating reclaimable, dirty and writeback pages
-         * in eligible zones.
-         */
-        if (ac->high_zoneidx < ZONE_NORMAL)
-                goto out;
-
-        /*
          * Keep reclaiming pages while there is a chance this will lead
          * somewhere. If none of the target zones can satisfy our allocation
          * request even if all reclaimable pages are considered then we are
@@ -3430,38 +3420,18 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
                                         ac->nodemask) {
                 unsigned long available;
                 unsigned long reclaimable;
-                int zid;

-                if (current_pgdat == zone->zone_pgdat)
-                        continue;
-
-                current_pgdat = zone->zone_pgdat;
-                available = reclaimable = pgdat_reclaimable_pages(current_pgdat);
+                available = reclaimable = zone_reclaimable_pages(zone);
                 available -= DIV_ROUND_UP(no_progress_loops * available,
                                           MAX_RECLAIM_RETRIES);
-
-                /* Account for all free pages on eligible zones */
-                for (zid = 0; zid <= zone_idx(zone); zid++) {
-                        struct zone *acct_zone = &current_pgdat->node_zones[zid];
-
-                        available += zone_page_state_snapshot(acct_zone, NR_FREE_PAGES);
-                }
+                available += zone_page_state_snapshot(zone, NR_FREE_PAGES);

                 /*
                  * Would the allocation succeed if we reclaimed the whole
-                 * available? This is approximate because there is no
-                 * accurate count of reclaimable pages per zone.
+                 * available?
                  */
-                for (zid = 0; zid <= zone_idx(zone); zid++) {
-                        struct zone *check_zone = &current_pgdat->node_zones[zid];
-                        unsigned long estimate;
-
-                        estimate = min(check_zone->managed_pages, available);
-                        if (!__zone_watermark_ok(check_zone, order,
-                                        min_wmark_pages(check_zone), ac_classzone_idx(ac),
-                                        alloc_flags, estimate))
-                                continue;
-
+                if (__zone_watermark_ok(zone, order, min_wmark_pages(zone),
+                                ac_classzone_idx(ac), alloc_flags, available)) {
                         /*
                          * If we didn't make any progress and have a lot of
                          * dirty + writeback pages then we should wait for
@@ -3471,16 +3441,15 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
                         if (!did_some_progress) {
                                 unsigned long write_pending;

-                                write_pending =
-                                        node_page_state(current_pgdat, NR_WRITEBACK) +
-                                        node_page_state(current_pgdat, NR_FILE_DIRTY);
+                                write_pending = zone_page_state_snapshot(zone,
+                                                        NR_ZONE_WRITE_PENDING);

                                 if (2 * write_pending > reclaimable) {
                                         congestion_wait(BLK_RW_ASYNC, HZ/10);
                                         return true;
                                 }
                         }
-out:
+
                         /*
                          * Memory allocation/reclaim might be called from a WQ
                          * context and the current implementation of the WQ
@@ -4361,6 +4330,7 @@ void show_free_areas(unsigned int filter)
                         " active_file:%lukB"
                         " inactive_file:%lukB"
                         " unevictable:%lukB"
+                        " writepending:%lukB"
                         " present:%lukB"
                         " managed:%lukB"
                         " mlocked:%lukB"
@@ -4383,6 +4353,7 @@ void show_free_areas(unsigned int filter)
                         K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
                         K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
                         K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
+                        K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
                         K(zone->present_pages),
                         K(zone->managed_pages),
                         K(zone_page_state(zone, NR_MLOCK)),
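The other behavioural piece touched above is the writeback throttle inside should_reclaim_retry(): when a reclaim round made no progress and pages pending write-out account for more than half of the zone's reclaimable pages (2 * write_pending > reclaimable), the kernel calls congestion_wait(BLK_RW_ASYNC, HZ/10) and retries rather than spinning. A minimal userspace sketch of just that predicate, with invented names and numbers:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the throttle decision: back off and let I/O
 * complete when nothing was reclaimed and write-pending pages dominate
 * the reclaimable set.  The kernel sleeps via congestion_wait() instead
 * of returning a flag like this.
 */
static bool should_throttle(bool did_some_progress, unsigned long write_pending,
                            unsigned long reclaimable)
{
        return !did_some_progress && 2 * write_pending > reclaimable;
}

int main(void)
{
        printf("throttle=%d\n", should_throttle(false, 5000, 8000)); /* 1: mostly write-pending */
        printf("throttle=%d\n", should_throttle(false, 3000, 8000)); /* 0: enough clean pages left */
        return 0;
}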