author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2009-09-21 17:01:36 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 07:17:28 -0700
commit    b35ea17b7bbf5dea35faa0de11030acc620c3197 (patch)
tree      fba2dabec71193f769c16c3737e152900682bbfa /mm/vmscan.c
parent    44c241f166b31999482c3c40448f4bbb2157a804 (diff)
download  lwn-b35ea17b7bbf5dea35faa0de11030acc620c3197.tar.gz
          lwn-b35ea17b7bbf5dea35faa0de11030acc620c3197.zip
mm: shrink_inactive_list() nr_scan accounting fix fix
If sc->isolate_pages() returns 0, there is no need to call shrink_page_list(). shrink_inactive_list() used to handle this case properly, but commit fb8d14e1 (a commit from three years ago!) broke it: the current shrink_inactive_list() always calls shrink_page_list(), even when isolate_pages() returns 0. This patch restores the proper return-value check.

Requirements:
o The "nr_taken == 0" check must come before the call to shrink_page_list().
o The "nr_taken == 0" check must come after the nr_scan-related statistics have been updated.

Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
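The ordering requirement above can be illustrated with a minimal user-space sketch. This is not the kernel code: isolate_pages(), account_nr_scan() and shrink_page_list() below are simplified stand-ins for sc->isolate_pages(), the zone->pages_scanned / PGSCAN_* accounting, and the real shrink_page_list(). Only the control flow mirrors the patch: account the scan statistics first, then bail out on "nr_taken == 0" before doing the expensive reclaim work.

/*
 * Minimal user-space sketch (NOT the kernel code) of the ordering the
 * patch enforces in shrink_inactive_list():
 *   1) isolate pages and record how many were scanned,
 *   2) account the scan statistics unconditionally,
 *   3) skip the expensive reclaim pass when nothing was isolated.
 */
#include <stdio.h>

/* Stand-in for sc->isolate_pages(): scans 'max' pages, isolates none here. */
static unsigned long isolate_pages(unsigned long max, unsigned long *nr_scan)
{
	*nr_scan = max;		/* we still looked at 'max' pages ... */
	return 0;		/* ... but none could be isolated this round */
}

/* Stand-in for the zone->pages_scanned / PGSCAN_* accounting. */
static void account_nr_scan(unsigned long nr_scan)
{
	printf("accounted %lu scanned pages\n", nr_scan);
}

/* Stand-in for shrink_page_list(): pointless to run on an empty list. */
static unsigned long shrink_page_list(unsigned long nr_taken)
{
	printf("reclaiming from %lu isolated pages\n", nr_taken);
	return nr_taken;
}

static unsigned long shrink_inactive_list(unsigned long max_scan)
{
	unsigned long nr_scanned = 0, nr_reclaimed = 0;

	do {
		unsigned long nr_scan;
		unsigned long nr_taken = isolate_pages(32, &nr_scan);

		nr_scanned += nr_scan;
		account_nr_scan(nr_scan);	/* statistics first ... */

		if (nr_taken == 0)		/* ... then the early exit ... */
			break;

		/* ... and only now the expensive reclaim work. */
		nr_reclaimed += shrink_page_list(nr_taken);
	} while (nr_scanned < max_scan);

	return nr_reclaimed;
}

int main(void)
{
	printf("reclaimed %lu pages\n", shrink_inactive_list(128));
	return 0;
}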
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	30
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 45a150a3a442..d86a91f8c16b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1076,6 +1076,20 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
 			     &page_list, &nr_scan, sc->order, mode,
 				zone, sc->mem_cgroup, 0, file);
+
+		if (scanning_global_lru(sc)) {
+			zone->pages_scanned += nr_scan;
+			if (current_is_kswapd())
+				__count_zone_vm_events(PGSCAN_KSWAPD, zone,
+						       nr_scan);
+			else
+				__count_zone_vm_events(PGSCAN_DIRECT, zone,
+						       nr_scan);
+		}
+
+		if (nr_taken == 0)
+			goto done;
+
 		nr_active = clear_active_flags(&page_list, count);
 		__count_vm_events(PGDEACTIVATE, nr_active);
@@ -1088,8 +1102,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
 						-count[LRU_INACTIVE_ANON]);
-		if (scanning_global_lru(sc))
-			zone->pages_scanned += nr_scan;
 		reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
 		reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
@@ -1123,18 +1135,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		}
 		nr_reclaimed += nr_freed;
+
 		local_irq_disable();
-		if (current_is_kswapd()) {
-			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+		if (current_is_kswapd())
 			__count_vm_events(KSWAPD_STEAL, nr_freed);
-		} else if (scanning_global_lru(sc))
-			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-
 		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
-		if (nr_taken == 0)
-			goto done;
-
 		spin_lock(&zone->lru_lock);
 		/*
 		 * Put back any unfreeable pages.
@@ -1164,9 +1170,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			}
 		}
 	} while (nr_scanned < max_scan);
-	spin_unlock(&zone->lru_lock);
+
 done:
-	local_irq_enable();
+	spin_unlock_irq(&zone->lru_lock);
 	pagevec_release(&pvec);
 	return nr_reclaimed;
 }