| field | value | date |
|---|---|---|
| author | Konstantin Khlebnikov <khlebnikov@openvz.org> | 2012-05-29 15:07:01 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-29 16:22:26 -0700 |
| commit | 90126375d89ab8e0bde30ff22139b6097d56ed8a (patch) | |
| tree | 3e18ac488acb6bf6f413a57eb8095f04e49d7cb2 /mm/vmscan.c | |
| parent | 1a93be0e7a6fc7f3d19101402665c7a958beb568 (diff) | |
| download | lwn-90126375d89ab8e0bde30ff22139b6097d56ed8a.tar.gz lwn-90126375d89ab8e0bde30ff22139b6097d56ed8a.zip | |
mm/vmscan: push lruvec pointer into get_scan_count()
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 25 |
1 file changed, 9 insertions, 16 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6dbf2c2082e7..b139ad7f396e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -150,11 +150,6 @@ static bool global_reclaim(struct scan_control *sc)
 }
 #endif
 
-static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
-{
-        return &mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup)->reclaim_stat;
-}
-
 static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
 {
         if (!mem_cgroup_disabled())
@@ -1581,20 +1576,18 @@ static int vmscan_swappiness(struct scan_control *sc)
  *
  * nr[0] = anon pages to scan; nr[1] = file pages to scan
  */
-static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
+static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
                            unsigned long *nr)
 {
         unsigned long anon, file, free;
         unsigned long anon_prio, file_prio;
         unsigned long ap, fp;
-        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
         u64 fraction[2], denominator;
         enum lru_list lru;
         int noswap = 0;
         bool force_scan = false;
-        struct lruvec *lruvec;
-
-        lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
+        struct zone *zone = lruvec_zone(lruvec);
 
         /*
          * If the zone or memcg is small, nr[l] can be 0. This
@@ -1606,7 +1599,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
          * latencies, so it's better to scan a minimum amount there as
          * well.
          */
-        if (current_is_kswapd() && mz->zone->all_unreclaimable)
+        if (current_is_kswapd() && zone->all_unreclaimable)
                 force_scan = true;
         if (!global_reclaim(sc))
                 force_scan = true;
@@ -1626,10 +1619,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
                 get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 
         if (global_reclaim(sc)) {
-                free = zone_page_state(mz->zone, NR_FREE_PAGES);
+                free = zone_page_state(zone, NR_FREE_PAGES);
                 /* If we have very few page cache pages,
                    force-scan anon pages. */
-                if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
+                if (unlikely(file + free <= high_wmark_pages(zone))) {
                         fraction[0] = 1;
                         fraction[1] = 0;
                         denominator = 1;
@@ -1655,7 +1648,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
          *
          * anon in [0], file in [1]
          */
-        spin_lock_irq(&mz->zone->lru_lock);
+        spin_lock_irq(&zone->lru_lock);
         if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
                 reclaim_stat->recent_scanned[0] /= 2;
                 reclaim_stat->recent_rotated[0] /= 2;
@@ -1676,7 +1669,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 
         fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
         fp /= reclaim_stat->recent_rotated[1] + 1;
-        spin_unlock_irq(&mz->zone->lru_lock);
+        spin_unlock_irq(&zone->lru_lock);
 
         fraction[0] = ap;
         fraction[1] = fp;
@@ -1794,7 +1787,7 @@ static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
 restart:
         nr_reclaimed = 0;
         nr_scanned = sc->nr_scanned;
-        get_scan_count(mz, sc, nr);
+        get_scan_count(lruvec, sc, nr);
 
         blk_start_plug(&plug);
         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
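
The conversion is mechanical: instead of handing get_scan_count() a (zone, memcg) pair and re-deriving the lruvec and its reclaim statistics internally, the caller resolves the lruvec once and passes it down, and the zone is recovered from the lruvec via lruvec_zone() where it is still needed. The stand-alone userspace sketch below shows only that call shape; the struct layouts, helper bodies and toy arithmetic are simplified assumptions for illustration, not the kernel's real definitions.

/*
 * Minimal sketch of the "push the lruvec pointer down" pattern.
 * All types and helpers here are simplified stand-ins, not the
 * kernel's real mm structures.
 */
#include <stdio.h>

struct zone {
        unsigned long nr_free_pages;
};

struct zone_reclaim_stat {
        unsigned long recent_scanned[2];
        unsigned long recent_rotated[2];
};

struct lruvec {
        struct zone *zone;                      /* backing zone */
        struct zone_reclaim_stat reclaim_stat;
};

/* stand-in for lruvec_zone(): map an lruvec back to its zone */
static struct zone *lruvec_zone(struct lruvec *lruvec)
{
        return lruvec->zone;
}

/* stand-in for get_scan_count() after the patch: takes the lruvec directly */
static void get_scan_count(struct lruvec *lruvec, unsigned long *nr)
{
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
        struct zone *zone = lruvec_zone(lruvec);

        /* toy "scan target" numbers, just to exercise both fields */
        nr[0] = reclaim_stat->recent_scanned[0] + zone->nr_free_pages / 2;
        nr[1] = reclaim_stat->recent_scanned[1];
}

int main(void)
{
        struct zone zone = { .nr_free_pages = 128 };
        struct lruvec lruvec = {
                .zone = &zone,
                .reclaim_stat = { .recent_scanned = { 4, 8 } },
        };
        unsigned long nr[2];

        /* the caller resolves the lruvec once and hands it down */
        get_scan_count(&lruvec, nr);
        printf("anon to scan: %lu, file to scan: %lu\n", nr[0], nr[1]);
        return 0;
}

Compiled with e.g. gcc -Wall sketch.c, it just prints the toy scan targets; the point is that the lookup happens once in the caller, so a helper in this shape no longer needs a struct mem_cgroup_zone at all.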