| | | |
|---|---|---|
| author | Hugh Dickins <hughd@google.com> | 2012-05-29 15:07:09 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-29 16:22:28 -0700 |
| commit | 75b00af77ed5b5a3d55549f9e0c33f3969b9330c (patch) | |
| tree | 17b59700daed9f536f50c375243ba6ce86c85945 /mm/vmscan.c | |
| parent | 4d7dcca213921fbaf08ee05359d28e4aaf2245f1 (diff) | |
mm: trivial cleanups in vmscan.c
Utter trivia in mm/vmscan.c, mostly just reducing the line count slightly;
the most exciting change is get_scan_count() calling vmscan_swappiness()
once instead of twice.
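
For illustration only (not part of the commit), here is a minimal user-space C sketch of that get_scan_count() tweak: read the swappiness value once and derive file_prio from it, instead of calling the helper twice. The vmscan_swappiness() stand-in and its return value of 60 (the common default swappiness) are assumptions made for the sketch.

```c
#include <stdio.h>

/* Stand-in for the kernel's vmscan_swappiness(sc); returning 60 (the usual
 * default swappiness) is an assumption for this sketch. */
static int vmscan_swappiness(void)
{
	return 60;
}

int main(void)
{
	/* After the cleanup: one call, with file_prio derived from anon_prio. */
	int anon_prio = vmscan_swappiness();
	int file_prio = 200 - anon_prio;	/* was: 200 - vmscan_swappiness() */

	printf("anon_prio=%d file_prio=%d\n", anon_prio, file_prio);
	return 0;
}
```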
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 31 |
1 file changed, 10 insertions, 21 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8b941f303cea..05d439dc1af9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1025,12 +1025,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		unsigned long *nr_scanned, struct scan_control *sc,
 		isolate_mode_t mode, enum lru_list lru)
 {
-	struct list_head *src;
+	struct list_head *src = &lruvec->lists[lru];
 	unsigned long nr_taken = 0;
 	unsigned long scan;
-	int file = is_file_lru(lru);
-
-	src = &lruvec->lists[lru];
 
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
 		struct page *page;
@@ -1058,11 +1055,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	}
 
 	*nr_scanned = scan;
-
-	trace_mm_vmscan_lru_isolate(sc->order,
-			nr_to_scan, scan,
-			nr_taken,
-			mode, file);
+	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+				    nr_taken, mode, is_file_lru(lru));
 	return nr_taken;
 }
 
@@ -1140,8 +1134,7 @@ static int too_many_isolated(struct zone *zone, int file,
 }
 
 static noinline_for_stack void
-putback_inactive_pages(struct lruvec *lruvec,
-		       struct list_head *page_list)
+putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	struct zone *zone = lruvec_zone(lruvec);
@@ -1235,11 +1228,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
-			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
-					       nr_scanned);
+			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
 		else
-			__count_zone_vm_events(PGSCAN_DIRECT, zone,
-					       nr_scanned);
+			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 
@@ -1534,9 +1525,9 @@ static int inactive_file_is_low(struct lruvec *lruvec)
 		return inactive_file_is_low_global(lruvec_zone(lruvec));
 }
 
-static int inactive_list_is_low(struct lruvec *lruvec, int file)
+static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
 {
-	if (file)
+	if (is_file_lru(lru))
 		return inactive_file_is_low(lruvec);
 	else
 		return inactive_anon_is_low(lruvec);
@@ -1545,10 +1536,8 @@ static int inactive_list_is_low(struct lruvec *lruvec, int file)
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 				 struct lruvec *lruvec, struct scan_control *sc)
 {
-	int file = is_file_lru(lru);
-
 	if (is_active_lru(lru)) {
-		if (inactive_list_is_low(lruvec, file))
+		if (inactive_list_is_low(lruvec, lru))
 			shrink_active_list(nr_to_scan, lruvec, sc, lru);
 		return 0;
 	}
@@ -1630,7 +1619,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	 * This scanning priority is essentially the inverse of IO cost.
 	 */
 	anon_prio = vmscan_swappiness(sc);
-	file_prio = 200 - vmscan_swappiness(sc);
+	file_prio = 200 - anon_prio;
 
 	/*
 	 * OK, so we have swap space and a fair amount of page cache
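
Most of the other hunks follow one pattern: drop a cached `int file` local and pass the `enum lru_list` value through instead, deriving the file/anon distinction on demand with is_file_lru(). Below is a rough, standalone user-space sketch of that pattern; the enum values and the is_file_lru() test mirror the kernel headers of that period, but the program itself is only an illustration.

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the LRU list indices from include/linux/mmzone.h of that era
 * (simplified: the LRU_BASE/LRU_FILE offset arithmetic is omitted). */
enum lru_list {
	LRU_INACTIVE_ANON,
	LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE,
	LRU_ACTIVE_FILE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

/* Same test the kernel helper performs: is this one of the file LRUs? */
static bool is_file_lru(enum lru_list lru)
{
	return lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE;
}

int main(void)
{
	/* Instead of caching "int file = is_file_lru(lru);" in every caller,
	 * the cleanup passes lru around and asks is_file_lru() where needed. */
	for (int lru = 0; lru < NR_LRU_LISTS; lru++)
		printf("lru %d: file=%d\n", lru, is_file_lru(lru));
	return 0;
}
```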