author	Johannes Weiner <hannes@cmpxchg.org>	2013-02-22 16:32:12 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 17:50:09 -0800
commit	d778df51c09264076fe0208c099ef7d428f21790 (patch)
tree	8022d838d72a46c23736218c460714652282a6db
parent	7c5bd705d8f983ae1868a126956f5aa3a6702e3f (diff)
mm: vmscan: save work scanning (almost) empty LRU lists
In certain cases (kswapd reclaim, memcg target reclaim), a fixed minimum amount of pages is scanned from the LRU lists on each iteration, to make progress.

Do not make this minimum bigger than the respective LRU list size, however, and save some busy work trying to isolate and reclaim pages that are not there.

Empty LRU lists are quite common with memory cgroups in NUMA environments because there exists a set of LRU lists for each zone for each memory cgroup, while the memory of a single cgroup is expected to stay on just one node.  The number of expected empty LRU lists is thus

  memcgs * (nodes - 1) * lru types

Each attempt to reclaim from an empty LRU list does expensive size comparisons between lists, acquires the zone's lru lock etc.  Avoid that.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Satoru Moriya <satoru.moriya@hds.com>
Cc: Simon Jeons <simon.jeons@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
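As a quick illustration of the clamp this patch introduces, here is a minimal userspace sketch (not kernel code: scan_target() is a hypothetical stand-in for the tail of get_scan_count(), and the swappiness fraction / div64_u64() step is omitted):

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

/* Hypothetical stand-in for the per-LRU scan target: the forced
 * minimum batch is clamped to the pages actually on the list, so an
 * empty LRU yields a target of 0 and is skipped instead of taking
 * the zone's lru lock for nothing. */
static unsigned long scan_target(unsigned long lru_size, int priority,
				 int force_scan)
{
	unsigned long scan = lru_size >> priority;

	if (!scan && force_scan)
		scan = lru_size < SWAP_CLUSTER_MAX ? lru_size : SWAP_CLUSTER_MAX;
	return scan;
}

int main(void)
{
	/* Empty LRU: previously forced to SWAP_CLUSTER_MAX, now 0.    */
	printf("%lu\n", scan_target(0, 12, 1));       /* 0   */
	/* Nearly empty LRU: clamped to the pages actually present.    */
	printf("%lu\n", scan_target(5, 12, 1));       /* 5   */
	/* Large LRU: the priority shift already gives a target.       */
	printf("%lu\n", scan_target(1 << 20, 12, 1)); /* 256 */
	return 0;
}

With per-memcg, per-node LRU lists, most of these calls see a size of 0 in the NUMA scenario described above, so the clamp turns the forced minimum into a no-op rather than a pointless pass through page isolation.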
-rw-r--r--	include/linux/swap.h	2
-rw-r--r--	mm/vmscan.c	10
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 68df9c17fbbb..8c66486a8ca8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -156,7 +156,7 @@ enum {
 	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
 };
 
-#define SWAP_CLUSTER_MAX 32
+#define SWAP_CLUSTER_MAX 32UL
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
 /*
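The UL suffix on SWAP_CLUSTER_MAX matters because get_lru_size() returns an unsigned long and the new min(size, SWAP_CLUSTER_MAX) in vmscan.c goes through the kernel's type-checked min() macro, which warns when its arguments have different types. A rough sketch of that behaviour (min_sketch() below is a simplified stand-in, not the kernel's actual definition):

#include <stdio.h>

/* Simplified type-checked min: the dummy pointer comparison makes the
 * compiler emit "comparison of distinct pointer types" if x and y do
 * not share a type (GNU C, as used by the kernel). */
#define min_sketch(x, y) ({			\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

#define SWAP_CLUSTER_MAX 32UL	/* plain 32 (int) would trip the check */

int main(void)
{
	unsigned long size = 5;

	/* Both operands are unsigned long, so this compiles cleanly. */
	printf("%lu\n", min_sketch(size, SWAP_CLUSTER_MAX)); /* prints 5 */
	return 0;
}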
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ff842d9a7714..e4521ba1ddd0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1761,15 +1761,17 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 out:
 	for_each_evictable_lru(lru) {
 		int file = is_file_lru(lru);
+		unsigned long size;
 		unsigned long scan;
 
-		scan = get_lru_size(lruvec, lru);
+		size = get_lru_size(lruvec, lru);
 		if (sc->priority || noswap || !vmscan_swappiness(sc)) {
-			scan >>= sc->priority;
+			scan = size >> sc->priority;
 			if (!scan && force_scan)
-				scan = SWAP_CLUSTER_MAX;
+				scan = min(size, SWAP_CLUSTER_MAX);
 			scan = div64_u64(scan * fraction[file], denominator);
-		}
+		} else
+			scan = size;
 		nr[lru] = scan;
 	}
 }