author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-20 20:00:43 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-20 20:00:43 -0800
commit    4c9a44aebeaef35570a67aed17b72a2cf8d0b219 (patch)
tree      abb874fe7f50671627b282f6c7fb58db5e75a2e3 /mm
parent    1f0377ff088ed2971c57debc9b0c3b846ec431fd (diff)
parent    cfde819088422503b5c69e03ab7bb90f87121d4d (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge the rest of Andrew's patches for -rc1:

  "A bunch of fixes and misc missed-out-on things. That'll do for -rc1.
   I still have a batch of IPC patches which still have a possible bug
   report which I'm chasing down."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (25 commits)
  keys: use keyring_alloc() to create module signing keyring
  keys: fix unreachable code
  sendfile: allows bypassing of notifier events
  SGI-XP: handle non-fatal traps
  fat: fix incorrect function comment
  Documentation: ABI: remove testing/sysfs-devices-node
  proc: fix inconsistent lock state
  linux/kernel.h: fix DIV_ROUND_CLOSEST with unsigned divisors
  memcg: don't register hotcpu notifier from ->css_alloc()
  checkpatch: warn on uapi #includes that #include <uapi/...
  revert "rtc: recycle id when unloading a rtc driver"
  mm: clean up transparent hugepage sysfs error messages
  hfsplus: add error message for the case of failure of sync fs in delayed_sync_fs() method
  hfsplus: rework processing of hfs_btree_write() returned error
  hfsplus: rework processing errors in hfsplus_free_extents()
  hfsplus: avoid crash on failed block map free
  kcmp: include linux/ptrace.h
  drivers/rtc/rtc-imxdi.c: must include <linux/spinlock.h>
  mm: cma: WARN if freed memory is still in use
  exec: do not leave bprm->interp on stack
  ...
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c      26
-rw-r--r--  mm/huge_memory.c      6
-rw-r--r--  mm/memcontrol.c      14
-rw-r--r--  mm/page-writeback.c  25
-rw-r--r--  mm/page_alloc.c      11
5 files changed, 65 insertions(+), 17 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 5ad7f4f4d6f7..6b807e466497 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -17,6 +17,21 @@
#include <linux/balloon_compaction.h>
#include "internal.h"
+#ifdef CONFIG_COMPACTION
+static inline void count_compact_event(enum vm_event_item item)
+{
+ count_vm_event(item);
+}
+
+static inline void count_compact_events(enum vm_event_item item, long delta)
+{
+ count_vm_events(item, delta);
+}
+#else
+#define count_compact_event(item) do { } while (0)
+#define count_compact_events(item, delta) do { } while (0)
+#endif
+
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#define CREATE_TRACE_POINTS
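The hunk above is the build fix for CMA && !COMPACTION configurations: compaction.c is compiled for CMA even when CONFIG_COMPACTION is off, but the COMPACT* vm_event items only exist under CONFIG_COMPACTION, so the new wrappers forward to the real counters when the option is set and compile to nothing otherwise. Call sites then need no #ifdefs of their own. A minimal userspace sketch of the same conditional-stub pattern (the names here are invented for illustration, not kernel code):

    #include <stdio.h>

    #define CONFIG_FEATURE 1                /* comment out to build the stub variant */

    static long event_counter;

    #ifdef CONFIG_FEATURE
    static inline void count_feature_events(long delta)
    {
            event_counter += delta;         /* the "real" accounting */
    }
    #else
    #define count_feature_events(delta) do { } while (0)
    #endif

    int main(void)
    {
            count_feature_events(3);        /* call site needs no #ifdef of its own */
            printf("counter = %ld\n", event_counter);
            return 0;
    }

With CONFIG_FEATURE undefined, the macro expands to an empty statement and the counter code disappears from the object file, which is exactly what lets the CMA-only build link.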
@@ -303,10 +318,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (blockpfn == end_pfn)
update_pageblock_skip(cc, valid_page, total_isolated, false);
- count_vm_events(COMPACTFREE_SCANNED, nr_scanned);
+ count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
if (total_isolated)
- count_vm_events(COMPACTISOLATED, total_isolated);
-
+ count_compact_events(COMPACTISOLATED, total_isolated);
return total_isolated;
}
@@ -613,9 +627,9 @@ next_pageblock:
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
- count_vm_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+ count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
if (nr_isolated)
- count_vm_events(COMPACTISOLATED, nr_isolated);
+ count_compact_events(COMPACTISOLATED, nr_isolated);
return low_pfn;
}
@@ -1110,7 +1124,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
if (!order || !may_enter_fs || !may_perform_io)
return rc;
- count_vm_event(COMPACTSTALL);
+ count_compact_event(COMPACTSTALL);
#ifdef CONFIG_CMA
if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 32754eece63e..9e894edc7811 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -574,19 +574,19 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!*hugepage_kobj)) {
- printk(KERN_ERR "hugepage: failed kobject create\n");
+ printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
return -ENOMEM;
}
err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
if (err) {
- printk(KERN_ERR "hugepage: failed register hugeage group\n");
+ printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
goto delete_obj;
}
err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
if (err) {
- printk(KERN_ERR "hugepage: failed register hugeage group\n");
+ printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
goto remove_hp_group;
}
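This hunk gives each sysfs failure path a distinct message (and fixes the "failed register hugeage group" typo), so a log line now identifies which of the two groups failed. The surrounding function uses the usual kernel goto-unwind shape: cleanup labels run in reverse order of setup. A generic userspace sketch of that shape, with hypothetical step names standing in for the kobject/sysfs calls:

    #include <stdio.h>

    static int create_object(void)    { return 0; }   /* stand-ins for kobject_ */
    static int register_group_a(void) { return 0; }   /* create_and_add() and   */
    static int register_group_b(void) { return -1; }  /* sysfs_create_group()   */
    static void remove_group_a(void)  { }
    static void delete_object(void)   { }

    static int init_sysfs(void)
    {
            int err;

            err = create_object();
            if (err) {
                    fprintf(stderr, "failed to create object\n");
                    return err;
            }
            err = register_group_a();
            if (err) {
                    fprintf(stderr, "failed to register group A\n"); /* distinct text */
                    goto del_obj;
            }
            err = register_group_b();
            if (err) {
                    fprintf(stderr, "failed to register group B\n"); /* distinct text */
                    goto remove_a;
            }
            return 0;

    remove_a:                       /* unwind in reverse order of setup */
            remove_group_a();
    del_obj:
            delete_object();
            return err;
    }

    int main(void)
    {
            return init_sysfs() ? 1 : 0;
    }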
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f3009b4bae51..09255ec8159c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6090,7 +6090,6 @@ mem_cgroup_css_alloc(struct cgroup *cont)
&per_cpu(memcg_stock, cpu);
INIT_WORK(&stock->work, drain_local_stock);
}
- hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
} else {
parent = mem_cgroup_from_cont(cont->parent);
memcg->use_hierarchy = parent->use_hierarchy;
@@ -6756,6 +6755,19 @@ struct cgroup_subsys mem_cgroup_subsys = {
.use_id = 1,
};
+/*
+ * The rest of init is performed during ->css_alloc() for root css which
+ * happens before initcalls. hotcpu_notifier() can't be done together as
+ * it would introduce circular locking by adding cgroup_lock -> cpu hotplug
+ * dependency. Do it from a subsys_initcall().
+ */
+static int __init mem_cgroup_init(void)
+{
+ hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+ return 0;
+}
+subsys_initcall(mem_cgroup_init);
+
#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
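The comment added above carries the reasoning: registering the hotplug notifier from ->css_alloc() for the root css would add a cgroup_lock -> cpu-hotplug lock ordering, which conflicts with paths that take the locks the other way, so the registration moves to a subsys_initcall() that runs with neither lock held. A toy pthread illustration of the two shapes (all names invented for the sketch; only the second shape matches the fix):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* think: cgroup_lock */
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* think: hotplug lock */

    static void register_notifier(void)
    {
            pthread_mutex_lock(&lock_b);    /* registration takes lock_b internally */
            puts("notifier registered");
            pthread_mutex_unlock(&lock_b);
    }

    static void css_alloc_shape(void)       /* BAD: creates an A -> B ordering */
    {
            pthread_mutex_lock(&lock_a);
            register_notifier();
            pthread_mutex_unlock(&lock_a);
    }

    static void initcall_shape(void)        /* GOOD: lock_a not held, no A -> B edge */
    {
            register_notifier();
    }

    int main(void)
    {
            (void)css_alloc_shape;          /* kept only to show the shape avoided */
            initcall_shape();
            return 0;
    }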
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6f4271224493..0713bfbf0954 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
zone_reclaimable_pages(z) - z->dirty_balance_reserve;
}
/*
+ * Unreclaimable memory (kernel memory or anonymous memory
+ * without swap) can bring down the dirtyable pages below
+ * the zone's dirty balance reserve and the above calculation
+ * will underflow. However we still want to add in nodes
+ * which are below threshold (negative values) to get a more
+ * accurate calculation but make sure that the total never
+ * underflows.
+ */
+ if ((long)x < 0)
+ x = 0;
+
+ /*
* Make sure that the number of highmem pages is never larger
* than the number of the total dirtyable memory. This can only
* occur in very strange VM situations but we want to make sure
@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void)
{
unsigned long x;
- x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
- dirty_balance_reserve;
+ x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+ x -= min(x, dirty_balance_reserve);
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
* highmem zone can hold its share of dirty pages, so we don't
* care about vm_highmem_is_dirtyable here.
*/
- return zone_page_state(zone, NR_FREE_PAGES) +
- zone_reclaimable_pages(zone) -
- zone->dirty_balance_reserve;
+ unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
+ zone_reclaimable_pages(zone);
+
+ /* don't allow this to underflow */
+ nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+ return nr_pages;
}
/**
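All three page-writeback.c hunks guard against the same hazard spelled out in the new comment: these page counts are unsigned, so subtracting a reserve larger than the available pages wraps around to an enormous value instead of going negative. The clamped form x -= min(x, reserve) saturates at zero instead. A standalone demonstration of the difference (not kernel code):

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned long pages = 100, reserve = 150;

            unsigned long naive = pages - reserve;   /* wraps to a huge value */

            unsigned long clamped = pages;
            clamped -= min(clamped, reserve);        /* saturates at zero */

            printf("naive   = %lu\n", naive);        /* 18446744073709551566 on 64-bit */
            printf("clamped = %lu\n", clamped);      /* 0 */
            return 0;
    }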
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ad2ad168efe..4ba5e37127fc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5978,8 +5978,15 @@ done:
void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
- for (; nr_pages--; ++pfn)
- __free_page(pfn_to_page(pfn));
+ unsigned int count = 0;
+
+ for (; nr_pages--; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+
+ count += page_count(page) != 1;
+ __free_page(page);
+ }
+ WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif
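The rewritten free_contig_range() leans on the fact that a C comparison evaluates to 0 or 1: count += page_count(page) != 1 tallies every page that still holds an extra reference at free time, and a single WARN after the loop reports them all at once rather than once per page. A userspace analog with an invented page type, to show the counting idiom:

    #include <stdio.h>

    struct fake_page { int refcount; };              /* invented stand-in for struct page */

    static void put_page(struct fake_page *p)
    {
            p->refcount--;                           /* drop our reference */
    }

    static void free_range(struct fake_page *pages, unsigned int nr)
    {
            unsigned int count = 0, i;

            for (i = 0; i < nr; i++) {
                    count += pages[i].refcount != 1; /* comparison yields 0 or 1 */
                    put_page(&pages[i]);
            }
            if (count != 0)
                    fprintf(stderr, "%u pages are still in use!\n", count);
    }

    int main(void)
    {
            /* the middle "page" still has a second reference somewhere */
            struct fake_page pages[3] = { {1}, {2}, {1} };

            free_range(pages, 3);
            return 0;
    }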