From ca371c0d7e23d0d0afae65fc83a0e91cf7399573 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Fri, 12 Jun 2009 10:33:53 +0300
Subject: memcg: fix page_cgroup fatal error in FLATMEM

SLAB is now configured at a very early stage and can be used from init
routines. However, replacing alloc_bootmem() in the FLATMEM/DISCONTIGMEM
page_cgroup initialization broke that allocation. (The SPARSEMEM case
works fine: it supports MEMORY_HOTPLUG, and its per-section page_cgroup
tables stay at a reasonable size, below 1 << MAX_ORDER pages.)

This patch revives FLATMEM + memory cgroup by going back to
alloc_bootmem. In the future we may drop FLATMEM support (if it has no
users) or rewrite the flatmem code completely, but that would add more
messy code and overhead.

Reported-by: Li Zefan
Tested-by: Li Zefan
Tested-by: Ingo Molnar
Signed-off-by: KAMEZAWA Hiroyuki
Signed-off-by: Pekka Enberg
---
 mm/page_cgroup.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)

(limited to 'mm')

diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd4a909a1de..11a8a10a3909 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
-	struct page *page;
-	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-	order = get_order(table_size);
-	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
-		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
 		return -ENOMEM;
-	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 	int nid, fail;
 
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		if (slab_is_available()) {
-			base = kmalloc_node(table_size,
-					GFP_KERNEL | __GFP_NOWARN, nid);
-			if (!base)
-				base = vmalloc_node(table_size, nid);
-		} else {
-			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-				table_size,
-				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		}
+		VM_BUG_ON(!slab_is_available());
+		base = kmalloc_node(table_size,
+				GFP_KERNEL | __GFP_NOWARN, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
 	} else {
 		/*
		 * We don't have to allocate page_cgroup again, but
--
cgit v1.2.3
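For context, the consumer side of this per-node table is a plain pfn-offset
lookup. The following is a simplified sketch of the FLATMEM lookup path; it
assumes the table allocated above ends up in NODE_DATA(nid)->node_page_cgroup
and is an illustration, not the exact mm/page_cgroup.c code.

/* Illustration only: mapping a page to its page_cgroup entry once the
 * per-node table has been allocated from bootmem.  Assumes the table
 * pointer is stored in NODE_DATA(nid)->node_page_cgroup.
 */
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	pg_data_t *pgdat = NODE_DATA(page_to_nid(page));
	struct page_cgroup *base = pgdat->node_page_cgroup;
	unsigned long offset;

	if (unlikely(!base))
		return NULL;

	/* one struct page_cgroup per pfn in the node's spanned range */
	offset = page_to_pfn(page) - pgdat->node_start_pfn;
	return base + offset;
}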
From d93f82b6e0c12a4373f2d04b1f92fcb2d175b62c Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 12 Jun 2009 10:26:41 +0200
Subject: [S390] maccess: add weak attribute to probe_kernel_write

probe_kernel_write() is used to write to the kernel address space, e.g.
to patch the kernel (kgdb, ftrace, kprobes, ...). Some architectures,
however, enable write protection for the kernel text section, so writes
to this region would fault.

This patch allows an architecture to provide its own version of
probe_kernel_write() that can handle and bypass write protection of the
text segment. That way random writes to kernel text are still caught,
while writes through this interface are explicitly allowed.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 mm/maccess.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/maccess.c b/mm/maccess.c
index ac40796cfb15..9073695ff25f 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
-long probe_kernel_write(void *dst, void *src, size_t size)
+long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();
--
cgit v1.2.3
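To illustrate what the __weak hook enables, an architecture could supply a
strong probe_kernel_write() that lifts text write protection around the
fault-safe copy. This is only a sketch: arch_text_unprotect() and
arch_text_protect() are made-up helper names, not an existing kernel API.

/* Hypothetical architecture override; the strong symbol replaces the
 * __weak generic version from mm/maccess.c at link time.  The helpers
 * that toggle text write protection are assumptions for this sketch.
 */
long notrace probe_kernel_write(void *dst, void *src, size_t size)
{
	mm_segment_t old_fs = get_fs();
	long ret;

	arch_text_unprotect(dst, size);		/* assumed helper */

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
	pagefault_enable();
	set_fs(old_fs);

	arch_text_protect(dst, size);		/* assumed helper */

	return ret ? -EFAULT : 0;
}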
From eb91f1d0a531289e18f5587dc197d12a251c66a3 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Fri, 12 Jun 2009 14:56:09 +0300
Subject: slab: fix gfp flag in setup_cpu_cache()

Fixes the following warning during bootup when compiling with
CONFIG_SLAB:

[ 0.000000] ------------[ cut here ]------------
[ 0.000000] WARNING: at kernel/lockdep.c:2282 lockdep_trace_alloc+0x91/0xb9()
[ 0.000000] Hardware name:
[ 0.000000] Modules linked in:
[ 0.000000] Pid: 0, comm: swapper Not tainted 2.6.30 #491
[ 0.000000] Call Trace:
[ 0.000000] [] ? lockdep_trace_alloc+0x91/0xb9
[ 0.000000] [] warn_slowpath_common+0x7c/0xa9
[ 0.000000] [] warn_slowpath_null+0x14/0x16
[ 0.000000] [] lockdep_trace_alloc+0x91/0xb9
[ 0.000000] [] kmem_cache_alloc_node_notrace+0x26/0xdf
[ 0.000000] [] ? setup_cpu_cache+0x7e/0x210
[ 0.000000] [] setup_cpu_cache+0x113/0x210
[ 0.000000] [] kmem_cache_create+0x409/0x486
[ 0.000000] [] kmem_cache_init+0x232/0x593
[ 0.000000] [] ? mem_init+0x156/0x161
[ 0.000000] [] start_kernel+0x1cc/0x3b9
[ 0.000000] [] x86_64_start_reservations+0xaa/0xae
[ 0.000000] [] x86_64_start_kernel+0xe1/0xe8
[ 0.000000] ---[ end trace 4eaa2a86a8e2da22 ]---

Signed-off-by: Pekka Enberg
---
 mm/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/slab.c b/mm/slab.c
index f46b65d124e5..cd76964b53bc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2102,7 +2102,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		for_each_online_node(node) {
 			cachep->nodelists[node] =
 				kmalloc_node(sizeof(struct kmem_list3),
-						GFP_KERNEL, node);
+						gfp, node);
 			BUG_ON(!cachep->nodelists[node]);
 			kmem_list3_init(cachep->nodelists[node]);
 		}
--
cgit v1.2.3
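The pattern behind this one-liner is general: an init-path helper that takes
a gfp_t must pass it to every allocation it makes, because during early boot
the caller hands in GFP_NOWAIT while interrupts are still disabled, and a
hard-coded GFP_KERNEL (which may sleep) is what lockdep complains about in
the trace above. A minimal sketch of the pattern, with made-up names:

#include <linux/slab.h>
#include <linux/nodemask.h>

struct example_cache {			/* hypothetical, for illustration only */
	void *per_node[MAX_NUMNODES];
};

/* Honor the caller's gfp: GFP_NOWAIT during early boot, GFP_KERNEL later. */
static int __init example_setup(struct example_cache *c, gfp_t gfp)
{
	int node;

	for_each_online_node(node) {
		/* hard-coding GFP_KERNEL here is exactly the bug fixed above */
		c->per_node[node] = kmalloc_node(64, gfp, node);
		if (!c->per_node[node])
			return -ENOMEM;
	}
	return 0;
}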
From 7e85ee0c1d15ca5f8bff0f514f158eba1742dd87 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Fri, 12 Jun 2009 14:03:06 +0300
Subject: slab,slub: don't enable interrupts during early boot

As explained by Benjamin Herrenschmidt:

  Oh and btw, your patch alone doesn't fix powerpc, because it's missing
  a whole bunch of GFP_KERNEL's in the arch code... You would have to
  grep the entire kernel for things that check slab_is_available() and
  even then you'll be missing some.

  For example, slab_is_available() didn't always exist, and so in the
  early days on powerpc, we used a mem_init_done global that is set from
  mem_init() (not perfect but works in practice). And we still have code
  using that to do the test.

Therefore, mask out __GFP_WAIT, __GFP_IO, and __GFP_FS in the slab
allocators in early boot code to avoid enabling interrupts.

Signed-off-by: Pekka Enberg
---
 include/linux/gfp.h      |  3 +++
 include/linux/slab.h     |  2 ++
 include/linux/slob_def.h |  5 +++++
 include/linux/slub_def.h |  2 ++
 init/main.c              |  1 +
 mm/slab.c                | 18 ++++++++++++++++++
 mm/slub.c                | 16 ++++++++++++++++
 7 files changed, 47 insertions(+)

(limited to 'mm')

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0bbc15f54536..3760e7c5de02 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -85,6 +85,9 @@ struct vm_area_struct;
 			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
 			__GFP_NORETRY|__GFP_NOMEMALLOC)
 
+/* Control slab gfp mask during early boot */
+#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
 
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 48803064cedf..219b8fb4651d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -319,4 +319,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 	return kmalloc_node(size, flags | __GFP_ZERO, node);
 }
 
+void __init kmem_cache_init_late(void);
+
 #endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b39d006..bb5368df4be8 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -34,4 +34,9 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 	return kmalloc(size, flags);
 }
 
+static inline void kmem_cache_init_late(void)
+{
+	/* Nothing to do */
+}
+
 #endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index be5d40c43bd2..4dcbc2c71491 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -302,4 +302,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 }
 #endif
 
+void __init kmem_cache_init_late(void);
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/init/main.c b/init/main.c
index b3e8f14c568a..f6204f712e7c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -640,6 +640,7 @@ asmlinkage void __init start_kernel(void)
 			"enabled early\n");
 	early_boot_irqs_on();
 	local_irq_enable();
+	kmem_cache_init_late();
 
 	/*
	 * HACK ALERT! This is early. We're enabling the console before
diff --git a/mm/slab.c b/mm/slab.c
index cd76964b53bc..453efcb1c980 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -303,6 +303,12 @@ struct kmem_list3 {
 	int free_touched;		/* updated without locking */
 };
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /*
  * Need this for bootstrapping a per node allocator.
  */
@@ -1654,6 +1660,14 @@ void __init kmem_cache_init(void)
	 */
 }
 
+void __init kmem_cache_init_late(void)
+{
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 static int __init cpucache_init(void)
 {
 	int cpu;
@@ -3354,6 +3368,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3434,6 +3450,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
diff --git a/mm/slub.c b/mm/slub.c
index 3964d3ce4c15..30354bfeb43d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -178,6 +178,12 @@ static enum {
 	SYSFS		/* Sysfs up */
 } slab_state = DOWN;
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1595,6 +1601,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	gfpflags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
@@ -3104,6 +3112,14 @@ void __init kmem_cache_init(void)
 		nr_cpu_ids, nr_node_ids);
 }
 
+void __init kmem_cache_init_late(void)
+{
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 /*
  * Find a mergeable slab cache
  */
--
cgit v1.2.3
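The effect of the new mask is easiest to see on a concrete value: at this
point in history GFP_KERNEL was __GFP_WAIT | __GFP_IO | __GFP_FS, so until
kmem_cache_init_late() resets slab_gfp_mask, the masking in slab_alloc() and
__cache_alloc() quietly turns a GFP_KERNEL request into a non-sleeping one
(effectively GFP_NOWAIT). A small illustration:

#include <linux/gfp.h>

/* Illustration of the early-boot degradation done by the allocators:
 * masking with SLAB_GFP_BOOT_MASK drops __GFP_WAIT, __GFP_IO and __GFP_FS,
 * leaving a request that cannot sleep and so cannot re-enable interrupts.
 */
static inline gfp_t early_boot_gfp(gfp_t gfpflags)
{
	return gfpflags & SLAB_GFP_BOOT_MASK;	/* GFP_KERNEL -> effectively GFP_NOWAIT */
}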
From 8429db5c6336083594036c30f49401405d536911 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Fri, 12 Jun 2009 15:58:59 +0300
Subject: slab: setup cpu caches later on when interrupts are enabled

Fixes the following boot-time warning:

[ 0.000000] ------------[ cut here ]------------
[ 0.000000] WARNING: at kernel/smp.c:369 smp_call_function_many+0x56/0x1bc()
[ 0.000000] Hardware name:
[ 0.000000] Modules linked in:
[ 0.000000] Pid: 0, comm: swapper Not tainted 2.6.30 #492
[ 0.000000] Call Trace:
[ 0.000000] [] ? _spin_unlock+0x4f/0x5c
[ 0.000000] [] ? smp_call_function_many+0x56/0x1bc
[ 0.000000] [] warn_slowpath_common+0x7c/0xa9
[ 0.000000] [] warn_slowpath_null+0x14/0x16
[ 0.000000] [] smp_call_function_many+0x56/0x1bc
[ 0.000000] [] ? do_ccupdate_local+0x0/0x54
[ 0.000000] [] ? do_ccupdate_local+0x0/0x54
[ 0.000000] [] smp_call_function+0x3d/0x68
[ 0.000000] [] ? do_ccupdate_local+0x0/0x54
[ 0.000000] [] on_each_cpu+0x31/0x7c
[ 0.000000] [] do_tune_cpucache+0x119/0x454
[ 0.000000] [] ? lockdep_init_map+0x94/0x10b
[ 0.000000] [] ? kmem_cache_init+0x421/0x593
[ 0.000000] [] enable_cpucache+0x68/0xad
[ 0.000000] [] kmem_cache_init+0x434/0x593
[ 0.000000] [] ? mem_init+0x156/0x161
[ 0.000000] [] start_kernel+0x1cc/0x3b9
[ 0.000000] [] x86_64_start_reservations+0xaa/0xae
[ 0.000000] [] x86_64_start_kernel+0xe1/0xe8
[ 0.000000] ---[ end trace 4eaa2a86a8e2da22 ]---

Cc: Christoph Lameter
Cc: Nick Piggin
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

(limited to 'mm')

diff --git a/mm/slab.c b/mm/slab.c
index 453efcb1c980..18e3164de09a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -759,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -767,7 +768,7 @@
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1631,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -1660,14 +1669,6 @@ void __init kmem_cache_init(void)
	 */
 }
 
-void __init kmem_cache_init_late(void)
-{
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-}
-
 static int __init cpucache_init(void)
 {
 	int cpu;
--
cgit v1.2.3
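With the new EARLY state, slab_is_available() turns true as soon as
kmem_cache_init() finishes, even though the per-CPU array caches are only
resized later from kmem_cache_init_late(). Boot code that must choose between
bootmem and the slab can keep using the usual pattern; a hedged sketch, not
taken from the patch:

#include <linux/slab.h>
#include <linux/bootmem.h>

/* Sketch: pick an allocator during boot.  GFP_NOWAIT keeps the slab path
 * safe while interrupts are still disabled; slab_is_available() is true
 * once g_cpucache_up >= EARLY.
 */
static void * __init boot_alloc(size_t size)
{
	if (slab_is_available())
		return kzalloc(size, GFP_NOWAIT);

	return alloc_bootmem(size);	/* fall back to the bootmem allocator */
}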
From c6f37f12197ac3bd2e5a35f2f0e195ae63d437de Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Sun, 24 May 2009 22:16:31 +0200
Subject: PM/Suspend: Do not shrink memory before suspend

Remove the shrinking of memory from the suspend-to-RAM code, where it
is not really necessary.

Signed-off-by: Rafael J. Wysocki
Acked-by: Nigel Cunningham
Acked-by: Wu Fengguang
---
 kernel/power/main.c | 20 +-------------------
 mm/vmscan.c         |  4 ++--
 2 files changed, 3 insertions(+), 21 deletions(-)

(limited to 'mm')

diff --git a/kernel/power/main.c b/kernel/power/main.c
index 46386b9f8dd1..2a19f347bd8a 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -188,9 +188,6 @@ static void suspend_test_finish(const char *label)
 
 #endif
 
-/* This is just an arbitrary number */
-#define FREE_PAGE_NUMBER (100)
-
 static struct platform_suspend_ops *suspend_ops;
 
 /**
@@ -226,7 +223,6 @@ int suspend_valid_only_mem(suspend_state_t state)
 static int suspend_prepare(void)
 {
 	int error;
-	unsigned int free_pages;
 
 	if (!suspend_ops || !suspend_ops->enter)
 		return -EPERM;
@@ -241,24 +237,10 @@ static int suspend_prepare(void)
 	if (error)
 		goto Finish;
 
-	if (suspend_freeze_processes()) {
-		error = -EAGAIN;
-		goto Thaw;
-	}
-
-	free_pages = global_page_state(NR_FREE_PAGES);
-	if (free_pages < FREE_PAGE_NUMBER) {
-		pr_debug("PM: free some memory\n");
-		shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
-		if (nr_free_pages() < FREE_PAGE_NUMBER) {
-			error = -ENOMEM;
-			printk(KERN_ERR "PM: No enough memory\n");
-		}
-	}
+	error = suspend_freeze_processes();
 	if (!error)
 		return 0;
 
- Thaw:
 	suspend_thaw_processes();
 	usermodehelper_enable();
  Finish:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d254306562cd..95c08a8cc2ba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2056,7 +2056,7 @@ unsigned long global_lru_pages(void)
 		+ global_page_state(NR_INACTIVE_FILE);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
 /*
 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
 * from LRU lists system-wide, for given pass and priority.
@@ -2196,7 +2196,7 @@ out:
 	return sc.nr_reclaimed;
 }
 
-#endif
+#endif /* CONFIG_HIBERNATION */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness. So if the last cpu in a node goes
--
cgit v1.2.3