author	Rusty Russell <rusty@rustcorp.com.au>	2013-07-03 10:06:28 +0930
committer	Rusty Russell <rusty@rustcorp.com.au>	2013-07-03 10:15:09 +0930
commit	8d8022e8aba85192e937f1f0f7450e256d66ae5c (patch)
tree	14dfe2cb950815bfcd602cdbd376bb8694c98d3d /kernel/module.c
parent	54041d8a73337411b485ff76957fb106cb5d40d0 (diff)
module: do percpu allocation after uniqueness check. No, really!
v3.8-rc1-5-g1fb9341 was supposed to stop parallel kvm loads exhausting
percpu memory on large machines:

    Now we have a new state MODULE_STATE_UNFORMED, we can insert the
    module into the list (and thus guarantee its uniqueness) before we
    allocate the per-cpu region.

In my defence, it didn't actually say the patch did this.  Just that
we "can".

This patch actually *does* it.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Tested-by: Jim Hull <jim.hull@hp.com>
Cc: stable@kernel.org # 3.8
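Since the patch only reorders existing kernel operations, a standalone kernel example is not practical. The sketch below is a plain-C userspace analogy of the ordering the patch enforces, not kernel code; every name in it (fake_module, register_unique, loader, MAX_MODULES) is made up for illustration. Each racing loader thread first tries to register its module name, and only the single winner goes on to pay for the large allocation, mirroring how load_module now puts the MODULE_STATE_UNFORMED module on the list before the per-cpu allocation is attempted.

/*
 * Userspace analogy only -- not kernel code.  Compile with: cc -pthread demo.c
 * Eight threads race to "load" the same module; whoever registers the name
 * first does the expensive allocation, everyone else bails out before
 * allocating anything.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_MODULES 16

struct fake_module {
	char name[64];
	void *percpu;			/* stands in for the per-cpu region */
};

static struct fake_module *registry[MAX_MODULES];
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Insert into the registry only if the name is not already present. */
static int register_unique(struct fake_module *mod)
{
	int i, slot = -1;

	pthread_mutex_lock(&registry_lock);
	for (i = 0; i < MAX_MODULES; i++) {
		if (registry[i] && !strcmp(registry[i]->name, mod->name)) {
			pthread_mutex_unlock(&registry_lock);
			return -1;	/* duplicate: caller must not allocate */
		}
		if (!registry[i] && slot < 0)
			slot = i;
	}
	if (slot >= 0)
		registry[slot] = mod;
	pthread_mutex_unlock(&registry_lock);
	return slot >= 0 ? 0 : -1;
}

static void *loader(void *arg)
{
	struct fake_module *mod = calloc(1, sizeof(*mod));

	snprintf(mod->name, sizeof(mod->name), "%s", (const char *)arg);

	/* Uniqueness check first ... */
	if (register_unique(mod)) {
		printf("%s: already loading, skipping allocation\n", mod->name);
		free(mod);
		return NULL;
	}

	/* ... the big allocation happens only for the single winner. */
	mod->percpu = malloc(1 << 20);
	printf("%s: registered and allocated\n", mod->name);
	return NULL;		/* winner's memory is reclaimed at process exit */
}

int main(void)
{
	pthread_t t[8];
	int i;

	for (i = 0; i < 8; i++)
		pthread_create(&t[i], NULL, loader, (void *)"kvm");
	for (i = 0; i < 8; i++)
		pthread_join(t[i], NULL);
	return 0;
}

With this ordering, eight parallel loads of "kvm" result in exactly one large allocation; with the old ordering (allocate first, check for duplicates later) each racer would briefly hold its own per-cpu-sized region, which is what exhausted percpu memory during parallel kvm loads on large machines.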
Diffstat (limited to 'kernel/module.c')
 kernel/module.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 5184877ce98a..d1a161be7b04 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2940,7 +2940,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 {
 	/* Module within temporary copy. */
 	struct module *mod;
-	Elf_Shdr *pcpusec;
 	int err;
 
 	mod = setup_load_info(info, flags);
@@ -2955,17 +2954,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	err = module_frob_arch_sections(info->hdr, info->sechdrs,
 					info->secstrings, mod);
 	if (err < 0)
-		goto out;
+		return ERR_PTR(err);
 
-	pcpusec = &info->sechdrs[info->index.pcpu];
-	if (pcpusec->sh_size) {
-		/* We have a special allocation for this section. */
-		err = percpu_modalloc(mod,
-				      pcpusec->sh_size, pcpusec->sh_addralign);
-		if (err)
-			goto out;
-		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
-	}
+	/* We will do a special allocation for per-cpu sections later. */
+	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
 
 	/* Determine total sizes, and put offsets in sh_entsize. For now
 	   this is done generically; there doesn't appear to be any
@@ -2976,17 +2968,22 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	/* Allocate and move to the final place */
 	err = move_module(mod, info);
 	if (err)
-		goto free_percpu;
+		return ERR_PTR(err);
 
 	/* Module has been copied to its final place now: return it. */
 	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 	kmemleak_load_module(mod, info);
 	return mod;
+}
 
-free_percpu:
-	percpu_modfree(mod);
-out:
-	return ERR_PTR(err);
+static int alloc_module_percpu(struct module *mod, struct load_info *info)
+{
+	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
+	if (!pcpusec->sh_size)
+		return 0;
+
+	/* We have a special allocation for this section. */
+	return percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign);
 }
 
 /* mod is no longer valid after this! */
@@ -3262,6 +3259,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	}
 #endif
 
+	/* To avoid stressing percpu allocator, do this once we're unique. */
+	err = alloc_module_percpu(mod, info);
+	if (err)
+		goto unlink_mod;
+
 	/* Now module is in final location, initialize linked lists, etc. */
 	err = module_unload_init(mod);
 	if (err)