author		Vasily Gorbik <gor@linux.ibm.com>	2017-11-17 18:22:24 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2018-10-09 11:21:23 +0200
commit		793213a82de4ccc96f394ea5deaaf57c0bb01f0b (patch)
tree		49afe68d1ca1ed84868329f647fee2483f3ebb62 /arch/s390
parent		0dac8f6bc3699f28d807ad3a5ec575e18da8ba62 (diff)
s390/kasan: dynamic shadow mem allocation for modules
Move from preallocating the entire shadow memory for the modules area to dynamic shadow allocation per module load. This behavior was introduced for x86 with bebf56a1b: "This patch also forces module_alloc() to return 8*PAGE_SIZE aligned address making shadow memory handling ( kasan_module_alloc()/kasan_module_free() ) more simple. Such alignment guarantees that each shadow page backing modules address space correspond to only one module_alloc() allocation"

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
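The alignment argument quoted above is plain arithmetic: with KASAN's scale of one shadow byte per 8 bytes of address space (KASAN_SHADOW_SCALE_SHIFT = 3), a single 4K shadow page covers exactly 8 * PAGE_SIZE of module space, so allocations aligned to MODULE_ALIGN = 8 * PAGE_SIZE can never share a shadow page. A standalone sketch of that computation (shadow_page_of() is illustrative, not a kernel API, and the shadow offset is folded out for simplicity):

#include <stdio.h>

#define PAGE_SIZE			4096UL
#define KASAN_SHADOW_SCALE_SHIFT	3	/* 1 shadow byte per 8 bytes */
#define MODULE_ALIGN	(PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)	/* 8 * PAGE_SIZE */

/* Illustrative helper: which shadow page backs a given module address? */
static unsigned long shadow_page_of(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) / PAGE_SIZE;
}

int main(void)
{
	unsigned long a = 0 * MODULE_ALIGN;	/* first allocation */
	unsigned long b = 1 * MODULE_ALIGN;	/* next aligned allocation */

	/* Each allocation's shadow stays within its own page range:
	 * prints "0..0" for a and "1..1" for b. */
	printf("a: shadow pages %lu..%lu\n",
	       shadow_page_of(a), shadow_page_of(a + MODULE_ALIGN - 1));
	printf("b: shadow pages %lu..%lu\n",
	       shadow_page_of(b), shadow_page_of(b + MODULE_ALIGN - 1));
	return 0;
}

Because no shadow page straddles two allocations, the shadow backing one module can be allocated on load and freed on unload without touching a neighbouring module's shadow.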
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/kernel/module.c	15
-rw-r--r--	arch/s390/mm/kasan_init.c	11
2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index d298d3cb46d0..31889db609e9 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <asm/alternative.h>
@@ -32,12 +33,18 @@
void *module_alloc(unsigned long size)
{
+ void *p;
+
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC,
- 0, NUMA_NO_NODE,
- __builtin_return_address(0));
+ p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+ return NULL;
+ }
+ return p;
}
void module_arch_freeing_init(struct module *mod)
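kasan_module_alloc() is the generic mm/kasan helper that module_alloc() now calls; a simplified sketch of its behavior in this kernel generation (error paths and VM_KASAN/kmemleak bookkeeping trimmed), shown for context rather than as part of this patch:

/* Simplified sketch: vmalloc zeroed shadow covering a freshly
 * allocated module region, at the fixed shadow address derived
 * from the region's start. */
int kasan_module_alloc(void *addr, size_t size)
{
	unsigned long shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	size_t shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
				      PAGE_SIZE);
	void *ret;

	/* MODULE_ALIGN in module_alloc() guarantees this alignment */
	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
				   shadow_start + shadow_size,
				   GFP_KERNEL | __GFP_ZERO,
				   PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
				   __builtin_return_address(0));
	return ret ? 0 : -ENOMEM;
}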
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index b888cbbbcf0d..714ac41e3ee5 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -214,8 +214,6 @@ void __init kasan_early_init(void)
memsize = min(max_physmem_end, KASAN_SHADOW_START);
shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
- if (IS_ENABLED(CONFIG_MODULES))
- shadow_alloc_size += MODULES_LEN >> KASAN_SHADOW_SCALE_SHIFT;
pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
initrd_end =
@@ -239,18 +237,15 @@ void __init kasan_early_init(void)
* +- shadow end -+ | mapping |
* | ... gap ... |\ | (untracked) |
* +- modules vaddr -+ \ +----------------+
- * | 2Gb | \| 256Mb |
+ * | 2Gb | \| unmapped | allocated per module
* +-----------------+ +- shadow end ---+
*/
/* populate identity mapping */
kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
- /* populate kasan shadow (for identity mapping / modules / zero page) */
+ /* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
- if (IS_ENABLED(CONFIG_MODULES)) {
+ if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = vmax - MODULES_LEN;
- kasan_early_vmemmap_populate(__sha(untracked_mem_end),
- __sha(vmax), POPULATE_MAP);
- }
kasan_early_vmemmap_populate(__sha(memsize), __sha(untracked_mem_end),
POPULATE_ZERO_SHADOW);
kasan_set_pgd(early_pg_dir, asce_type);
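The __sha() macro used above is s390's wrapper around the standard KASAN address-to-shadow translation; a sketch of that mapping, with KASAN_SHADOW_OFFSET standing in for the architecture- and config-specific constant:

/* Standard KASAN translation: shift the address right by the shadow
 * scale and relocate it into the shadow region (kernel-style GCC
 * void-pointer arithmetic). */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

With the shift of 3, the 2Gb modules area would need 256Mb of shadow, which is exactly the boot-time "256Mb" block the diagram above drops: after this patch that shadow is only materialized, piecewise, as modules are loaded.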