author     Alexander Popov <alpopov@ptsecurity.com>    2015-07-02 12:09:34 +0300
committer  Ingo Molnar <mingo@kernel.org>              2015-07-06 14:53:13 +0200
commit     5d5aa3cfca5cf74cd928daf3674642e6004328d1 (patch)
tree       42d53dc7979c3c9141cb7aac20107b9c626ee665 /arch/x86/kernel/head64.c
parent     d0f77d4d04b222a817925d33ba3589b190bfa863 (diff)
x86/kasan: Fix KASAN shadow region page tables
Currently, the KASAN shadow region page tables are created without taking the
physical offset (phys_base) into account. This causes a kernel halt when
phys_base is not zero. So let's initialize the KASAN shadow region page tables
in kasan_early_init() using __pa_nodebug(), which considers phys_base.

This patch also separates x86_64_start_kernel() from the KASAN low-level
details by moving kasan_map_early_shadow(init_level4_pgt) into
kasan_early_init().

Also remove the comment before clear_bss(), which no longer adds much to the
code's readability; describing all the new ordering dependencies there would
be too verbose.

Signed-off-by: Alexander Popov <alpopov@ptsecurity.com>
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: <stable@vger.kernel.org> # 4.0+
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1435828178-10975-3-git-send-email-a.ryabinin@samsung.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
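The kasan_early_init() function itself is added in arch/x86/mm/kasan_init_64.c,
which is outside this diffstat. The sketch below illustrates the approach the
message describes; the zero-shadow table names (kasan_zero_page/pte/pmd/pud)
and flag macros are taken from the 4.x-era x86 KASAN code and are assumptions
here, not part of the diff shown on this page:

/*
 * Sketch (not the verbatim patch): set up the early KASAN shadow page
 * tables using __pa_nodebug(), which accounts for phys_base.
 */
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | __PAGE_KERNEL_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | __PAGE_KERNEL_TABLE;

	/* Point every early shadow PTE/PMD/PUD at the shared zero pages. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	/* Map the early shadow into both page tables here, ... */
	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}

Because both early_level4_pgt and init_level4_pgt are wired up inside this one
helper, the diff below can drop both kasan_map_early_shadow() calls from
x86_64_start_kernel().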
Diffstat (limited to 'arch/x86/kernel/head64.c')
-rw-r--r--  arch/x86/kernel/head64.c  7
1 file changed, 2 insertions, 5 deletions
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 65c8985e3eb2..f129a9af6357 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,13 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	/* Kill off the identity-map trampoline */
 	reset_early_page_tables();
 
-	kasan_map_early_shadow(early_level4_pgt);
-
-	/* clear bss before set_intr_gate with early_idt_handler */
 	clear_bss();
 
 	clear_page(init_level4_pgt);
 
+	kasan_early_init();
+
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
 		set_intr_gate(i, early_idt_handler_array[i]);
 	load_idt((const struct desc_ptr *)&idt_descr);
@@ -182,8 +181,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	/* set init_level4_pgt kernel high mapping*/
 	init_level4_pgt[511] = early_level4_pgt[511];
 
-	kasan_map_early_shadow(init_level4_pgt);
-
 	x86_64_start_reservations(real_mode_data);
 }