From 3b1a4a8640876a966ab68ab4f561642e19674671 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:00:14 -0800 Subject: kasan: group vmalloc code This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode. Group all vmalloc-related function declarations in include/linux/kasan.h, and their implementations in mm/kasan/common.c. No functional changes. Link: https://lkml.kernel.org/r/80a6fdd29b039962843bd6cf22ce2643a7c8904e.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Reviewed-by: Marco Elver Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 30d343b4a40a..59538e795df4 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -75,19 +75,6 @@ struct kasan_cache { int free_meta_offset; }; -/* - * These functions provide a special case to support backing module - * allocations with real shadow memory. With KASAN vmalloc, the special - * case is unnecessary, as the work is handled in the generic case. - */ -#ifndef CONFIG_KASAN_VMALLOC -int kasan_module_alloc(void *addr, size_t size); -void kasan_free_shadow(const struct vm_struct *vm); -#else -static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_free_shadow(const struct vm_struct *vm) {} -#endif - int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); @@ -156,9 +143,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, return false; } -static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_free_shadow(const struct vm_struct *vm) {} - static inline int kasan_add_zero_shadow(void *start, unsigned long size) { return 0; @@ -211,13 +195,16 @@ static inline void *kasan_reset_tag(const void *addr) #endif /* CONFIG_KASAN_SW_TAGS */ #ifdef CONFIG_KASAN_VMALLOC + int kasan_populate_vmalloc(unsigned long addr, unsigned long size); void kasan_poison_vmalloc(const void *start, unsigned long size); void kasan_unpoison_vmalloc(const void *start, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -#else + +#else /* CONFIG_KASAN_VMALLOC */ + static inline int kasan_populate_vmalloc(unsigned long start, unsigned long size) { @@ -232,7 +219,25 @@ static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end) {} -#endif + +#endif /* CONFIG_KASAN_VMALLOC */ + +#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) + +/* + * These functions provide a special case to support backing module + * allocations with real shadow memory. With KASAN vmalloc, the special + * case is unnecessary, as the work is handled in the generic case. 
+ */ +int kasan_module_alloc(void *addr, size_t size); +void kasan_free_shadow(const struct vm_struct *vm); + +#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ + +static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } +static inline void kasan_free_shadow(const struct vm_struct *vm) {} + +#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ #ifdef CONFIG_KASAN_INLINE void kasan_non_canonical_hook(unsigned long addr); -- cgit v1.2.3 From d5750edf6da759576f91ec2b57d5553985815b40 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:00:17 -0800 Subject: kasan: shadow declarations only for software modes This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode. Group shadow-related KASAN function declarations and only define them for the two existing software modes. No functional changes for software modes. Link: https://lkml.kernel.org/r/35126.1606402815@turing-police Link: https://lore.kernel.org/linux-arm-kernel/24105.1606397102@turing-police/ Link: https://lkml.kernel.org/r/e88d94eff94db883a65dca52e1736d80d28dd9bc.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Signed-off-by: Valdis Kletnieks Reviewed-by: Marco Elver Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon [valdis.kletnieks@vt.edu: fix build issue with asmlinkage] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 48 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 59538e795df4..7828436a3a99 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -11,7 +11,7 @@ struct task_struct; #ifdef CONFIG_KASAN -#include +#include #include /* kasan_data struct is used in KUnit tests for KASAN expected failures */ @@ -20,6 +20,20 @@ struct kunit_kasan_expectation { bool report_found; }; +#endif + +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + +#include + +/* Software KASAN implementations use shadow memory. 
*/ + +#ifdef CONFIG_KASAN_SW_TAGS +#define KASAN_SHADOW_INIT 0xFF +#else +#define KASAN_SHADOW_INIT 0 +#endif + extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD]; @@ -35,6 +49,23 @@ static inline void *kasan_mem_to_shadow(const void *addr) + KASAN_SHADOW_OFFSET; } +int kasan_add_zero_shadow(void *start, unsigned long size); +void kasan_remove_zero_shadow(void *start, unsigned long size); + +#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +static inline int kasan_add_zero_shadow(void *start, unsigned long size) +{ + return 0; +} +static inline void kasan_remove_zero_shadow(void *start, + unsigned long size) +{} + +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +#ifdef CONFIG_KASAN + /* Enable reporting bugs after kasan_disable_current() */ extern void kasan_enable_current(void); @@ -75,9 +106,6 @@ struct kasan_cache { int free_meta_offset; }; -int kasan_add_zero_shadow(void *start, unsigned long size); -void kasan_remove_zero_shadow(void *start, unsigned long size); - size_t __ksize(const void *); static inline void kasan_unpoison_slab(const void *ptr) { @@ -143,14 +171,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, return false; } -static inline int kasan_add_zero_shadow(void *start, unsigned long size) -{ - return 0; -} -static inline void kasan_remove_zero_shadow(void *start, - unsigned long size) -{} - static inline void kasan_unpoison_slab(const void *ptr) { } static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } @@ -158,8 +178,6 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } #ifdef CONFIG_KASAN_GENERIC -#define KASAN_SHADOW_INIT 0 - void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_record_aux_stack(void *ptr); @@ -174,8 +192,6 @@ static inline void kasan_record_aux_stack(void *ptr) {} #ifdef CONFIG_KASAN_SW_TAGS -#define KASAN_SHADOW_INIT 0xFF - void kasan_init_tags(void); void *kasan_reset_tag(const void *addr); -- cgit v1.2.3 From cebd0eb29acdfc2f5e44e5f356ffcd0c44f16b4a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:00:21 -0800 Subject: kasan: rename (un)poison_shadow to (un)poison_range This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode. The new mode won't be using shadow memory. Rename external annotation kasan_unpoison_shadow() to kasan_unpoison_range(), and introduce internal functions (un)poison_range() (without kasan_ prefix). 
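For illustration, the resulting layering is roughly this (a minimal sketch condensed from the mm/kasan/common.c hunks below; the real helpers also handle pointer tags):

/* Internal to mm/kasan/, no kasan_ prefix: */
void poison_range(const void *address, size_t size, u8 value);
void unpoison_range(const void *address, size_t size);

/* External annotation for the rest of the kernel (e.g. kernel/fork.c): */
void kasan_unpoison_range(const void *address, size_t size)
{
        unpoison_range(address, size);
}
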
Co-developed-by: Marco Elver Link: https://lkml.kernel.org/r/fccdcaa13dc6b2211bf363d6c6d499279a54fe3a.1606161801.git.andreyknvl@google.com Signed-off-by: Marco Elver Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 6 +++--- kernel/fork.c | 4 ++-- mm/kasan/common.c | 49 +++++++++++++++++++++++++++---------------------- mm/kasan/generic.c | 23 +++++++++++------------ mm/kasan/kasan.h | 3 ++- mm/kasan/tags.c | 2 +- mm/slab_common.c | 2 +- 7 files changed, 47 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 7828436a3a99..9740c06a04a1 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -72,7 +72,7 @@ extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ extern void kasan_disable_current(void); -void kasan_unpoison_shadow(const void *address, size_t size); +void kasan_unpoison_range(const void *address, size_t size); void kasan_unpoison_task_stack(struct task_struct *task); @@ -109,7 +109,7 @@ struct kasan_cache { size_t __ksize(const void *); static inline void kasan_unpoison_slab(const void *ptr) { - kasan_unpoison_shadow(ptr, __ksize(ptr)); + kasan_unpoison_range(ptr, __ksize(ptr)); } size_t kasan_metadata_size(struct kmem_cache *cache); @@ -118,7 +118,7 @@ void kasan_restore_multi_shot(bool enabled); #else /* CONFIG_KASAN */ -static inline void kasan_unpoison_shadow(const void *address, size_t size) {} +static inline void kasan_unpoison_range(const void *address, size_t size) {} static inline void kasan_unpoison_task_stack(struct task_struct *task) {} diff --git a/kernel/fork.c b/kernel/fork.c index 41906a52a764..37720a6d04ea 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -225,8 +225,8 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) if (!s) continue; - /* Clear the KASAN shadow of the stack. */ - kasan_unpoison_shadow(s->addr, THREAD_SIZE); + /* Mark stack accessible for KASAN. */ + kasan_unpoison_range(s->addr, THREAD_SIZE); /* Clear stale pointers from reused stack. */ memset(s->addr, 0, THREAD_SIZE); diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 89e5ef9417a7..73e79a34671b 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -108,7 +108,7 @@ void *memcpy(void *dest, const void *src, size_t len) * Poisons the shadow memory for 'size' bytes starting from 'addr'. * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. 
*/ -void kasan_poison_shadow(const void *address, size_t size, u8 value) +void poison_range(const void *address, size_t size, u8 value) { void *shadow_start, *shadow_end; @@ -125,7 +125,7 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value) __memset(shadow_start, value, shadow_end - shadow_start); } -void kasan_unpoison_shadow(const void *address, size_t size) +void unpoison_range(const void *address, size_t size) { u8 tag = get_tag(address); @@ -136,7 +136,7 @@ void kasan_unpoison_shadow(const void *address, size_t size) */ address = reset_tag(address); - kasan_poison_shadow(address, size, tag); + poison_range(address, size, tag); if (size & KASAN_SHADOW_MASK) { u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); @@ -148,12 +148,17 @@ void kasan_unpoison_shadow(const void *address, size_t size) } } +void kasan_unpoison_range(const void *address, size_t size) +{ + unpoison_range(address, size); +} + static void __kasan_unpoison_stack(struct task_struct *task, const void *sp) { void *base = task_stack_page(task); size_t size = sp - base; - kasan_unpoison_shadow(base, size); + unpoison_range(base, size); } /* Unpoison the entire stack for a task. */ @@ -172,7 +177,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark) */ void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1)); - kasan_unpoison_shadow(base, watermark - base); + unpoison_range(base, watermark - base); } void kasan_alloc_pages(struct page *page, unsigned int order) @@ -186,13 +191,13 @@ void kasan_alloc_pages(struct page *page, unsigned int order) tag = random_tag(); for (i = 0; i < (1 << order); i++) page_kasan_tag_set(page + i, tag); - kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); + unpoison_range(page_address(page), PAGE_SIZE << order); } void kasan_free_pages(struct page *page, unsigned int order) { if (likely(!PageHighMem(page))) - kasan_poison_shadow(page_address(page), + poison_range(page_address(page), PAGE_SIZE << order, KASAN_FREE_PAGE); } @@ -284,18 +289,18 @@ void kasan_poison_slab(struct page *page) for (i = 0; i < compound_nr(page); i++) page_kasan_tag_reset(page + i); - kasan_poison_shadow(page_address(page), page_size(page), - KASAN_KMALLOC_REDZONE); + poison_range(page_address(page), page_size(page), + KASAN_KMALLOC_REDZONE); } void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) { - kasan_unpoison_shadow(object, cache->object_size); + unpoison_range(object, cache->object_size); } void kasan_poison_object_data(struct kmem_cache *cache, void *object) { - kasan_poison_shadow(object, + poison_range(object, round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), KASAN_KMALLOC_REDZONE); } @@ -408,7 +413,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object, } rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); - kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); + poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE); if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) || unlikely(!(cache->flags & SLAB_KASAN))) @@ -448,9 +453,9 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, tag = assign_tag(cache, object, false, keep_tag); /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ - kasan_unpoison_shadow(set_tag(object, tag), size); - kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, - KASAN_KMALLOC_REDZONE); + unpoison_range(set_tag(object, tag), size); + poison_range((void *)redzone_start, redzone_end - 
redzone_start, + KASAN_KMALLOC_REDZONE); if (cache->flags & SLAB_KASAN) kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags); @@ -489,9 +494,9 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, KASAN_SHADOW_SCALE_SIZE); redzone_end = (unsigned long)ptr + page_size(page); - kasan_unpoison_shadow(ptr, size); - kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, - KASAN_PAGE_REDZONE); + unpoison_range(ptr, size); + poison_range((void *)redzone_start, redzone_end - redzone_start, + KASAN_PAGE_REDZONE); return (void *)ptr; } @@ -523,7 +528,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip) kasan_report_invalid_free(ptr, ip); return; } - kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE); + poison_range(ptr, page_size(page), KASAN_FREE_PAGE); } else { __kasan_slab_free(page->slab_cache, ptr, ip, false); } @@ -709,7 +714,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size) * // vmalloc() allocates memory * // let a = area->addr * // we reach kasan_populate_vmalloc - * // and call kasan_unpoison_shadow: + * // and call unpoison_range: * STORE shadow(a), unpoison_val * ... * STORE shadow(a+99), unpoison_val x = LOAD p @@ -744,7 +749,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size) return; size = round_up(size, KASAN_SHADOW_SCALE_SIZE); - kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID); + poison_range(start, size, KASAN_VMALLOC_INVALID); } void kasan_unpoison_vmalloc(const void *start, unsigned long size) @@ -752,7 +757,7 @@ void kasan_unpoison_vmalloc(const void *start, unsigned long size) if (!is_vmalloc_or_module_addr(start)) return; - kasan_unpoison_shadow(start, size); + unpoison_range(start, size); } static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index d341859a1b95..9fe44f9b3b30 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -202,11 +202,11 @@ static void register_global(struct kasan_global *global) { size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); - kasan_unpoison_shadow(global->beg, global->size); + unpoison_range(global->beg, global->size); - kasan_poison_shadow(global->beg + aligned_size, - global->size_with_redzone - aligned_size, - KASAN_GLOBAL_REDZONE); + poison_range(global->beg + aligned_size, + global->size_with_redzone - aligned_size, + KASAN_GLOBAL_REDZONE); } void __asan_register_globals(struct kasan_global *globals, size_t size) @@ -285,13 +285,12 @@ void __asan_alloca_poison(unsigned long addr, size_t size) WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); - kasan_unpoison_shadow((const void *)(addr + rounded_down_size), - size - rounded_down_size); - kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, - KASAN_ALLOCA_LEFT); - kasan_poison_shadow(right_redzone, - padding_size + KASAN_ALLOCA_REDZONE_SIZE, - KASAN_ALLOCA_RIGHT); + unpoison_range((const void *)(addr + rounded_down_size), + size - rounded_down_size); + poison_range(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, + KASAN_ALLOCA_LEFT); + poison_range(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE, + KASAN_ALLOCA_RIGHT); } EXPORT_SYMBOL(__asan_alloca_poison); @@ -301,7 +300,7 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) if (unlikely(!stack_top || stack_top > stack_bottom)) return; - kasan_unpoison_shadow(stack_top, stack_bottom - stack_top); + unpoison_range(stack_top, stack_bottom - stack_top); } EXPORT_SYMBOL(__asan_allocas_unpoison); 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index ac499456740f..42ab02c61331 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -150,7 +150,8 @@ static inline bool addr_has_shadow(const void *addr) return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START)); } -void kasan_poison_shadow(const void *address, size_t size, u8 value); +void poison_range(const void *address, size_t size, u8 value); +void unpoison_range(const void *address, size_t size); /** * check_memory_region - Check memory region, and report if invalid access. diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 5c8b08a25715..c0b3f327812b 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -153,7 +153,7 @@ EXPORT_SYMBOL(__hwasan_storeN_noabort); void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) { - kasan_poison_shadow((void *)addr, size, tag); + poison_range((void *)addr, size, tag); } EXPORT_SYMBOL(__hwasan_tag_memory); diff --git a/mm/slab_common.c b/mm/slab_common.c index 2f2b55c2798e..573fbacd9ef5 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1176,7 +1176,7 @@ size_t ksize(const void *objp) * We assume that ksize callers could use whole allocated area, * so we need to unpoison this area. */ - kasan_unpoison_shadow(objp, size); + kasan_unpoison_range(objp, size); return size; } EXPORT_SYMBOL(ksize); -- cgit v1.2.3 From d73b49365ee65ac48074bdb5aa717bb4644dbbb7 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:00:56 -0800 Subject: kasan, arm64: only use kasan_depth for software modes This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode. Hardware tag-based KASAN won't use kasan_depth. Only define and use it when one of the software KASAN modes are enabled. No functional changes for software modes. Link: https://lkml.kernel.org/r/e16f15aeda90bc7fb4dfc2e243a14b74cc5c8219.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Reviewed-by: Catalin Marinas Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/kasan_init.c | 11 ++++++++--- include/linux/kasan.h | 18 +++++++++--------- include/linux/sched.h | 2 +- init/init_task.c | 2 +- mm/kasan/common.c | 2 ++ mm/kasan/report.c | 2 ++ 6 files changed, 23 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index ffeb80d5aa8d..5172799f831f 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -273,17 +273,22 @@ static void __init kasan_init_shadow(void) cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); } +static void __init kasan_init_depth(void) +{ + init_task.kasan_depth = 0; +} + #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) */ static inline void __init kasan_init_shadow(void) { } +static inline void __init kasan_init_depth(void) { } + #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ void __init kasan_init(void) { kasan_init_shadow(); - - /* At this point kasan is fully initialized. 
Enable error messages */ - init_task.kasan_depth = 0; + kasan_init_depth(); pr_info("KernelAddressSanitizer initialized\n"); } diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 9740c06a04a1..b272960e8396 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -52,6 +52,12 @@ static inline void *kasan_mem_to_shadow(const void *addr) int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); +/* Enable reporting bugs after kasan_disable_current() */ +extern void kasan_enable_current(void); + +/* Disable reporting bugs for current task */ +extern void kasan_disable_current(void); + #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ static inline int kasan_add_zero_shadow(void *start, unsigned long size) @@ -62,16 +68,13 @@ static inline void kasan_remove_zero_shadow(void *start, unsigned long size) {} +static inline void kasan_enable_current(void) {} +static inline void kasan_disable_current(void) {} + #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ #ifdef CONFIG_KASAN -/* Enable reporting bugs after kasan_disable_current() */ -extern void kasan_enable_current(void); - -/* Disable reporting bugs for current task */ -extern void kasan_disable_current(void); - void kasan_unpoison_range(const void *address, size_t size); void kasan_unpoison_task_stack(struct task_struct *task); @@ -122,9 +125,6 @@ static inline void kasan_unpoison_range(const void *address, size_t size) {} static inline void kasan_unpoison_task_stack(struct task_struct *task) {} -static inline void kasan_enable_current(void) {} -static inline void kasan_disable_current(void) {} - static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} diff --git a/include/linux/sched.h b/include/linux/sched.h index 51d535b69bd6..6e3a5eeec509 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1234,7 +1234,7 @@ struct task_struct { u64 timer_slack_ns; u64 default_timer_slack_ns; -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) unsigned int kasan_depth; #endif diff --git a/init/init_task.c b/init/init_task.c index 15f6eb93a04f..8a992d73e6fb 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -176,7 +176,7 @@ struct task_struct init_task .numa_group = NULL, .numa_faults = NULL, #endif -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) .kasan_depth = 1, #endif #ifdef CONFIG_KCSAN diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 663ffa71cd20..d5f23b2f170a 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -46,6 +46,7 @@ void kasan_set_track(struct kasan_track *track, gfp_t flags) track->stack = kasan_save_stack(flags); } +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) void kasan_enable_current(void) { current->kasan_depth++; @@ -55,6 +56,7 @@ void kasan_disable_current(void) { current->kasan_depth--; } +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ void kasan_unpoison_range(const void *address, size_t size) { diff --git a/mm/kasan/report.c b/mm/kasan/report.c index d140d26cfb31..914ab5cfc3ea 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -292,8 +292,10 @@ static void print_shadow_for_address(const void *addr) static bool report_enabled(void) { +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) if (current->kasan_depth) return false; +#endif if (test_bit(KASAN_BIT_MULTI_SHOT, 
&kasan_flags)) return true; return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags); -- cgit v1.2.3 From 60a3a5fe950f4e6c02e9fc6676dc96de043ed743 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:01:03 -0800 Subject: kasan, arm64: rename kasan_init_tags and mark as __init Rename kasan_init_tags() to kasan_init_sw_tags() as the upcoming hardware tag-based KASAN mode will have its own initialization routine. Also similarly to kasan_init() mark kasan_init_tags() as __init. Link: https://lkml.kernel.org/r/71e52af72a09f4b50c8042f16101c60e50649fbb.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Catalin Marinas Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/kernel/setup.c | 2 +- arch/arm64/mm/kasan_init.c | 2 +- include/linux/kasan.h | 4 ++-- mm/kasan/sw_tags.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index c44eb4b80163..c18aacde8bb0 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -358,7 +358,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) smp_build_mpidr_hash(); /* Init percpu seeds for random tags after cpus are set up. */ - kasan_init_tags(); + kasan_init_sw_tags(); #ifdef CONFIG_ARM64_SW_TTBR0_PAN /* diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index e35ce04beed1..d8e66c78440e 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -283,7 +283,7 @@ void __init kasan_init(void) kasan_init_shadow(); kasan_init_depth(); #if defined(CONFIG_KASAN_GENERIC) - /* CONFIG_KASAN_SW_TAGS also requires kasan_init_tags(). */ + /* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */ pr_info("KernelAddressSanitizer initialized\n"); #endif } diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b272960e8396..d7042d129dc1 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -192,7 +192,7 @@ static inline void kasan_record_aux_stack(void *ptr) {} #ifdef CONFIG_KASAN_SW_TAGS -void kasan_init_tags(void); +void __init kasan_init_sw_tags(void); void *kasan_reset_tag(const void *addr); @@ -201,7 +201,7 @@ bool kasan_report(unsigned long addr, size_t size, #else /* CONFIG_KASAN_SW_TAGS */ -static inline void kasan_init_tags(void) { } +static inline void kasan_init_sw_tags(void) { } static inline void *kasan_reset_tag(const void *addr) { diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c index 9445cf4ccdc8..7317d5229b2b 100644 --- a/mm/kasan/sw_tags.c +++ b/mm/kasan/sw_tags.c @@ -35,7 +35,7 @@ static DEFINE_PER_CPU(u32, prng_state); -void kasan_init_tags(void) +void __init kasan_init_sw_tags(void) { int cpu; -- cgit v1.2.3 From 0fea6e9af889f1a4e072f5de999e07fe6859fc88 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:06 -0800 Subject: kasan, arm64: expand CONFIG_KASAN checks Some #ifdef CONFIG_KASAN checks are only relevant for software KASAN modes (either related to shadow memory or compiler instrumentation). Expand those into CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS. 
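In short, the conversion applied below is mechanical; each site takes one of these two forms (sketch):

/* preprocessor checks: */
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

/* IS_ENABLED()-style checks: */
-if (IS_ENABLED(CONFIG_KASAN))
+if (IS_ENABLED(CONFIG_KASAN_GENERIC) || IS_ENABLED(CONFIG_KASAN_SW_TAGS))
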
Link: https://lkml.kernel.org/r/e6971e432dbd72bb897ff14134ebb7e169bdcf0c.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Reviewed-by: Catalin Marinas Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/Kconfig | 2 +- arch/arm64/Makefile | 2 +- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/memory.h | 2 +- arch/arm64/include/asm/string.h | 5 +++-- arch/arm64/kernel/head.S | 2 +- arch/arm64/kernel/image-vars.h | 2 +- arch/arm64/kernel/kaslr.c | 3 ++- arch/arm64/kernel/module.c | 6 ++++-- arch/arm64/mm/ptdump.c | 6 +++--- include/linux/kasan-checks.h | 2 +- include/linux/kasan.h | 7 ++++--- include/linux/moduleloader.h | 3 ++- include/linux/string.h | 2 +- mm/ptdump.c | 13 ++++++++----- scripts/Makefile.lib | 2 ++ 16 files changed, 36 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a33718f8b62f..9386b108b132 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -334,7 +334,7 @@ config BROKEN_GAS_INST config KASAN_SHADOW_OFFSET hex - depends on KASAN + depends on KASAN_GENERIC || KASAN_SW_TAGS default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 05f46a60b245..6be9b3750250 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -137,7 +137,7 @@ head-y := arch/arm64/kernel/head.o ifeq ($(CONFIG_KASAN_SW_TAGS), y) KASAN_SHADOW_SCALE_SHIFT := 4 -else +else ifeq ($(CONFIG_KASAN_GENERIC), y) KASAN_SHADOW_SCALE_SHIFT := 3 endif diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index ddbe6bf00e33..bf125c591116 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -473,7 +473,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU #define NOKPROBE(x) #endif -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) #define EXPORT_SYMBOL_NOKASAN(name) #else #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 3bc08e6cf82e..cd671fb6707c 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -72,7 +72,7 @@ * address space for the shadow region respectively. They can bloat the stack * significantly, so double the (minimum) stack size when they are in use. 
*/ -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) #define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \ + KASAN_SHADOW_OFFSET) diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index b31e8e87a0db..3a3264ff47b9 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -5,7 +5,7 @@ #ifndef __ASM_STRING_H #define __ASM_STRING_H -#ifndef CONFIG_KASAN +#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) #define __HAVE_ARCH_STRRCHR extern char *strrchr(const char *, int c); @@ -48,7 +48,8 @@ extern void *__memset(void *, int, __kernel_size_t); void memcpy_flushcache(void *dst, const void *src, size_t cnt); #endif -#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) +#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ + !defined(__SANITIZE_ADDRESS__) /* * For files that are not instrumented (e.g. mm/slub.c) we diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 42b23ce679dc..a0dc987724ed 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -433,7 +433,7 @@ SYM_FUNC_START_LOCAL(__primary_switched) bl __pi_memset dsb ishst // Make zero page visible to PTW -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) bl kasan_early_init #endif #ifdef CONFIG_RANDOMIZE_BASE diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 39289d75118d..f676243abac6 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -37,7 +37,7 @@ __efistub_strncmp = __pi_strncmp; __efistub_strrchr = __pi_strrchr; __efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc; -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) __efistub___memcpy = __pi_memcpy; __efistub___memmove = __pi_memmove; __efistub___memset = __pi_memset; diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 0921aa1520b0..1c74c45b9494 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -161,7 +161,8 @@ u64 __init kaslr_early_init(u64 dt_phys) /* use the top 16 bits to randomize the linear region */ memstart_offset_seed = seed >> 48; - if (IS_ENABLED(CONFIG_KASAN)) + if (IS_ENABLED(CONFIG_KASAN_GENERIC) || + IS_ENABLED(CONFIG_KASAN_SW_TAGS)) /* * KASAN does not expect the module region to intersect the * vmalloc region, since shadow memory is allocated for each diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 2a1ad95d9b2c..fe21e0f06492 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -30,7 +30,8 @@ void *module_alloc(unsigned long size) if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) gfp_mask |= __GFP_NOWARN; - if (IS_ENABLED(CONFIG_KASAN)) + if (IS_ENABLED(CONFIG_KASAN_GENERIC) || + IS_ENABLED(CONFIG_KASAN_SW_TAGS)) /* don't exceed the static module region - see below */ module_alloc_end = MODULES_END; @@ -39,7 +40,8 @@ void *module_alloc(unsigned long size) NUMA_NO_NODE, __builtin_return_address(0)); if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && - !IS_ENABLED(CONFIG_KASAN)) + !IS_ENABLED(CONFIG_KASAN_GENERIC) && + !IS_ENABLED(CONFIG_KASAN_SW_TAGS)) /* * KASAN can only deal with module allocations being served * from the reserved module region, since the remainder of diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c index 807dc634bbd2..04137a8f3d2d 100644 --- 
a/arch/arm64/mm/ptdump.c +++ b/arch/arm64/mm/ptdump.c @@ -29,7 +29,7 @@ enum address_markers_idx { PAGE_OFFSET_NR = 0, PAGE_END_NR, -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) KASAN_START_NR, #endif }; @@ -37,7 +37,7 @@ enum address_markers_idx { static struct addr_marker address_markers[] = { { PAGE_OFFSET, "Linear Mapping start" }, { 0 /* PAGE_END */, "Linear Mapping end" }, -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, { KASAN_SHADOW_END, "Kasan shadow end" }, #endif @@ -383,7 +383,7 @@ void ptdump_check_wx(void) static int ptdump_init(void) { address_markers[PAGE_END_NR].start_address = PAGE_END; -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START; #endif ptdump_initialize(); diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index ac6aba632f2d..ca5e89fb10d3 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h @@ -9,7 +9,7 @@ * even in compilation units that selectively disable KASAN, but must use KASAN * to validate access to an address. Never use these in header files! */ -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) bool __kasan_check_read(const volatile void *p, unsigned int size); bool __kasan_check_write(const volatile void *p, unsigned int size); #else diff --git a/include/linux/kasan.h b/include/linux/kasan.h index d7042d129dc1..b1381ee6922a 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -238,7 +238,8 @@ static inline void kasan_release_vmalloc(unsigned long start, #endif /* CONFIG_KASAN_VMALLOC */ -#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) +#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ + !defined(CONFIG_KASAN_VMALLOC) /* * These functions provide a special case to support backing module @@ -248,12 +249,12 @@ static inline void kasan_release_vmalloc(unsigned long start, int kasan_module_alloc(void *addr, size_t size); void kasan_free_shadow(const struct vm_struct *vm); -#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ +#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } static inline void kasan_free_shadow(const struct vm_struct *vm) {} -#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ +#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ #ifdef CONFIG_KASAN_INLINE void kasan_non_canonical_hook(unsigned long addr); diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 4fa67a8b2265..9e09d11ffe5b 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -96,7 +96,8 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); -#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) +#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ + !defined(CONFIG_KASAN_VMALLOC) #include #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) #else diff --git a/include/linux/string.h b/include/linux/string.h index 1cd63a8a23ab..4fcfb56abcf5 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -267,7 +267,7 @@ void __write_overflow(void) __compiletime_error("detected write beyond 
size of o #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); diff --git a/mm/ptdump.c b/mm/ptdump.c index ba88ec43ff21..4354c1422d57 100644 --- a/mm/ptdump.c +++ b/mm/ptdump.c @@ -4,7 +4,7 @@ #include #include -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) /* * This is an optimization for KASAN=y case. Since all kasan page tables * eventually point to the kasan_early_shadow_page we could call note_page() @@ -31,7 +31,8 @@ static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr, struct ptdump_state *st = walk->private; pgd_t val = READ_ONCE(*pgd); -#if CONFIG_PGTABLE_LEVELS > 4 && defined(CONFIG_KASAN) +#if CONFIG_PGTABLE_LEVELS > 4 && \ + (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d))) return note_kasan_page_table(walk, addr); #endif @@ -51,7 +52,8 @@ static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr, struct ptdump_state *st = walk->private; p4d_t val = READ_ONCE(*p4d); -#if CONFIG_PGTABLE_LEVELS > 3 && defined(CONFIG_KASAN) +#if CONFIG_PGTABLE_LEVELS > 3 && \ + (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud))) return note_kasan_page_table(walk, addr); #endif @@ -71,7 +73,8 @@ static int ptdump_pud_entry(pud_t *pud, unsigned long addr, struct ptdump_state *st = walk->private; pud_t val = READ_ONCE(*pud); -#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_KASAN) +#if CONFIG_PGTABLE_LEVELS > 2 && \ + (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd))) return note_kasan_page_table(walk, addr); #endif @@ -91,7 +94,7 @@ static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr, struct ptdump_state *st = walk->private; pmd_t val = READ_ONCE(*pmd); -#if defined(CONFIG_KASAN) +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte))) return note_kasan_page_table(walk, addr); #endif diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 94133708889d..213677a5ed33 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -148,10 +148,12 @@ endif # we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE) # ifeq ($(CONFIG_KASAN),y) +ifneq ($(CONFIG_KASAN_HW_TAGS),y) _c_flags += $(if $(patsubst n%,, \ $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)) endif +endif ifeq ($(CONFIG_UBSAN),y) _c_flags += $(if $(patsubst n%,, \ -- cgit v1.2.3 From 2e903b91479782b7dedd869603423d77e079d3de Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:10 -0800 Subject: kasan, arm64: implement HW_TAGS runtime Provide implementation of KASAN functions required for the hardware tag-based mode. Those include core functions for memory and pointer tagging (tags_hw.c) and bug reporting (report_tags_hw.c). Also adapt common KASAN code to support the new mode. 
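The core idea, as a sketch (condensed from the new mm/kasan/hw_tags.c added below): instead of writing shadow bytes, poisoning and unpoisoning program MTE memory tags through the arch hooks:

void poison_range(const void *address, size_t size, u8 value)
{
        hw_set_mem_tag_range(reset_tag(address),
                             round_up(size, KASAN_GRANULE_SIZE), value);
}

void unpoison_range(const void *address, size_t size)
{
        hw_set_mem_tag_range(reset_tag(address),
                             round_up(size, KASAN_GRANULE_SIZE),
                             get_tag(address));
}
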
Link: https://lkml.kernel.org/r/cfd0fbede579a6b66755c98c88c108e54f9c56bf.1606161801.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Acked-by: Catalin Marinas Reviewed-by: Alexander Potapenko Tested-by: Vincenzo Frascino Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/memory.h | 4 +- arch/arm64/kernel/cpufeature.c | 3 ++ arch/arm64/kernel/smp.c | 2 + include/linux/kasan.h | 24 ++++++++---- include/linux/mm.h | 2 +- include/linux/page-flags-layout.h | 2 +- mm/kasan/Makefile | 5 +++ mm/kasan/common.c | 15 ++++---- mm/kasan/hw_tags.c | 80 +++++++++++++++++++++++++++++++++++++++ mm/kasan/kasan.h | 19 ++++++++-- mm/kasan/report_hw_tags.c | 42 ++++++++++++++++++++ mm/kasan/report_sw_tags.c | 2 +- mm/kasan/shadow.c | 2 +- mm/kasan/sw_tags.c | 2 +- 14 files changed, 178 insertions(+), 26 deletions(-) create mode 100644 mm/kasan/hw_tags.c create mode 100644 mm/kasan/report_hw_tags.c (limited to 'include/linux') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index cd671fb6707c..18fce223b67b 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -214,7 +214,7 @@ static inline unsigned long kaslr_offset(void) (__force __typeof__(addr))__addr; \ }) -#ifdef CONFIG_KASAN_SW_TAGS +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) #define __tag_shifted(tag) ((u64)(tag) << 56) #define __tag_reset(addr) __untagged_addr(addr) #define __tag_get(addr) (__u8)((u64)(addr) >> 56) @@ -222,7 +222,7 @@ static inline unsigned long kaslr_offset(void) #define __tag_shifted(tag) 0UL #define __tag_reset(addr) (addr) #define __tag_get(addr) 0 -#endif /* CONFIG_KASAN_SW_TAGS */ +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline const void *__tag_set(const void *addr, u8 tag) { diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d87cfc6246e0..7ffb5f1d8b68 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -70,6 +70,7 @@ #include #include #include +#include #include #include #include @@ -1710,6 +1711,8 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) cleared_zero_page = true; mte_clear_page_tags(lm_alias(empty_zero_page)); } + + kasan_init_hw_tags_cpu(); } #endif /* CONFIG_ARM64_MTE */ diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 2499b895efea..19b1705ae5cb 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -462,6 +462,8 @@ void __init smp_prepare_boot_cpu(void) /* Conditionally switch to GIC PMR for interrupt masking */ if (system_uses_irq_prio_masking()) init_gic_priority_masking(); + + kasan_init_hw_tags(); } static u64 __init of_get_cpu_mpidr(struct device_node *dn) diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b1381ee6922a..d22ec4c9c1bd 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -190,25 +190,35 @@ static inline void kasan_record_aux_stack(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ -#ifdef CONFIG_KASAN_SW_TAGS - -void __init kasan_init_sw_tags(void); +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) void *kasan_reset_tag(const void *addr); bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); -#else /* CONFIG_KASAN_SW_TAGS */ - -static inline void kasan_init_sw_tags(void) { } 
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline void *kasan_reset_tag(const void *addr) { return (void *)addr; } -#endif /* CONFIG_KASAN_SW_TAGS */ +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/ + +#ifdef CONFIG_KASAN_SW_TAGS +void __init kasan_init_sw_tags(void); +#else +static inline void kasan_init_sw_tags(void) { } +#endif + +#ifdef CONFIG_KASAN_HW_TAGS +void kasan_init_hw_tags_cpu(void); +void __init kasan_init_hw_tags(void); +#else +static inline void kasan_init_hw_tags_cpu(void) { } +static inline void kasan_init_hw_tags(void) { } +#endif #ifdef CONFIG_KASAN_VMALLOC diff --git a/include/linux/mm.h b/include/linux/mm.h index 362579ad0758..024ec0a00c72 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1421,7 +1421,7 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) } #endif /* CONFIG_NUMA_BALANCING */ -#ifdef CONFIG_KASAN_SW_TAGS +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) static inline u8 page_kasan_tag(const struct page *page) { return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index e200eef6a7fd..7d4ec26d8a3e 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -77,7 +77,7 @@ #define LAST_CPUPID_SHIFT 0 #endif -#ifdef CONFIG_KASAN_SW_TAGS +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) #define KASAN_TAG_WIDTH 8 #else #define KASAN_TAG_WIDTH 0 diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index f1d68a34f3c9..9fe39a66388a 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -10,8 +10,10 @@ CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_quarantine.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_report_generic.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_report_hw_tags.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_report_sw_tags.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_shadow.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_hw_tags.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_sw_tags.o = $(CC_FLAGS_FTRACE) # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 @@ -27,10 +29,13 @@ CFLAGS_init.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_quarantine.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_report.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_report_generic.o := $(CC_FLAGS_KASAN_RUNTIME) +CFLAGS_report_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_report_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_shadow.o := $(CC_FLAGS_KASAN_RUNTIME) +CFLAGS_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME) obj-$(CONFIG_KASAN) := common.o report.o obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o +obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o diff --git a/mm/kasan/common.c b/mm/kasan/common.c index d5f23b2f170a..02613883846e 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -118,7 +118,7 @@ void kasan_free_pages(struct page *page, unsigned int order) */ static inline unsigned int optimal_redzone(unsigned int object_size) { - if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) return 0; return @@ -183,14 +183,14 @@ size_t kasan_metadata_size(struct kmem_cache *cache) struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, const void *object) { - return (void *)object + cache->kasan_info.alloc_meta_offset; + return (void *)reset_tag(object) + 
cache->kasan_info.alloc_meta_offset; } struct kasan_free_meta *get_free_info(struct kmem_cache *cache, const void *object) { BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32); - return (void *)object + cache->kasan_info.free_meta_offset; + return (void *)reset_tag(object) + cache->kasan_info.free_meta_offset; } void kasan_poison_slab(struct page *page) @@ -272,9 +272,8 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, alloc_info = get_alloc_info(cache, object); __memset(alloc_info, 0, sizeof(*alloc_info)); - if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - object = set_tag(object, - assign_tag(cache, object, true, false)); + if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS)) + object = set_tag(object, assign_tag(cache, object, true, false)); return (void *)object; } @@ -342,10 +341,10 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, redzone_end = round_up((unsigned long)object + cache->object_size, KASAN_GRANULE_SIZE); - if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) + if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS)) tag = assign_tag(cache, object, false, keep_tag); - /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ + /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */ unpoison_range(set_tag(object, tag), size); poison_range((void *)redzone_start, redzone_end - redzone_start, KASAN_KMALLOC_REDZONE); diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c new file mode 100644 index 000000000000..66419e908e21 --- /dev/null +++ b/mm/kasan/hw_tags.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains core hardware tag-based KASAN code. + * + * Copyright (c) 2020 Google, Inc. + * Author: Andrey Konovalov + */ + +#define pr_fmt(fmt) "kasan: " fmt + +#include +#include +#include +#include +#include +#include + +#include "kasan.h" + +/* kasan_init_hw_tags_cpu() is called for each CPU. */ +void kasan_init_hw_tags_cpu(void) +{ + hw_init_tags(KASAN_TAG_MAX); + hw_enable_tagging(); +} + +/* kasan_init_hw_tags() is called once on boot CPU. 
*/ +void __init kasan_init_hw_tags(void) +{ + pr_info("KernelAddressSanitizer initialized\n"); +} + +void *kasan_reset_tag(const void *addr) +{ + return reset_tag(addr); +} + +void poison_range(const void *address, size_t size, u8 value) +{ + hw_set_mem_tag_range(reset_tag(address), + round_up(size, KASAN_GRANULE_SIZE), value); +} + +void unpoison_range(const void *address, size_t size) +{ + hw_set_mem_tag_range(reset_tag(address), + round_up(size, KASAN_GRANULE_SIZE), get_tag(address)); +} + +u8 random_tag(void) +{ + return hw_get_random_tag(); +} + +bool check_invalid_free(void *addr) +{ + u8 ptr_tag = get_tag(addr); + u8 mem_tag = hw_get_mem_tag(addr); + + return (mem_tag == KASAN_TAG_INVALID) || + (ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag); +} + +void kasan_set_free_info(struct kmem_cache *cache, + void *object, u8 tag) +{ + struct kasan_alloc_meta *alloc_meta; + + alloc_meta = get_alloc_info(cache, object); + kasan_set_track(&alloc_meta->free_track[0], GFP_NOWAIT); +} + +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, + void *object, u8 tag) +{ + struct kasan_alloc_meta *alloc_meta; + + alloc_meta = get_alloc_info(cache, object); + return &alloc_meta->free_track[0]; +} diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 92cb2c16e314..64560cc71191 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -154,6 +154,11 @@ struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, struct kasan_free_meta *get_free_info(struct kmem_cache *cache, const void *object); +void poison_range(const void *address, size_t size, u8 value); +void unpoison_range(const void *address, size_t size); + +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + static inline const void *kasan_shadow_to_mem(const void *shadow_addr) { return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET) @@ -165,9 +170,6 @@ static inline bool addr_has_metadata(const void *addr) return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START)); } -void poison_range(const void *address, size_t size, u8 value); -void unpoison_range(const void *address, size_t size); - /** * check_memory_region - Check memory region, and report if invalid access. * @addr: the accessed address @@ -179,6 +181,15 @@ void unpoison_range(const void *address, size_t size); bool check_memory_region(unsigned long addr, size_t size, bool write, unsigned long ret_ip); +#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +static inline bool addr_has_metadata(const void *addr) +{ + return true; +} + +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + bool check_invalid_free(void *addr); void *find_first_bad_addr(void *addr, size_t size); @@ -215,7 +226,7 @@ static inline void quarantine_reduce(void) { } static inline void quarantine_remove_cache(struct kmem_cache *cache) { } #endif -#ifdef CONFIG_KASAN_SW_TAGS +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) void print_tags(u8 addr_tag, const void *addr); diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c new file mode 100644 index 000000000000..da543eb832cd --- /dev/null +++ b/mm/kasan/report_hw_tags.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains hardware tag-based KASAN specific error reporting code. + * + * Copyright (c) 2020 Google, Inc. 
+ * Author: Andrey Konovalov + */ + +#include +#include +#include +#include +#include +#include + +#include "kasan.h" + +const char *get_bug_type(struct kasan_access_info *info) +{ + return "invalid-access"; +} + +void *find_first_bad_addr(void *addr, size_t size) +{ + return reset_tag(addr); +} + +void metadata_fetch_row(char *buffer, void *row) +{ + int i; + + for (i = 0; i < META_BYTES_PER_ROW; i++) + buffer[i] = hw_get_mem_tag(row + i * KASAN_GRANULE_SIZE); +} + +void print_tags(u8 addr_tag, const void *addr) +{ + u8 memory_tag = hw_get_mem_tag((void *)addr); + + pr_err("Pointer tag: [%02x], memory tag: [%02x]\n", + addr_tag, memory_tag); +} diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c index add2dfe6169c..aebc44a29e83 100644 --- a/mm/kasan/report_sw_tags.c +++ b/mm/kasan/report_sw_tags.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * This file contains tag-based KASAN specific error reporting code. + * This file contains software tag-based KASAN specific error reporting code. * * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Andrey Ryabinin diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index ba84e5106585..ac6a5f57df33 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -107,7 +107,7 @@ void unpoison_range(const void *address, size_t size) if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) *shadow = tag; - else + else /* CONFIG_KASAN_GENERIC */ *shadow = size & KASAN_GRANULE_MASK; } } diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c index 7317d5229b2b..a518483f3965 100644 --- a/mm/kasan/sw_tags.c +++ b/mm/kasan/sw_tags.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * This file contains core tag-based KASAN code. + * This file contains core software tag-based KASAN code. * * Copyright (c) 2018 Google, Inc. * Author: Andrey Konovalov -- cgit v1.2.3 From d56a9ef84bd0e1e8fba7a837ab12a4ec8476579f Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:42 -0800 Subject: kasan, arm64: unpoison stack only with CONFIG_KASAN_STACK There's a config option CONFIG_KASAN_STACK that has to be enabled for KASAN to use stack instrumentation and perform validity checks for stack variables. There's no need to unpoison stack when CONFIG_KASAN_STACK is not enabled. Only call kasan_unpoison_task_stack[_below]() when CONFIG_KASAN_STACK is enabled. Note, that CONFIG_KASAN_STACK is an option that is currently always defined when CONFIG_KASAN is enabled, and therefore has to be tested with #if instead of #ifdef. 
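The resulting guard looks like this (sketch of the include/linux/kasan.h change below; the plain #if works because CONFIG_KASAN_STACK is always defined to 0 or 1):

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif
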
Link: https://lkml.kernel.org/r/d09dd3f8abb388da397fd11598c5edeaa83fe559.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/If8a891e9fe01ea543e00b576852685afec0887e3 Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Acked-by: Catalin Marinas Reviewed-by: Dmitry Vyukov Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/kernel/sleep.S | 2 +- arch/x86/kernel/acpi/wakeup_64.S | 2 +- include/linux/kasan.h | 10 ++++++---- mm/kasan/common.c | 2 ++ 4 files changed, 10 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 4be7f7eed875..6bdef7362c0e 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume) */ bl cpu_do_resume -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK mov x0, sp bl kasan_unpoison_task_stack_below #endif diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index c8daa92f38dc..5d3a0b8fd379 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -112,7 +112,7 @@ SYM_FUNC_START(do_suspend_lowlevel) movq pt_regs_r14(%rax), %r14 movq pt_regs_r15(%rax), %r15 -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK /* * The suspend path may have poisoned some areas deeper in the stack, * which we now need to unpoison. diff --git a/include/linux/kasan.h b/include/linux/kasan.h index d22ec4c9c1bd..e638255ce906 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -77,8 +77,6 @@ static inline void kasan_disable_current(void) {} void kasan_unpoison_range(const void *address, size_t size); -void kasan_unpoison_task_stack(struct task_struct *task); - void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); @@ -123,8 +121,6 @@ void kasan_restore_multi_shot(bool enabled); static inline void kasan_unpoison_range(const void *address, size_t size) {} -static inline void kasan_unpoison_task_stack(struct task_struct *task) {} - static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} @@ -176,6 +172,12 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } #endif /* CONFIG_KASAN */ +#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK +void kasan_unpoison_task_stack(struct task_struct *task); +#else +static inline void kasan_unpoison_task_stack(struct task_struct *task) {} +#endif + #ifdef CONFIG_KASAN_GENERIC void kasan_cache_shrink(struct kmem_cache *cache); diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 6c38fd0a9e5c..2754ce0c8334 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -63,6 +63,7 @@ void kasan_unpoison_range(const void *address, size_t size) unpoison_range(address, size); } +#if CONFIG_KASAN_STACK static void __kasan_unpoison_stack(struct task_struct *task, const void *sp) { void *base = task_stack_page(task); @@ -89,6 +90,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark) unpoison_range(base, watermark - base); } +#endif /* CONFIG_KASAN_STACK */ void kasan_alloc_pages(struct page *page, unsigned int order) { -- cgit v1.2.3 From c0054c565ae598073d6c27762c7d4f7de49a45d9 Mon Sep 17 00:00:00 
2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:52 -0800 Subject: kasan: inline kasan_reset_tag for tag-based modes Using kasan_reset_tag() currently results in a function call. As it's called quite often from the allocator code, this leads to a noticeable slowdown. Move it to include/linux/kasan.h and turn it into a static inline function. Also remove the now unneeded reset_tag() internal KASAN macro and use kasan_reset_tag() instead. Link: https://lkml.kernel.org/r/6940383a3a9dfb416134d338d8fac97a9ebb8686.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/I4d2061acfe91d480a75df00b07c22d8494ef14b5 Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Reviewed-by: Dmitry Vyukov Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 5 ++++- mm/kasan/common.c | 6 +++--- mm/kasan/hw_tags.c | 9 ++------- mm/kasan/kasan.h | 4 ---- mm/kasan/report.c | 4 ++-- mm/kasan/report_hw_tags.c | 2 +- mm/kasan/report_sw_tags.c | 4 ++-- mm/kasan/shadow.c | 4 ++-- mm/kasan/sw_tags.c | 9 ++------- 9 files changed, 18 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index e638255ce906..3bb72de94f90 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -194,7 +194,10 @@ static inline void kasan_record_aux_stack(void *ptr) {} #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) -void *kasan_reset_tag(const void *addr); +static inline void *kasan_reset_tag(const void *addr) +{ + return (void *)arch_kasan_reset_tag(addr); +} bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); diff --git a/mm/kasan/common.c b/mm/kasan/common.c index b71dfe7c5059..780ec27459ab 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -179,14 +179,14 @@ size_t kasan_metadata_size(struct kmem_cache *cache) struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache, const void *object) { - return (void *)reset_tag(object) + cache->kasan_info.alloc_meta_offset; + return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset; } struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, const void *object) { BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32); - return (void *)reset_tag(object) + cache->kasan_info.free_meta_offset; + return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset; } void kasan_poison_slab(struct page *page) @@ -283,7 +283,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object, tag = get_tag(object); tagged_object = object; - object = reset_tag(object); + object = kasan_reset_tag(object); if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != object)) { diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index cb849c8da978..227599e54e8e 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -30,20 +30,15 @@ void __init kasan_init_hw_tags(void) pr_info("KernelAddressSanitizer initialized\n"); } -void *kasan_reset_tag(const void *addr) -{ - return reset_tag(addr); -} - void poison_range(const void *address, size_t size, u8 value) { - hw_set_mem_tag_range(reset_tag(address), + hw_set_mem_tag_range(kasan_reset_tag(address), round_up(size, KASAN_GRANULE_SIZE), value); } void unpoison_range(const void *address, size_t size) { - 
hw_set_mem_tag_range(reset_tag(address), + hw_set_mem_tag_range(kasan_reset_tag(address), round_up(size, KASAN_GRANULE_SIZE), get_tag(address)); } diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 0eab7e4cecb8..5e8cd2080369 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -248,15 +248,11 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag) return addr; } #endif -#ifndef arch_kasan_reset_tag -#define arch_kasan_reset_tag(addr) ((void *)(addr)) -#endif #ifndef arch_kasan_get_tag #define arch_kasan_get_tag(addr) 0 #endif #define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag))) -#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr)) #define get_tag(addr) arch_kasan_get_tag(addr) #ifdef CONFIG_KASAN_HW_TAGS diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 85ce2cb2cd2b..00c590efdaea 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -328,7 +328,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip) unsigned long flags; u8 tag = get_tag(object); - object = reset_tag(object); + object = kasan_reset_tag(object); #if IS_ENABLED(CONFIG_KUNIT) if (current->kunit_test) @@ -361,7 +361,7 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write, disable_trace_on_warning(); tagged_addr = (void *)addr; - untagged_addr = reset_tag(tagged_addr); + untagged_addr = kasan_reset_tag(tagged_addr); info.access_addr = tagged_addr; if (addr_has_metadata(untagged_addr)) diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c index da543eb832cd..57114f0e14d1 100644 --- a/mm/kasan/report_hw_tags.c +++ b/mm/kasan/report_hw_tags.c @@ -22,7 +22,7 @@ const char *get_bug_type(struct kasan_access_info *info) void *find_first_bad_addr(void *addr, size_t size) { - return reset_tag(addr); + return kasan_reset_tag(addr); } void metadata_fetch_row(char *buffer, void *row) diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c index 317100fd95b9..7604b46239d4 100644 --- a/mm/kasan/report_sw_tags.c +++ b/mm/kasan/report_sw_tags.c @@ -41,7 +41,7 @@ const char *get_bug_type(struct kasan_access_info *info) int i; tag = get_tag(info->access_addr); - addr = reset_tag(info->access_addr); + addr = kasan_reset_tag(info->access_addr); page = kasan_addr_to_page(addr); if (page && PageSlab(page)) { cache = page->slab_cache; @@ -72,7 +72,7 @@ const char *get_bug_type(struct kasan_access_info *info) void *find_first_bad_addr(void *addr, size_t size) { u8 tag = get_tag(addr); - void *p = reset_tag(addr); + void *p = kasan_reset_tag(addr); void *end = p + size; while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p)) diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index ac6a5f57df33..44a2b748f9d3 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -81,7 +81,7 @@ void poison_range(const void *address, size_t size, u8 value) * some of the callers (e.g. kasan_poison_object_data) pass tagged * addresses to this function. */ - address = reset_tag(address); + address = kasan_reset_tag(address); shadow_start = kasan_mem_to_shadow(address); shadow_end = kasan_mem_to_shadow(address + size); @@ -98,7 +98,7 @@ void unpoison_range(const void *address, size_t size) * some of the callers (e.g. kasan_unpoison_object_data) pass tagged * addresses to this function. 
*/ - address = reset_tag(address); + address = kasan_reset_tag(address); poison_range(address, size, tag); diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c index 6d7648cc3b98..e17de2619bbf 100644 --- a/mm/kasan/sw_tags.c +++ b/mm/kasan/sw_tags.c @@ -67,11 +67,6 @@ u8 random_tag(void) return (u8)(state % (KASAN_TAG_MAX + 1)); } -void *kasan_reset_tag(const void *addr) -{ - return reset_tag(addr); -} - bool check_memory_region(unsigned long addr, size_t size, bool write, unsigned long ret_ip) { @@ -107,7 +102,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write, if (tag == KASAN_TAG_KERNEL) return true; - untagged_addr = reset_tag((const void *)addr); + untagged_addr = kasan_reset_tag((const void *)addr); if (unlikely(untagged_addr < kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { return !kasan_report(addr, size, write, ret_ip); @@ -126,7 +121,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write, bool check_invalid_free(void *addr) { u8 tag = get_tag(addr); - u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(reset_tag(addr))); + u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(kasan_reset_tag(addr))); return (shadow_byte == KASAN_TAG_INVALID) || (tag != KASAN_TAG_KERNEL && tag != shadow_byte); -- cgit v1.2.3 From bffe690708c8b4fdb8f0bff8ff22b347fc6c709a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:02:59 -0800 Subject: kasan: open-code kasan_unpoison_slab There's the external annotation kasan_unpoison_slab() that is currently defined as static inline and uses kasan_unpoison_range(). Open-code this function in mempool.c. Otherwise with an upcoming change this function will result in an unnecessary function call. Link: https://lkml.kernel.org/r/131a6694a978a9a8b150187e539eecc8bcbf759b.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/Ia7c8b659f79209935cbaab3913bf7f082cc43a0e Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 6 ------ mm/mempool.c | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 3bb72de94f90..7350de3e9fe4 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -107,11 +107,6 @@ struct kasan_cache { int free_meta_offset; }; -size_t __ksize(const void *); -static inline void kasan_unpoison_slab(const void *ptr) -{ - kasan_unpoison_range(ptr, __ksize(ptr)); -} size_t kasan_metadata_size(struct kmem_cache *cache); bool kasan_save_enable_multi_shot(void); @@ -167,7 +162,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, return false; } -static inline void kasan_unpoison_slab(const void *ptr) { } static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } #endif /* CONFIG_KASAN */ diff --git a/mm/mempool.c b/mm/mempool.c index f473cdddaff0..583a9865b181 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element) static void kasan_unpoison_element(mempool_t *pool, void *element) { if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) - kasan_unpoison_slab(element); + kasan_unpoison_range(element, __ksize(element)); 
else if (pool->alloc == mempool_alloc_pages) kasan_alloc_pages(element, (unsigned long)pool->pool_data); } -- cgit v1.2.3 From 34303244f2615add92076a4bf2d4f39323bde4f2 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:03:10 -0800 Subject: kasan, mm: check kasan_enabled in annotations Declare the kasan_enabled static key in include/linux/kasan.h, use it from include/linux/mm.h, and check it in all kasan annotations. This avoids any slowdown caused by function calls when kasan_enabled is disabled. Link: https://lkml.kernel.org/r/9f90e3c0aa840dbb4833367c2335193299f69023.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/I2589451d3c96c97abbcbf714baabe6161c6f153e Co-developed-by: Vincenzo Frascino Signed-off-by: Vincenzo Frascino Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Reviewed-by: Dmitry Vyukov Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 213 +++++++++++++++++++++++++++++++++++++++----------- include/linux/mm.h | 22 ++++-- mm/kasan/common.c | 56 ++++++------- 3 files changed, 210 insertions(+), 81 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 7350de3e9fe4..9176849c4934 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -2,6 +2,7 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H +#include <linux/static_key.h> #include struct kmem_cache; @@ -75,54 +76,176 @@ static inline void kasan_disable_current(void) {} #ifdef CONFIG_KASAN -void kasan_unpoison_range(const void *address, size_t size); +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; +}; -void kasan_alloc_pages(struct page *page, unsigned int order); -void kasan_free_pages(struct page *page, unsigned int order); +#ifdef CONFIG_KASAN_HW_TAGS +DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); +static __always_inline bool kasan_enabled(void) +{ + return static_branch_likely(&kasan_flag_enabled); +} +#else +static inline bool kasan_enabled(void) +{ + return true; +} +#endif -void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, - slab_flags_t *flags); +void __kasan_unpoison_range(const void *addr, size_t size); +static __always_inline void kasan_unpoison_range(const void *addr, size_t size) +{ + if (kasan_enabled()) + __kasan_unpoison_range(addr, size); +} -void kasan_poison_slab(struct page *page); -void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); -void kasan_poison_object_data(struct kmem_cache *cache, void *object); -void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, - const void *object); +void __kasan_alloc_pages(struct page *page, unsigned int order); +static __always_inline void kasan_alloc_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + __kasan_alloc_pages(page, order); +} -void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, - gfp_t flags); -void kasan_kfree_large(void *ptr, unsigned long ip); -void kasan_poison_kfree(void *ptr, unsigned long ip); -void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object, - size_t size, gfp_t flags); -void * __must_check kasan_krealloc(const void *object, size_t new_size, - gfp_t flags); +void __kasan_free_pages(struct page *page, unsigned int order); +static __always_inline void kasan_free_pages(struct page *page, +
unsigned int order) +{ + if (kasan_enabled()) + __kasan_free_pages(page, order); +} -void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags); -bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); +void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, + slab_flags_t *flags); +static __always_inline void kasan_cache_create(struct kmem_cache *cache, + unsigned int *size, slab_flags_t *flags) +{ + if (kasan_enabled()) + __kasan_cache_create(cache, size, flags); +} -struct kasan_cache { - int alloc_meta_offset; - int free_meta_offset; -}; +size_t __kasan_metadata_size(struct kmem_cache *cache); +static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache) +{ + if (kasan_enabled()) + return __kasan_metadata_size(cache); + return 0; +} + +void __kasan_poison_slab(struct page *page); +static __always_inline void kasan_poison_slab(struct page *page) +{ + if (kasan_enabled()) + __kasan_poison_slab(page); +} + +void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object); +static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache, + void *object) +{ + if (kasan_enabled()) + __kasan_unpoison_object_data(cache, object); +} + +void __kasan_poison_object_data(struct kmem_cache *cache, void *object); +static __always_inline void kasan_poison_object_data(struct kmem_cache *cache, + void *object) +{ + if (kasan_enabled()) + __kasan_poison_object_data(cache, object); +} + +void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, + const void *object); +static __always_inline void * __must_check kasan_init_slab_obj( + struct kmem_cache *cache, const void *object) +{ + if (kasan_enabled()) + return __kasan_init_slab_obj(cache, object); + return (void *)object; +} + +bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); +static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip) +{ + if (kasan_enabled()) + return __kasan_slab_free(s, object, ip); + return false; +} + +void * __must_check __kasan_slab_alloc(struct kmem_cache *s, + void *object, gfp_t flags); +static __always_inline void * __must_check kasan_slab_alloc( + struct kmem_cache *s, void *object, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_slab_alloc(s, object, flags); + return object; +} + +void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object, + size_t size, gfp_t flags); +static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s, + const void *object, size_t size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_kmalloc(s, object, size, flags); + return (void *)object; +} -size_t kasan_metadata_size(struct kmem_cache *cache); +void * __must_check __kasan_kmalloc_large(const void *ptr, + size_t size, gfp_t flags); +static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr, + size_t size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_kmalloc_large(ptr, size, flags); + return (void *)ptr; +} + +void * __must_check __kasan_krealloc(const void *object, + size_t new_size, gfp_t flags); +static __always_inline void * __must_check kasan_krealloc(const void *object, + size_t new_size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_krealloc(object, new_size, flags); + return (void *)object; +} + +void __kasan_poison_kfree(void *ptr, unsigned long ip); +static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip) +{ + if (kasan_enabled()) + 
__kasan_poison_kfree(ptr, ip); +} + +void __kasan_kfree_large(void *ptr, unsigned long ip); +static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip) +{ + if (kasan_enabled()) + __kasan_kfree_large(ptr, ip); +} bool kasan_save_enable_multi_shot(void); void kasan_restore_multi_shot(bool enabled); #else /* CONFIG_KASAN */ +static inline bool kasan_enabled(void) +{ + return false; +} static inline void kasan_unpoison_range(const void *address, size_t size) {} - static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} - static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} - +static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) {} @@ -133,36 +256,32 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache, { return (void *)object; } - -static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) +static inline bool kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip) { - return ptr; + return false; +} +static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags) +{ + return object; } -static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} -static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) { return (void *)object; } +static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) +{ + return (void *)ptr; +} static inline void *kasan_krealloc(const void *object, size_t new_size, gfp_t flags) { return (void *)object; } - -static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags) -{ - return object; -} -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip) -{ - return false; -} - -static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } +static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} +static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} #endif /* CONFIG_KASAN */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 024ec0a00c72..5299b90a6c40 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -31,6 +31,7 @@ #include #include #include +#include <linux/kasan.h> struct mempolicy; struct anon_vma; @@ -1422,22 +1423,30 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) #endif /* CONFIG_NUMA_BALANCING */ #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) + static inline u8 page_kasan_tag(const struct page *page) { - return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; + if (kasan_enabled()) + return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; + return 0xff; } static inline void page_kasan_tag_set(struct page *page, u8 tag) { - page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); - page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; + if (kasan_enabled()) { + page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); + page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; + } } static inline void page_kasan_tag_reset(struct page *page) { - page_kasan_tag_set(page, 0xff); + if (kasan_enabled()) + page_kasan_tag_set(page, 0xff); } -#else + +#else /*
CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ + static inline u8 page_kasan_tag(const struct page *page) { return 0xff; @@ -1445,7 +1454,8 @@ static inline u8 page_kasan_tag(const struct page *page) static inline void page_kasan_tag_set(struct page *page, u8 tag) { } static inline void page_kasan_tag_reset(struct page *page) { } -#endif + +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline struct zone *page_zone(const struct page *page) { diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 219c2979bd3e..ae0130cf9de3 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -58,7 +58,7 @@ void kasan_disable_current(void) } #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ -void kasan_unpoison_range(const void *address, size_t size) +void __kasan_unpoison_range(const void *address, size_t size) { unpoison_range(address, size); } @@ -86,7 +86,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark) } #endif /* CONFIG_KASAN_STACK */ -void kasan_alloc_pages(struct page *page, unsigned int order) +void __kasan_alloc_pages(struct page *page, unsigned int order) { u8 tag; unsigned long i; @@ -100,7 +100,7 @@ void kasan_alloc_pages(struct page *page, unsigned int order) unpoison_range(page_address(page), PAGE_SIZE << order); } -void kasan_free_pages(struct page *page, unsigned int order) +void __kasan_free_pages(struct page *page, unsigned int order) { if (likely(!PageHighMem(page))) poison_range(page_address(page), @@ -127,8 +127,8 @@ static inline unsigned int optimal_redzone(unsigned int object_size) object_size <= (1 << 16) - 1024 ? 1024 : 2048; } -void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, - slab_flags_t *flags) +void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, + slab_flags_t *flags) { unsigned int orig_size = *size; unsigned int redzone_size; @@ -173,7 +173,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, *flags |= SLAB_KASAN; } -size_t kasan_metadata_size(struct kmem_cache *cache) +size_t __kasan_metadata_size(struct kmem_cache *cache) { if (!kasan_stack_collection_enabled()) return 0; @@ -196,7 +196,7 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset; } -void kasan_poison_slab(struct page *page) +void __kasan_poison_slab(struct page *page) { unsigned long i; @@ -206,12 +206,12 @@ void kasan_poison_slab(struct page *page) KASAN_KMALLOC_REDZONE); } -void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) +void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object) { unpoison_range(object, cache->object_size); } -void kasan_poison_object_data(struct kmem_cache *cache, void *object) +void __kasan_poison_object_data(struct kmem_cache *cache, void *object) { poison_range(object, round_up(cache->object_size, KASAN_GRANULE_SIZE), @@ -264,7 +264,7 @@ static u8 assign_tag(struct kmem_cache *cache, const void *object, #endif } -void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, +void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, const void *object) { struct kasan_alloc_meta *alloc_meta; @@ -283,7 +283,7 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, return (void *)object; } -static bool __kasan_slab_free(struct kmem_cache *cache, void *object, +static bool ____kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip, bool quarantine) { u8 tag; @@ -326,9 +326,9 @@ static bool 
__kasan_slab_free(struct kmem_cache *cache, void *object, return IS_ENABLED(CONFIG_KASAN_GENERIC); } -bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) +bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) { - return __kasan_slab_free(cache, object, ip, true); + return ____kasan_slab_free(cache, object, ip, true); } static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) @@ -336,7 +336,7 @@ static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags); } -static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, +static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags, bool keep_tag) { unsigned long redzone_start; @@ -368,20 +368,20 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, return set_tag(object, tag); } -void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, - gfp_t flags) +void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, + void *object, gfp_t flags) { - return __kasan_kmalloc(cache, object, cache->object_size, flags, false); + return ____kasan_kmalloc(cache, object, cache->object_size, flags, false); } -void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, - size_t size, gfp_t flags) +void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object, + size_t size, gfp_t flags) { - return __kasan_kmalloc(cache, object, size, flags, true); + return ____kasan_kmalloc(cache, object, size, flags, true); } -EXPORT_SYMBOL(kasan_kmalloc); +EXPORT_SYMBOL(__kasan_kmalloc); -void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, +void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) { struct page *page; @@ -406,7 +406,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, return (void *)ptr; } -void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags) +void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags) { struct page *page; @@ -416,13 +416,13 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags) page = virt_to_head_page(object); if (unlikely(!PageSlab(page))) - return kasan_kmalloc_large(object, size, flags); + return __kasan_kmalloc_large(object, size, flags); else - return __kasan_kmalloc(page->slab_cache, object, size, + return ____kasan_kmalloc(page->slab_cache, object, size, flags, true); } -void kasan_poison_kfree(void *ptr, unsigned long ip) +void __kasan_poison_kfree(void *ptr, unsigned long ip) { struct page *page; @@ -435,11 +435,11 @@ void kasan_poison_kfree(void *ptr, unsigned long ip) } poison_range(ptr, page_size(page), KASAN_FREE_PAGE); } else { - __kasan_slab_free(page->slab_cache, ptr, ip, false); + ____kasan_slab_free(page->slab_cache, ptr, ip, false); } } -void kasan_kfree_large(void *ptr, unsigned long ip) +void __kasan_kfree_large(void *ptr, unsigned long ip) { if (ptr != page_address(virt_to_head_page(ptr))) kasan_report_invalid_free(ptr, ip); -- cgit v1.2.3 From eeb3160c2419e0f1045537acac7b19cba64112f4 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:03:13 -0800 Subject: kasan, mm: rename kasan_poison_kfree Rename kasan_poison_kfree() to kasan_slab_free_mempool() as it better reflects what this annotation does. 
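To illustrate what the annotation does, here is the lifecycle of a slab-backed mempool element as a minimal userspace sketch (the hook bodies and the fixed element size are stand-ins for the real annotations and for __ksize(); only the poison/unpoison pairing is the point):

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for the real annotation: poisons an element while it
     * sits unused in the pool. */
    static void kasan_slab_free_mempool(void *ptr, unsigned long ip)
    {
        printf("poison %p (caller ip %#lx)\n", ptr, ip);
    }

    /* Stand-in for kasan_unpoison_range(): makes the element usable
     * again when it is handed back out. */
    static void kasan_unpoison_range(const void *ptr, size_t size)
    {
        printf("unpoison %p, %zu bytes\n", (void *)ptr, size);
    }

    /* Mirrors the pairing in mm/mempool.c: poison on the way into the
     * pool, unpoison on the way out, so any access to a pooled-but-idle
     * element is reported. */
    static void pool_add(void *element)
    {
        kasan_slab_free_mempool(element, 0 /* _RET_IP_ stand-in */);
    }

    static void *pool_remove(void *element)
    {
        kasan_unpoison_range(element, 128 /* __ksize() stand-in */);
        return element;
    }

    int main(void)
    {
        static char element[128];

        pool_add(element);
        (void)pool_remove(element);
        return 0;
    }
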
Also add a comment that explains the PageSlab() check. No functional changes. Link: https://lkml.kernel.org/r/141675fb493555e984c5dca555e9d9f768c7bbaa.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/I5026f87364e556b506ef1baee725144bb04b8810 Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 16 ++++++++-------- mm/kasan/common.c | 40 +++++++++++++++++++++++----------------- mm/mempool.c | 2 +- 3 files changed, 32 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 9176849c4934..6f0c5d9aa43f 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -176,6 +176,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object, return false; } +void __kasan_slab_free_mempool(void *ptr, unsigned long ip); +static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) +{ + if (kasan_enabled()) + __kasan_slab_free_mempool(ptr, ip); +} + void * __must_check __kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); static __always_inline void * __must_check kasan_slab_alloc( @@ -216,13 +223,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, return (void *)object; } -void __kasan_poison_kfree(void *ptr, unsigned long ip); -static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip) -{ - if (kasan_enabled()) - __kasan_poison_kfree(ptr, ip); -} - void __kasan_kfree_large(void *ptr, unsigned long ip); static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip) { @@ -261,6 +261,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, { return false; } +static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) { @@ -280,7 +281,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } -static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} #endif /* CONFIG_KASAN */ diff --git a/mm/kasan/common.c b/mm/kasan/common.c index ae0130cf9de3..d0f8d7a955cd 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -331,6 +331,29 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) return ____kasan_slab_free(cache, object, ip, true); } +void __kasan_slab_free_mempool(void *ptr, unsigned long ip) +{ + struct page *page; + + page = virt_to_head_page(ptr); + + /* + * Even though this function is only called for kmem_cache_alloc and + * kmalloc backed mempool allocations, those allocations can still be + * !PageSlab() when the size provided to kmalloc is larger than + * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc. 
+ */ + if (unlikely(!PageSlab(page))) { + if (ptr != page_address(page)) { + kasan_report_invalid_free(ptr, ip); + return; + } + poison_range(ptr, page_size(page), KASAN_FREE_PAGE); + } else { + ____kasan_slab_free(page->slab_cache, ptr, ip, false); + } +} + static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) { kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags); @@ -422,23 +445,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag flags, true); } -void __kasan_poison_kfree(void *ptr, unsigned long ip) -{ - struct page *page; - - page = virt_to_head_page(ptr); - - if (unlikely(!PageSlab(page))) { - if (ptr != page_address(page)) { - kasan_report_invalid_free(ptr, ip); - return; - } - poison_range(ptr, page_size(page), KASAN_FREE_PAGE); - } else { - ____kasan_slab_free(page->slab_cache, ptr, ip, false); - } -} - void __kasan_kfree_large(void *ptr, unsigned long ip) { if (ptr != page_address(virt_to_head_page(ptr))) diff --git a/mm/mempool.c b/mm/mempool.c index 583a9865b181..624ed51b060f 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element) static __always_inline void kasan_poison_element(mempool_t *pool, void *element) { if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) - kasan_poison_kfree(element, _RET_IP_); + kasan_slab_free_mempool(element, _RET_IP_); else if (pool->alloc == mempool_alloc_pages) kasan_free_pages(element, (unsigned long)pool->pool_data); } -- cgit v1.2.3 From e86f8b09f215e3755cd2d56930487dec2de02433 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 22 Dec 2020 12:03:31 -0800 Subject: kasan, mm: allow cache merging with no metadata The reason cache merging is disabled with KASAN is that KASAN puts its metadata right after the allocated object. When the merged caches have slightly different sizes, the metadata ends up in different places, which KASAN doesn't support. It might be possible to adjust the metadata allocation algorithm and make it friendly to the cache merging code. Instead, this change takes a simpler approach and allows merging caches when no metadata is present, which is the case for hardware tag-based KASAN with kasan.mode=prod.
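To make the layout problem concrete, a small sketch (the sizes are made up; with generic KASAN the real offsets come out of __kasan_cache_create() and also account for redzones):

    #include <stdio.h>

    /* Simplified model: generic KASAN places metadata right after the
     * object, so the metadata offset tracks the object size. */
    static unsigned int alloc_meta_offset(unsigned int object_size)
    {
        return object_size; /* metadata begins where the object ends */
    }

    int main(void)
    {
        /* Two caches the slab allocator could otherwise merge: */
        printf("cache A: metadata at offset %u\n", alloc_meta_offset(96));
        printf("cache B: metadata at offset %u\n", alloc_meta_offset(104));
        /* A merged cache has a single kasan_info.alloc_meta_offset and
         * cannot describe both layouts, hence SLAB_KASAN in
         * SLAB_NEVER_MERGE. With no metadata there is no offset to
         * disagree about, and merging becomes safe. */
        return 0;
    }
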
Link: https://lkml.kernel.org/r/37497e940bfd4b32c0a93a702a9ae4cf061d5392.1606162397.git.andreyknvl@google.com Link: https://linux-review.googlesource.com/id/Ia114847dfb2244f297d2cb82d592bf6a07455dba Co-developed-by: Vincenzo Frascino Signed-off-by: Vincenzo Frascino Signed-off-by: Andrey Konovalov Reviewed-by: Dmitry Vyukov Reviewed-by: Marco Elver Tested-by: Vincenzo Frascino Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 21 +++++++++++++++++++-- mm/kasan/common.c | 11 +++++++++++ mm/slab_common.c | 3 ++- 3 files changed, 32 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6f0c5d9aa43f..5e0655fb2a6f 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -82,17 +82,30 @@ }; #ifdef CONFIG_KASAN_HW_TAGS + DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); + static __always_inline bool kasan_enabled(void) { return static_branch_likely(&kasan_flag_enabled); } -#else + +#else /* CONFIG_KASAN_HW_TAGS */ + static inline bool kasan_enabled(void) { return true; } -#endif + +#endif /* CONFIG_KASAN_HW_TAGS */ + +slab_flags_t __kasan_never_merge(void); +static __always_inline slab_flags_t kasan_never_merge(void) +{ + if (kasan_enabled()) + return __kasan_never_merge(); + return 0; +} void __kasan_unpoison_range(const void *addr, size_t size); static __always_inline void kasan_unpoison_range(const void *addr, size_t size) @@ -239,6 +252,10 @@ static inline bool kasan_enabled(void) { return false; } +static inline slab_flags_t kasan_never_merge(void) +{ + return 0; +} static inline void kasan_unpoison_range(const void *address, size_t size) {} static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 0cd583d2fe1c..b25167664ead 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -86,6 +86,17 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark) } #endif /* CONFIG_KASAN_STACK */ +/* + * Only allow cache merging when stack collection is disabled and no metadata + * is present. + */ +slab_flags_t __kasan_never_merge(void) +{ + if (kasan_stack_collection_enabled()) + return SLAB_KASAN; + return 0; +} + void __kasan_alloc_pages(struct page *page, unsigned int order) { u8 tag; diff --git a/mm/slab_common.c b/mm/slab_common.c index 573fbacd9ef5..e981c80d216c 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -18,6 +18,7 @@ #include #include #include +#include <linux/kasan.h> #include #include #include @@ -53,7 +54,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work, */ #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \ - SLAB_FAILSLAB | SLAB_KASAN) + SLAB_FAILSLAB | kasan_never_merge()) #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ SLAB_CACHE_DMA32 | SLAB_ACCOUNT) -- cgit v1.2.3
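Taken together, the series converges on one pattern for every KASAN annotation: an out-of-line __kasan_*() implementation in mm/kasan/, plus an __always_inline wrapper in include/linux/kasan.h that tests the kasan_flag_enabled static key. A minimal userspace sketch of that pattern (the static key is modeled as a plain bool here; in the kernel it is a patched jump label, so the disabled case costs at most one predictable branch):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Models DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled). */
    static bool kasan_flag_enabled;

    static inline bool kasan_enabled(void)
    {
        return kasan_flag_enabled;
    }

    /* Out-of-line slow path, as in mm/kasan/common.c (stubbed here). */
    static void __kasan_unpoison_range(const void *addr, size_t size)
    {
        printf("unpoison %p, %zu bytes\n", (void *)addr, size);
    }

    /* Inline wrapper, as in include/linux/kasan.h: allocator hot paths
     * pay only the key test while KASAN is runtime-disabled. */
    static inline void kasan_unpoison_range(const void *addr, size_t size)
    {
        if (kasan_enabled())
            __kasan_unpoison_range(addr, size);
    }

    int main(void)
    {
        char buf[32];

        kasan_unpoison_range(buf, sizeof(buf)); /* no-op: key is off */
        kasan_flag_enabled = true;  /* flipped early at boot in the kernel */
        kasan_unpoison_range(buf, sizeof(buf)); /* now calls the slow path */
        return 0;
    }
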