path: root/kernel/stackleak.c
author     Mark Rutland <mark.rutland@arm.com>  2022-04-27 18:31:19 +0100
committer  Kees Cook <keescook@chromium.org>    2022-05-08 01:33:08 -0700
commit     9ec79840d6afaf472294588a6bbe145bcdffa28b (patch)
tree       7210d75e5717b1b17ab210be3e3614855d31e0ac /kernel/stackleak.c
parent     ac7838b4e1c552d54e67f78a29bc1bd7701c13e8 (diff)
stackleak: rework stack low bound handling
In stackleak_task_init(), stackleak_track_stack(), and __stackleak_erase(), we open-code skipping the STACK_END_MAGIC at the bottom of the stack. Each case is implemented slightly differently, and only the __stackleak_erase() case is commented.

In stackleak_task_init() and stackleak_track_stack() we unconditionally add sizeof(unsigned long) to the lowest stack address. In stackleak_task_init() we use end_of_stack() for this, and in stackleak_track_stack() we use task_stack_page(). In __stackleak_erase() we handle this by detecting if `kstack_ptr` has hit the stack end boundary, and if so, conditionally moving it above the magic.

This patch adds a new stackleak_task_low_bound() helper which is used in all three cases, which unconditionally adds sizeof(unsigned long) to the lowest address on the task stack, with commentary as to why. This uses end_of_stack() as stackleak_task_init() did prior to this patch, as this is consistent with the code in kernel/fork.c which initializes the STACK_END_MAGIC value.

In __stackleak_erase() we no longer need to check whether we've spilled into the STACK_END_MAGIC value, as stackleak_track_stack() ensures that `current->lowest_stack` stops immediately above this, and similarly the poison scan will stop immediately above this.

For stackleak_task_init() and stackleak_track_stack() this results in no change to code generation. For __stackleak_erase() the generated assembly is slightly simpler and shorter.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Popov <alex.popov@linux.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220427173128.2603085-5-mark.rutland@arm.com
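The stackleak_task_low_bound() helper itself is introduced outside kernel/stackleak.c, so its definition does not appear in the diff below (which is limited to this file). Based on the description above, a minimal sketch of what the helper does would look like the following; treat it as an illustration of the described behaviour rather than the verbatim kernel code:

    /*
     * Sketch only: reconstructed from the commit message above, assuming
     * end_of_stack() is available via <linux/sched/task_stack.h>.
     */
    static __always_inline unsigned long
    stackleak_task_low_bound(const struct task_struct *p)
    {
            /*
             * The lowest unsigned long on the task stack holds the
             * STACK_END_MAGIC value written by kernel/fork.c, which must
             * not be erased or poisoned, so the usable low bound sits one
             * long above end_of_stack().
             */
            return (unsigned long)end_of_stack(p) + sizeof(unsigned long);
    }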
Diffstat (limited to 'kernel/stackleak.c')
-rw-r--r--  kernel/stackleak.c  14
1 file changed, 4 insertions, 10 deletions
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index f7a0f8cf73c3..24b7cf01b297 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -72,9 +72,11 @@ late_initcall(stackleak_sysctls_init);
static __always_inline void __stackleak_erase(void)
{
+ const unsigned long task_stack_low = stackleak_task_low_bound(current);
+
/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
unsigned long kstack_ptr = current->lowest_stack;
- unsigned long boundary = (unsigned long)end_of_stack(current);
+ unsigned long boundary = task_stack_low;
unsigned int poison_count = 0;
const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
@@ -88,13 +90,6 @@ static __always_inline void __stackleak_erase(void)
kstack_ptr -= sizeof(unsigned long);
}
- /*
- * One 'long int' at the bottom of the thread stack is reserved and
- * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
- */
- if (kstack_ptr == boundary)
- kstack_ptr += sizeof(unsigned long);
-
#ifdef CONFIG_STACKLEAK_METRICS
current->prev_lowest_stack = kstack_ptr;
#endif
@@ -140,8 +135,7 @@ void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
/* 'lowest_stack' should be aligned on the register width boundary */
sp = ALIGN(sp, sizeof(unsigned long));
if (sp < current->lowest_stack &&
- sp >= (unsigned long)task_stack_page(current) +
- sizeof(unsigned long)) {
+ sp >= stackleak_task_low_bound(current)) {
current->lowest_stack = sp;
}
}
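For context, the poison-scan loop that sits just above the check removed in the second hunk is not shown in the diff. At the time of this patch it reads roughly as below (a reconstruction from the surrounding context lines, so the exact condition is an assumption). With 'boundary' now set to the value returned by stackleak_task_low_bound(), the scan bottoms out with kstack_ptr at that low bound, immediately above the STACK_END_MAGIC slot, which is why the removed fixup is no longer needed:

    /* Sketch of the scan in __stackleak_erase(), not taken verbatim from the hunks above. */
    while (kstack_ptr > boundary && poison_count <= depth) {
            if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON)
                    poison_count++;
            else
                    poison_count = 0;

            kstack_ptr -= sizeof(unsigned long);
    }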