author | Peter Zijlstra <peterz@infradead.org> | 2013-08-14 14:51:00 +0200
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2013-09-25 14:23:07 +0200
commit | 1a338ac32ca630f67df25b4a16436cccc314e997 (patch) |
tree | 3291247c33c17376160821d19d1460c2da1da1a6 | /include/linux/preempt.h
parent | c2daa3bed53a81171cf8c1a36db798e82b91afe8 (diff) |
sched, x86: Optimize the preempt_schedule() call
Remove the bloat of the C calling convention out of the
preempt_enable() sites by creating an ASM wrapper which allows us to
do an asm("call ___preempt_schedule") instead.
calling.h bits by Andi Kleen
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-tk7xdi1cvvxewixzke8t8le1@git.kernel.org
[ Fixed build error. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/preempt.h')
-rw-r--r-- | include/linux/preempt.h | 13 |
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 2343d8715299..a3d9dc8c2c00 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -50,18 +50,17 @@ do { \
 #define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
 
 #ifdef CONFIG_PREEMPT
-asmlinkage void preempt_schedule(void);
 #define preempt_enable() \
 do { \
 	barrier(); \
 	if (unlikely(preempt_count_dec_and_test())) \
-		preempt_schedule(); \
+		__preempt_schedule(); \
 } while (0)
 
 #define preempt_check_resched() \
 do { \
 	if (should_resched()) \
-		preempt_schedule(); \
+		__preempt_schedule(); \
 } while (0)
 
 #else
@@ -83,17 +82,15 @@ do { \
 
 #ifdef CONFIG_PREEMPT
 
-#ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void preempt_schedule_context(void);
-#else
-#define preempt_schedule_context() preempt_schedule()
+#ifndef CONFIG_CONTEXT_TRACKING
+#define __preempt_schedule_context() __preempt_schedule()
 #endif
 
 #define preempt_enable_notrace() \
 do { \
 	barrier(); \
 	if (unlikely(__preempt_count_dec_and_test())) \
-		preempt_schedule_context(); \
+		__preempt_schedule_context(); \
 } while (0)
 #else
 #define preempt_enable_notrace()	preempt_enable_no_resched_notrace()
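The effect of the hunks above is that the generic preempt_enable() and preempt_enable_notrace() now expand to a per-architecture __preempt_schedule() / __preempt_schedule_context() helper instead of a direct C call to preempt_schedule(). The wrapper itself is not part of this diffstat-limited view; the sketch below shows roughly how the two sides could be wired up, assuming the x86 definition described in the commit message (an asm("call ___preempt_schedule") thunk) plus a plain C fallback for other architectures. The file names, the ___preempt_schedule_context symbol and the fallback mapping are illustrative assumptions, not quoted from this commit.

/*
 * Sketch only -- illustrative, not the literal contents of this series.
 *
 * Arch side (e.g. arch/x86/include/asm/preempt.h, assumed): the call site
 * becomes a bare "call" to an assembly thunk that saves and restores the
 * full register set itself (the calling.h bits mentioned in the commit
 * message), so the inlined preempt_enable() does not have to spill
 * caller-saved registers around a normal C function call.
 */
#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
# ifdef CONFIG_CONTEXT_TRACKING
    extern asmlinkage void ___preempt_schedule_context(void);
#   define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
# endif
#endif

/*
 * Generic side (assumed fallback): an architecture without such a thunk can
 * simply map the double-underscore names back onto the ordinary C functions,
 * so the generic preempt_enable()/preempt_enable_notrace() above still work.
 */
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif
#endif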