author | Al Viro <viro@zeniv.linux.org.uk> | 2012-04-30 18:24:46 -0400 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2012-06-01 13:01:51 -0400 |
commit | 44fbbb3dc687c9709a6f2236197316e5c79ab1eb (patch) | |
tree | d3c995b1cb7e3f9ac5af09b8d78f6c839a65f35a | |
parent | 29bf5dd895219e5111099908040aecfc1509f9bb (diff) | |
download | lwn-44fbbb3dc687c9709a6f2236197316e5c79ab1eb.tar.gz lwn-44fbbb3dc687c9709a6f2236197316e5c79ab1eb.zip |
x86: get rid of calling do_notify_resume() when returning to kernel mode
If we end up calling do_notify_resume() with !user_mode(regs), it
does nothing (do_signal() explicitly bails out and we can't get there
with TIF_NOTIFY_RESUME in such situations). Then we jump to
resume_userspace_sig, which rechecks the same thing and bails out
to resume_kernel, thus breaking the loop.
It's easier and cheaper to check *before* calling do_notify_resume()
and bail out to resume_kernel immediately. And kill the check in
do_signal()...
Note that on amd64 we can't get there with !user_mode() at all - asm
glue takes care of that.
Acked-and-reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
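The four instructions added before each do_notify_resume() call in the diff below are, in effect, an open-coded user_mode() test on the CS value saved in the pt_regs frame: mask off the selector's RPL bits and compare against ring 3. A minimal stand-alone C sketch of that check, with the struct and constants simplified here for illustration (the kernel's real definitions live in asm/ptrace.h and asm/segment.h):

```c
#include <stdio.h>

/* Values as in arch/x86/include/asm/segment.h of that era (assumed here) */
#define SEGMENT_RPL_MASK 0x3 /* requested-privilege-level bits of a selector */
#define USER_RPL         0x3 /* ring 3 */

/* Cut-down stand-in for struct pt_regs: only the saved CS matters here */
struct fake_pt_regs {
	unsigned long cs;
};

/* Rough equivalent of user_mode(regs) on 32-bit x86:
 * was the interrupted context running in ring 3? */
static int frame_is_user_mode(const struct fake_pt_regs *regs)
{
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
}

int main(void)
{
	struct fake_pt_regs from_user   = { .cs = 0x73 }; /* typical user CS, RPL 3 */
	struct fake_pt_regs from_kernel = { .cs = 0x60 }; /* typical kernel CS, RPL 0 */

	/* Mirrors the new asm: "jb resume_kernel" when the RPL is below USER_RPL */
	printf("user frame:   %s\n",
	       frame_is_user_mode(&from_user) ? "call do_notify_resume" : "jb resume_kernel");
	printf("kernel frame: %s\n",
	       frame_is_user_mode(&from_kernel) ? "call do_notify_resume" : "jb resume_kernel");
	return 0;
}
```

With the decision made in the entry code itself, a kernel-mode frame branches straight to resume_kernel, which is why do_signal() can drop its early !user_mode(regs) return.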
-rw-r--r-- | arch/x86/kernel/entry_32.S | 13 |
-rw-r--r-- | arch/x86/kernel/signal.c | 10 |
2 files changed, 10 insertions, 13 deletions
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 01ccf9b71473..623f28837476 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -316,7 +316,6 @@ ret_from_exception:
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
-resume_userspace_sig:
 #ifdef CONFIG_VM86
 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
 	movb PT_CS(%esp), %al
@@ -615,9 +614,13 @@ work_notifysig:				# deal with pending signals and
 						# vm86-space
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
+	movb PT_CS(%esp), %bl
+	andb $SEGMENT_RPL_MASK, %bl
+	cmpb $USER_RPL, %bl
+	jb resume_kernel
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace_sig
+	jmp resume_userspace

 	ALIGN
 work_notifysig_v86:
@@ -630,9 +633,13 @@ work_notifysig_v86:
 #endif
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
+	movb PT_CS(%esp), %bl
+	andb $SEGMENT_RPL_MASK, %bl
+	cmpb $USER_RPL, %bl
+	jb resume_kernel
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace_sig
+	jmp resume_userspace
 END(work_pending)

 	# perform syscall exit tracing
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e8a89374d356..21af737053aa 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -737,16 +737,6 @@ static void do_signal(struct pt_regs *regs)
 	siginfo_t info;
 	int signr;

-	/*
-	 * We want the common case to go fast, which is why we may in certain
-	 * cases get here from kernel mode. Just return without doing anything
-	 * if so.
-	 * X86_32: vm86 regs switched out by assembly code before reaching
-	 * here, so testing against kernel CS suffices.
-	 */
-	if (!user_mode(regs))
-		return;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee! Actually deliver the signal. */