author    Andy Lutomirski <luto@kernel.org>    2015-03-23 12:32:54 -0700
committer Ingo Molnar <mingo@kernel.org>       2015-03-24 21:08:28 +0100
commit    b3494a4ab20f6bdf74cdf2badf7918bb65ee8a00 (patch)
tree      2875f99133dd8b019b6aee8d0e81c79428630748
parent    bc465aa9d045feb0e13b4a8f32cc33c1943f62d6 (diff)
x86/asm/entry: Check for syscall exit work with IRQs disabled
We currently have a race: if we're preempted during syscall exit, we
can fail to process syscall return work that is queued up while we're
preempted in ret_from_sys_call after checking ti.flags.

Fix it by disabling interrupts before checking ti.flags.

Reported-by: Stefan Seyfried <stefan.seyfried@googlemail.com>
Reported-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Fixes: 96b6352c1271 ("x86_64, entry: Remove the syscall exit audit")
Link: http://lkml.kernel.org/r/189320d42b4d671df78c10555976bb10af1ffc75.1427137498.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -rw-r--r--  arch/x86/kernel/entry_64.S | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)
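For readers following the ordering argument in the commit message, here is a minimal
user-space C sketch of the old and new orderings. It is not kernel code: ti_flags,
TIF_NOTIFY_RESUME, TIF_ALLWORK_MASK and simulated_interrupt() are invented stand-ins
for TI_flags, the one-shot exit-work bits and an interrupt delivered during syscall
exit; only the position of the flag test relative to the "interrupts off" point is
meant to mirror the patch.

/*
 * Toy model of the race fixed by this patch -- not kernel code.
 * Build with: cc -std=c11 -Wall race_sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

#define TIF_NOTIFY_RESUME  (1u << 1)          /* one-shot exit-work flag (illustrative bit) */
#define TIF_ALLWORK_MASK   TIF_NOTIFY_RESUME  /* stand-in for _TIF_ALLWORK_MASK */

static atomic_uint ti_flags;                  /* stand-in for TI_flags */

/* Work that an interrupt/preemption during syscall exit might queue. */
static void simulated_interrupt(void)
{
	atomic_fetch_or(&ti_flags, TIF_NOTIFY_RESUME);
}

/* Old ordering: test the flags while interrupts are still enabled. */
static void buggy_exit_path(void)
{
	unsigned int flags = atomic_load(&ti_flags);  /* testl $_TIF_ALLWORK_MASK,... */

	simulated_interrupt();   /* interrupt lands *after* the check... */
	/* DISABLE_INTERRUPTS(CLBR_NONE) happens only now -- too late. */

	if (flags & TIF_ALLWORK_MASK)
		printf("buggy: slow path, exit work handled\n");
	else
		printf("buggy: sysret with TIF_NOTIFY_RESUME still set -- work lost\n");
}

/* New ordering: interrupts off first, then test the flags. */
static void fixed_exit_path(void)
{
	simulated_interrupt();   /* an interrupt that fires before the cutoff... */
	/* DISABLE_INTERRUPTS(CLBR_NONE): nothing can queue work past this point. */

	unsigned int flags = atomic_load(&ti_flags);  /* ...is guaranteed to be seen here */
	if (flags & TIF_ALLWORK_MASK)
		printf("fixed: slow path, exit work handled\n");
	else
		printf("fixed: sysret with no pending work\n");
}

int main(void)
{
	atomic_store(&ti_flags, 0);
	buggy_exit_path();

	atomic_store(&ti_flags, 0);
	fixed_exit_path();
	return 0;
}

Running the sketch, the buggy path returns "to userspace" with the one-shot flag still
set, while the fixed path is forced onto the slow path, which is exactly the guarantee
the patch restores by moving the testl after DISABLE_INTERRUPTS.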
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d74d16..2babb39 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -364,12 +364,21 @@ system_call_fastpath:
* Has incomplete stack frame and undefined top of stack.
*/
ret_from_sys_call:
- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- jnz int_ret_from_sys_call_fixup /* Go the the slow path */
-
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
+
+ /*
+ * We must check ti flags with interrupts (or at least preemption)
+ * off because we must *never* return to userspace without
+ * processing exit work that is enqueued if we're preempted here.
+ * In particular, returning to userspace with any of the one-shot
+ * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+ * very bad.
+ */
+ testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ jnz int_ret_from_sys_call_fixup /* Go the the slow path */
+
CFI_REMEMBER_STATE
/*
* sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
int_ret_from_sys_call_fixup:
FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
- jmp int_ret_from_sys_call
+ jmp int_ret_from_sys_call_irqs_off
/* Do syscall tracing */
tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
GLOBAL(int_with_check)