author     Oleg Nesterov <oleg@redhat.com>    2015-01-19 19:52:12 +0100
committer  Borislav Petkov <bp@suse.de>       2015-02-23 15:50:45 +0100
commit     110d7f7513bbb916b8654da9e2973ac5bed929a9 (patch)
tree       923e255abef680d1354c3e18df2044f19d6dbcbb /arch/x86/kernel/process.c
parent     4b2e762e2e53c721458a83d547b222178bb72a34 (diff)
x86/fpu: Don't abuse FPU in kernel threads if use_eager_fpu()
AFAICS, there is no reason why kernel threads should have an FPU context even
if use_eager_fpu() == T. Now that interrupted_kernel_fpu_idle() no longer
checks __thread_has_fpu() in the use_eager_fpu() case, we can remove the
init_fpu() code from eager_fpu_init() and change flush_thread(), called by
do_execve(), to initialize the FPU instead.

Note: of course, the change in flush_thread() is horrible and must be cleaned
up: we need a new helper, and flush_thread() should return an error if
init_fpu() fails.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Suresh Siddha <sbsiddha@gmail.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/20150119185212.GD16427@redhat.com
Signed-off-by: Borislav Petkov <bp@suse.de>
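The commit message asks for a follow-up helper so that flush_thread() can
propagate the error when init_fpu() fails instead of WARN()ing and sending
SIGKILL. Below is a minimal sketch of what such a helper might look like;
the name flush_thread_fpu() and the error-return contract are assumptions
for illustration, not part of this commit:

/*
 * Hypothetical cleanup helper, sketched only from the commit message above.
 * It would live in arch/x86/kernel/process.c next to flush_thread(), be
 * called with tsk == current, and return init_fpu()'s error so the caller
 * can fail the exec instead of WARN()ing and SIGKILLing.
 */
static int flush_thread_fpu(struct task_struct *tsk)
{
	int err;

	drop_init_fpu(tsk);

	if (!use_eager_fpu()) {
		/* Lazy FPU: free the state; it is reallocated at first use. */
		free_thread_xstate(tsk);
		return 0;
	}

	if (used_math())
		return 0;

	/*
	 * Eager FPU and no FPU state yet, i.e. a kernel thread calling
	 * exec: allocate an FPU context and load the init state.
	 */
	err = init_fpu(tsk);
	if (err)
		return err;

	math_state_restore();
	return 0;
}

flush_thread() would then just call this helper and hand the error back
towards do_execve(), which is the cleanup direction the message suggests.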
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--  arch/x86/kernel/process.c | 7
1 file changed, 7 insertions, 0 deletions
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ce8b103..8348037 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -130,6 +130,7 @@ void flush_thread(void)
 	flush_ptrace_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+
 	drop_init_fpu(tsk);
 	/*
 	 * Free the FPU state for non xsave platforms. They get reallocated
@@ -137,6 +138,12 @@ void flush_thread(void)
 	 */
 	if (!use_eager_fpu())
 		free_thread_xstate(tsk);
+	else if (!used_math()) {
+		/* kthread execs. TODO: cleanup this horror. */
+		if (WARN_ON(init_fpu(current)))
+			force_sig(SIGKILL, current);
+		math_state_restore();
+	}
 }
 
 static void hard_disable_TSC(void)