Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fpu changes from Ingo Molnar:
 "Various x86 FPU handling cleanups, refactorings and fixes (Borislav
  Petkov, Oleg Nesterov, Rik van Riel)"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  x86/fpu: Kill eager_fpu_init_bp()
  x86/fpu: Don't allocate fpu->state for swapper/0
  x86/fpu: Rename drop_init_fpu() to fpu_reset_state()
  x86/fpu: Fold __drop_fpu() into its sole user
  x86/fpu: Don't abuse drop_init_fpu() in flush_thread()
  x86/fpu: Use restore_init_xstate() instead of math_state_restore() on kthread exec
  x86/fpu: Introduce restore_init_xstate()
  x86/fpu: Document user_fpu_begin()
  x86/fpu: Factor out memset(xstate, 0) in fpu_finit() paths
  x86/fpu: Change xstateregs_get()/set() to use ->xsave.i387 rather than ->fxsave
  x86/fpu: Don't abuse FPU in kernel threads if use_eager_fpu()
  x86/fpu: Always allow FPU in interrupt if use_eager_fpu()
  x86/fpu: __kernel_fpu_begin() should clear fpu_owner_task even if use_eager_fpu()
  x86/fpu: Also check fpu_lazy_restore() when use_eager_fpu()
  x86/fpu: Use task_disable_lazy_fpu_restore() helper
  x86/fpu: Use an explicit if/else in switch_fpu_prepare()
  x86/fpu: Introduce task_disable_lazy_fpu_restore() helper
  x86/fpu: Move lazy restore functions up a few lines
  x86/fpu: Change math_error() to use unlazy_fpu(), kill (now) unused save_init_fpu()
  x86/fpu: Don't do __thread_fpu_end() if use_eager_fpu()
  ...
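Most of the commits in this branch hinge on whether the kernel is in eager FPU mode (state switched unconditionally at context switch) or lazy mode (restore deferred until the task actually touches the FPU). For orientation only, not part of the diff below, a minimal sketch of the mode check these commits key off, roughly as it reads in arch/x86/include/asm/fpu-internal.h in this era:

	/*
	 * Approximate form of the eager-FPU predicate: true when the CPU
	 * feature bit selects eager switching, false for lazy restore.
	 */
	static __always_inline __pure bool use_eager_fpu(void)
	{
		return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
	}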
@@ -89,8 +89,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 	dst->thread.fpu_counter = 0;
 	dst->thread.fpu.has_fpu = 0;
-	dst->thread.fpu.last_cpu = ~0;
 	dst->thread.fpu.state = NULL;
+	task_disable_lazy_fpu_restore(dst);
 	if (tsk_used_math(src)) {
 		int err = fpu_alloc(&dst->thread.fpu);
 		if (err)
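The hunk above replaces the open-coded "last_cpu = ~0" with the task_disable_lazy_fpu_restore() helper introduced in this series. A rough sketch of that helper, together with the fpu_lazy_restore() check it defeats, approximately as they appear in arch/x86/include/asm/fpu-internal.h after this merge:

	/*
	 * Setting last_cpu to a nonexistent CPU makes fpu_lazy_restore()
	 * fail, so the child's FPU state is reloaded from memory instead of
	 * being trusted to still be live in the CPU registers.
	 * (Approximate form, shown here for context.)
	 */
	static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
	{
		tsk->thread.fpu.last_cpu = ~0;
	}

	static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
	{
		return new == this_cpu_read_stable(fpu_owner_task) &&
		       cpu == new->thread.fpu.last_cpu;
	}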
@@ -151,13 +151,18 @@ void flush_thread(void)
 
 	flush_ptrace_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-	drop_init_fpu(tsk);
-	/*
-	 * Free the FPU state for non xsave platforms. They get reallocated
-	 * lazily at the first use.
-	 */
-	if (!use_eager_fpu())
+
+	if (!use_eager_fpu()) {
+		/* FPU state will be reallocated lazily at the first use. */
+		drop_fpu(tsk);
 		free_thread_xstate(tsk);
+	} else if (!used_math()) {
+		/* kthread execs. TODO: cleanup this horror. */
+		if (WARN_ON(init_fpu(tsk)))
+			force_sig(SIGKILL, tsk);
+		user_fpu_begin();
+		restore_init_xstate();
+	}
 }
 
 static void hard_disable_TSC(void)
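In the eager-FPU kthread-exec path added above, user_fpu_begin() claims the FPU for the current task and restore_init_xstate() then loads the pristine init state into the registers. A rough sketch of restore_init_xstate(), as introduced earlier in this series (approximate form, from fpu-internal.h):

	/*
	 * Load the boot-time init xstate into the FPU registers: XRSTOR from
	 * the init buffer when XSAVE is available, plain FXRSTOR otherwise.
	 * (Sketch for context; the real helper lives in fpu-internal.h.)
	 */
	static inline void restore_init_xstate(void)
	{
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}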