x86/fpu: Remove use_eager_fpu()
This removes all the obvious code paths that depend on lazy FPU mode.
It shouldn't change the generated code at all.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: pbonzini@redhat.com
Link: http://lkml.kernel.org/r/1475627678-20788-5-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
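Context for the "shouldn't change the generated code" claim: by this point in the series use_eager_fpu() had been hardwired to return true (see the first hunk below), so every branch guarded by !use_eager_fpu() was already compile-time dead code. A minimal user-space sketch of that effect, using hypothetical stand-ins for the kernel helpers rather than the kernel's own definitions:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in: the kernel helper had become a constant. */
	static inline bool use_eager_fpu(void)
	{
		return true;
	}

	/* Hypothetical stand-in for __fpregs_deactivate_hw()/stts(). */
	static void fpregs_deactivate_hw(void)
	{
		if (!use_eager_fpu())
			puts("set CR0.TS (lazy FPU only)");	/* dead code */
	}

	int main(void)
	{
		/*
		 * The compiler folds use_eager_fpu() to 'true' and drops
		 * the branch above entirely; the patch deletes the same
		 * dead source by hand, leaving the generated code unchanged.
		 */
		fpregs_deactivate_hw();
		return 0;
	}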
committed by Ingo Molnar

parent 2f7fada235
commit c592b57347
arch/x86/include/asm/fpu/internal.h:

@@ -60,11 +60,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
 /*
  * FPU related CPU feature flag helper routines:
  */
-static __always_inline __pure bool use_eager_fpu(void)
-{
-	return true;
-}
-
 static __always_inline __pure bool use_xsaveopt(void)
 {
 	return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -501,24 +496,6 @@ static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
 }
 
 
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
-	if (!use_eager_fpu())
-		clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
-	if (!use_eager_fpu())
-		stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
 static inline void __fpregs_deactivate(struct fpu *fpu)
 {
 	WARN_ON_FPU(!fpu->fpregs_active);
@@ -528,7 +505,6 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
 	trace_x86_fpu_regs_deactivated(fpu);
 }
 
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
 static inline void __fpregs_activate(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu->fpregs_active);
@@ -554,22 +530,17 @@ static inline int fpregs_active(void)
 }
 
 /*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
  * These generally need preemption protection to work,
  * do try to avoid using these on their own.
  */
 static inline void fpregs_activate(struct fpu *fpu)
 {
-	__fpregs_activate_hw();
 	__fpregs_activate(fpu);
 }
 
 static inline void fpregs_deactivate(struct fpu *fpu)
 {
 	__fpregs_deactivate(fpu);
-	__fpregs_deactivate_hw();
 }
 
 /*
@@ -596,8 +567,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 	 * or if the past 5 consecutive context-switches used math.
	 */
 	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
-		      new_fpu->fpstate_active &&
-		      (use_eager_fpu() || new_fpu->counter > 5);
+		      new_fpu->fpstate_active;
 
 	if (old_fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
@@ -615,8 +585,6 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 			__fpregs_activate(new_fpu);
 			trace_x86_fpu_regs_activated(new_fpu);
 			prefetch(&new_fpu->state);
-		} else {
-			__fpregs_deactivate_hw();
-		}
+		}
 	} else {
 		old_fpu->counter = 0;
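To see the switch_fpu_prepare() change in isolation: once eager mode is unconditional, the "used math in the past 5 context switches" heuristic drops out of the preload decision. A hedged user-space restatement with simplified stand-in types, not the kernel's definitions:

	#include <assert.h>
	#include <stdbool.h>

	/* Simplified stand-in for the kernel's struct fpu. */
	struct fpu {
		bool fpstate_active;	/* task has FPU state worth restoring */
		unsigned char counter;	/* consecutive switches that used math */
	};

	/* Before the patch: preload if eager mode, or if recently "hot". */
	static bool want_preload_old(const struct fpu *new_fpu,
				     bool cpu_has_fpu, bool eager)
	{
		return cpu_has_fpu && new_fpu->fpstate_active &&
		       (eager || new_fpu->counter > 5);
	}

	/* After the patch: eager is always true, so the heuristic folds away. */
	static bool want_preload_new(const struct fpu *new_fpu, bool cpu_has_fpu)
	{
		return cpu_has_fpu && new_fpu->fpstate_active;
	}

	int main(void)
	{
		struct fpu f = { .fpstate_active = true, .counter = 0 };

		/* With eager == true the two forms agree for any input. */
		assert(want_preload_old(&f, true, true) ==
		       want_preload_new(&f, true));
		return 0;
	}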