sched.h 2.0 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _ASM_X86_FPU_SCHED_H
  3. #define _ASM_X86_FPU_SCHED_H
  4. #include <linux/sched.h>
  5. #include <asm/cpufeature.h>
  6. #include <asm/fpu/types.h>
  7. #include <asm/trace/fpu.h>
  8. extern void save_fpregs_to_fpstate(struct fpu *fpu);
  9. extern void fpu__drop(struct fpu *fpu);
  10. extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal);
  11. extern void fpu_flush_thread(void);
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then CPU's FPU registers may not
 * hold current()'s FPU registers. It is required to load the
 * registers before returning to userland or using the content
 * otherwise.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
  34. static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
  35. {
  36. if (cpu_feature_enabled(X86_FEATURE_FPU) &&
  37. !(current->flags & (PF_KTHREAD | PF_IO_WORKER))) {
  38. save_fpregs_to_fpstate(old_fpu);
  39. /*
  40. * The save operation preserved register state, so the
  41. * fpu_fpregs_owner_ctx is still @old_fpu. Store the
  42. * current CPU number in @old_fpu, so the next return
  43. * to user space can avoid the FPU register restore
  44. * when is returns on the same CPU and still owns the
  45. * context.
  46. */
  47. old_fpu->last_cpu = cpu;
  48. trace_x86_fpu_regs_deactivated(old_fpu);
  49. }
  50. }
  51. /*
  52. * Delay loading of the complete FPU state until the return to userland.
  53. * PKRU is handled separately.
  54. */
  55. static inline void switch_fpu_finish(void)
  56. {
  57. if (cpu_feature_enabled(X86_FEATURE_FPU))
  58. set_thread_flag(TIF_NEED_FPU_LOAD);
  59. }
  60. #endif /* _ASM_X86_FPU_SCHED_H */