/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <[email protected]>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/bottom_half.h>

#include <asm/fpu/types.h>

/*
 * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
 * disables preemption so be careful if you intend to use it for long periods
 * of time.
 * If you intend to use the FPU in irq/softirq you need to check first with
 * irq_fpu_usable() if it is possible.
 */
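
/*
 * Illustrative sketch of the calling pattern described above. The caller
 * context is hypothetical; only the irq_fpu_usable()/kernel_fpu_begin()/
 * kernel_fpu_end() calls come from this header:
 *
 *	if (!irq_fpu_usable())
 *		return;			// FPU not usable in this context
 *	kernel_fpu_begin();
 *	// ... short burst of SSE/AVX work on kernel buffers ...
 *	kernel_fpu_end();
 */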
/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
#define KFPU_387	_BITUL(0)	/* 387 state will be initialized */
#define KFPU_MXCSR	_BITUL(1)	/* MXCSR will be initialized */

extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);

/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
#ifdef CONFIG_X86_64
	/*
	 * Any 64-bit code that uses 387 instructions must explicitly request
	 * KFPU_387.
	 */
	kernel_fpu_begin_mask(KFPU_MXCSR);
#else
	/*
	 * 32-bit kernel code may use 387 operations as well as SSE2, etc,
	 * as long as it checks that the CPU has the required capability.
	 */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
#endif
}
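
/*
 * Illustrative sketch (hypothetical caller): on 64-bit kernels plain
 * kernel_fpu_begin() only initializes MXCSR, so code that executes x87/387
 * instructions must request that state explicitly via the mask variant:
 *
 *	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
 *	// ... x87 and SSE work ...
 *	kernel_fpu_end();
 */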
/*
 * Use fpregs_lock() while editing CPU's FPU registers or fpu->fpstate.
 * A context switch will (and softirq might) save CPU's FPU registers to
 * fpu->fpstate.regs and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
 * a random state.
 *
 * local_bh_disable() protects against both preemption and soft interrupts
 * on !RT kernels.
 *
 * On RT kernels local_bh_disable() is not sufficient because it only
 * serializes soft interrupt related sections via a local lock, but stays
 * preemptible. Disabling preemption is the right choice here as bottom
 * half processing is always in thread context on RT kernels so it
 * implicitly prevents bottom half processing as well.
 *
 * Disabling preemption also serializes against kernel_fpu_begin().
 */
static inline void fpregs_lock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();
	else
		preempt_disable();
}

static inline void fpregs_unlock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}
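
/*
 * Illustrative sketch of the locking pattern described above (the body of
 * the critical section is a placeholder, not a prescribed operation):
 *
 *	fpregs_lock();
 *	// ... inspect or modify the CPU's FPU registers or fpu->fpstate ...
 *	fpregs_unlock();
 */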
#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
static inline void fpregs_assert_state_consistent(void) { }
#endif

/*
 * Load the task FPU state before returning to userspace.
 */
extern void switch_fpu_return(void);

/*
 * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
 *
 * If 'feature_name' is set then put a human-readable description of
 * the feature there as well - this can be used to print error (or success)
 * messages.
 */
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
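
/*
 * Illustrative sketch (hypothetical caller, the mask is only an example):
 *
 *	const char *feature_name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
 *			       &feature_name))
 *		pr_info("CPU feature '%s' is not supported\n", feature_name);
 */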
/* Trap handling */
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);
extern void fpu_reset_from_exception_fixup(void);

/* Boot, hotplug and resume */
extern void fpu__init_cpu(void);
extern void fpu__init_system(void);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

/* State tracking */
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/* Process cleanup */
#ifdef CONFIG_X86_64
extern void fpstate_free(struct fpu *fpu);
#else
static inline void fpstate_free(struct fpu *fpu) { }
#endif

/* fpstate-related functions which are exported to KVM */
extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature);

extern u64 xstate_get_guest_group_perm(void);

/* KVM specific functions */
extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest);
extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures);

#ifdef CONFIG_X86_64
extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd);
extern void fpu_sync_guest_vmexit_xfd_state(void);
#else
static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { }
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
#endif

extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
					   unsigned int size, u64 xfeatures, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);

static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
{
	gfpu->fpstate->is_confidential = true;
}

static inline bool fpstate_is_confidential(struct fpu_guest *gfpu)
{
	return gfpu->fpstate->is_confidential;
}

/* prctl */
extern long fpu_xstate_prctl(int option, unsigned long arg2);

extern void fpu_idle_fpregs(void);

#endif /* _ASM_X86_FPU_API_H */