legacy.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_LEGACY_H
#define __X86_KERNEL_FPU_LEGACY_H

#include <asm/fpu/types.h>

extern unsigned int mxcsr_feature_mask;

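/* Load a new value into the SSE control/status register (MXCSR). */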
static inline void ldmxcsr(u32 mxcsr)
{
	asm volatile("ldmxcsr %0" :: "m" (mxcsr));
}

/*
 * Returns 0 on success or the trap number when the operation raises an
 * exception.
 */
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
									\
	might_fault();							\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1: " #insn "\n"					\
		     "2: " ASM_CLAC "\n"				\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

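/*
 * Illustrative usage sketch (not part of the original header): callers
 * typically propagate a non-zero return as the trap number, e.g.
 *
 *	err = user_insn(fxsave %[fx], [fx] "=m" (*buf), "m" (*buf));
 *	if (err)
 *		handle_fault(err);	// hypothetical helper, for illustration only
 */

/*
 * Like user_insn(), but for kernel buffers: returns 0 on success or
 * -EFAULT when the instruction faults.
 */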
#define kernel_insn_err(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[err]) \
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

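/*
 * No error return: a fault during the wrapped instruction is handled by the
 * EX_TYPE_FPU_RESTORE fixup, which is expected to bring the FPU back to a
 * known good state rather than report the failure to the caller.
 */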
#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FPU_RESTORE)	\
		     : output : input)

static inline int fnsave_to_user_sigframe(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

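/*
 * On 64-bit kernels the explicit fxsaveq/fxrstorq mnemonics are used so the
 * assembler emits the REX.W form, which saves/restores the full 64-bit
 * instruction and data pointers.
 */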
static inline int fxsave_to_user_sigframe(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}

static inline void fxrstor(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

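/*
 * The *_safe variants restore from a kernel buffer but report -EFAULT via
 * kernel_insn_err() instead of relying on the FPU-restore fixup, so callers
 * can detect and handle a failed restore.
 */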
static inline int fxrstor_safe(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int fxrstor_from_user_sigframe(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void frstor(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_safe(struct fregs_state *fx)
{
	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_from_user_sigframe(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

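/*
 * Plain save into a kernel buffer: no exception table entry is used here,
 * as the destination is kernel memory and a fault would indicate a bug.
 */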
static inline void fxsave(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile("fxsave %[fx]" : [fx] "=m" (*fx));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}

#endif