fpu.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Author: Huacai Chen <[email protected]>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_FPU_H
#define _ASM_FPU_H

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>

#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/current.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

struct sigcontext;

extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);

/*
 * Mask the FCSR Cause bits according to the Enable bits, observing
 * that Unimplemented is always enabled.
 */
static inline unsigned long mask_fcsr_x(unsigned long fcsr)
{
	return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
			(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}
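
/*
 * Worked example, assuming the FCSR0 field layout from asm/loongarch.h
 * (FPU_CSR_ALL_E = 0x1f covering the Enable bits 4:0, FPU_CSR_ALL_X =
 * 0x1f000000 covering the Cause bits 28:24): the shift above evaluates
 * to ffs(0x1f000000) - ffs(0x1f) = 25 - 1 = 24, which lines each Enable
 * bit up with its Cause bit before the final mask.
 */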

static inline int is_fp_enabled(void)
{
	return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
		1 : 0;
}

#define enable_fpu()		set_csr_euen(CSR_EUEN_FPEN)

#define disable_fpu()		clear_csr_euen(CSR_EUEN_FPEN)

#define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)

static inline int is_fpu_owner(void)
{
	return test_thread_flag(TIF_USEDFPU);
}
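
/*
 * Take ownership of the hardware FPU for the current task: enable the
 * FPU in CSR.EUEN, mark the task as owner via TIF_USEDFPU, and mirror
 * the enable bit into the saved EUEN in the task's pt_regs so the FPU
 * stays enabled after returning to user space.
 */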
static inline void __own_fpu(void)
{
	enable_fpu();
	set_thread_flag(TIF_USEDFPU);
	KSTK_EUEN(current) |= CSR_EUEN_FPEN;
}
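
/*
 * Claim the FPU for the current task and optionally restore its saved
 * context. Preemption must already be disabled; own_fpu() below is the
 * preemption-safe wrapper.
 */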
static inline void own_fpu_inatomic(int restore)
{
	if (cpu_has_fpu && !is_fpu_owner()) {
		__own_fpu();
		if (restore)
			_restore_fp(&current->thread.fpu);
	}
}

static inline void own_fpu(int restore)
{
	preempt_disable();
	own_fpu_inatomic(restore);
	preempt_enable();
}
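
/*
 * Give up FPU ownership: optionally save the live registers into the
 * task's context, then disable the FPU and clear TIF_USEDFPU. The FP,
 * LSX and LASX enables are also cleared in the saved EUEN, so the next
 * user-space FP/SIMD instruction traps and re-acquires ownership.
 */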
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	if (is_fpu_owner()) {
		if (save)
			_save_fp(&tsk->thread.fpu);
		disable_fpu();
		clear_tsk_thread_flag(tsk, TIF_USEDFPU);
	}
	KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
}

static inline void lose_fpu(int save)
{
	preempt_disable();
	lose_fpu_inatomic(save, current);
	preempt_enable();
}
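
/*
 * First-use initialization: claim the FPU, program it with the fcsr
 * value saved in the task's context, and mark the task as having used
 * math so its FP context is saved and restored from now on.
 */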
static inline void init_fpu(void)
{
	unsigned int fcsr = current->thread.fpu.fcsr;

	__own_fpu();
	_init_fpu(fcsr);
	set_used_math();
}

static inline void save_fp(struct task_struct *tsk)
{
	if (cpu_has_fpu)
		_save_fp(&tsk->thread.fpu);
}

static inline void restore_fp(struct task_struct *tsk)
{
	if (cpu_has_fpu)
		_restore_fp(&tsk->thread.fpu);
}
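
/*
 * Return a pointer to the task's saved FP register image. If @tsk is
 * current and owns the FPU, flush the live registers to memory first
 * so callers (e.g. ptrace) see up-to-date values.
 */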
static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
	if (tsk == current) {
		preempt_disable();
		if (is_fpu_owner())
			_save_fp(&current->thread.fpu);
		preempt_enable();
	}

	return tsk->thread.fpu.fpr;
}

#endif /* _ASM_FPU_H */