  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * x86 FPU boot time init code:
  4. */
  5. #include <asm/fpu/api.h>
  6. #include <asm/tlbflush.h>
  7. #include <asm/setup.h>
  8. #include <linux/sched.h>
  9. #include <linux/sched/task.h>
  10. #include <linux/init.h>
  11. #include "internal.h"
  12. #include "legacy.h"
  13. #include "xstate.h"
  14. /*
  15. * Initialize the registers found in all CPUs, CR0 and CR4:
  16. */
/*
 * Per-CPU FPU hardware setup: program CR4 OS-support bits for the
 * features this CPU advertises, clear CR0.TS/EM (or set EM when no
 * FPU exists), then reset x87 state.
 */
static void fpu__init_cpu_generic(void)
{
        unsigned long cr0;
        unsigned long cr4_mask = 0;

        /* Advertise OS support for FXSAVE/FXRSTOR and SSE exceptions: */
        if (boot_cpu_has(X86_FEATURE_FXSR))
                cr4_mask |= X86_CR4_OSFXSR;
        if (boot_cpu_has(X86_FEATURE_XMM))
                cr4_mask |= X86_CR4_OSXMMEXCPT;
        if (cr4_mask)
                cr4_set_bits(cr4_mask);

        cr0 = read_cr0();
        cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
        /* With no FPU, trap every FPU instruction via CR0.EM: */
        if (!boot_cpu_has(X86_FEATURE_FPU))
                cr0 |= X86_CR0_EM;
        write_cr0(cr0);

        /* Flush out any pending x87 state: */
#ifdef CONFIG_MATH_EMULATION
        if (!boot_cpu_has(X86_FEATURE_FPU))
                fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
        else
#endif
                asm volatile ("fninit");
}
  40. /*
  41. * Enable all supported FPU features. Called when a CPU is brought online:
  42. */
/*
 * Enable all supported FPU features. Called when a CPU is brought online:
 *
 * The generic CR0/CR4 setup must run before the xstate setup, since
 * the latter relies on the OS-support bits being in place.
 */
void fpu__init_cpu(void)
{
        fpu__init_cpu_generic();
        fpu__init_cpu_xstate();
}
/*
 * Detect an x87 FPU on CPUs that lack CPUID, by executing FNINIT and
 * reading back the status/control words.
 *
 * Returns true if an FPU is present: after FNINIT a real FPU leaves
 * FSW == 0 and the tested FCW bits (mask 0x103f) at their documented
 * reset value 0x003f; the preset 0xffff pattern survives unchanged
 * when no FPU responds.
 */
static bool __init fpu__probe_without_cpuid(void)
{
        unsigned long cr0;
        u16 fsw, fcw;

        fsw = fcw = 0xffff;

        /* Make sure FPU instructions actually reach the FPU: */
        cr0 = read_cr0();
        cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
        write_cr0(cr0);

        asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));

        pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw);

        return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
/*
 * Early boot FPU detection: on CPUID-less CPUs, probe for the FPU
 * directly and force/clear the FPU capability bit accordingly.
 *
 * Without math emulation configured, a missing FPU is fatal — halt
 * the machine rather than continue without any way to execute FPU
 * instructions.
 */
static void __init fpu__init_system_early_generic(void)
{
        /* Only probe if the FPU bit was not explicitly cleared on the command line: */
        if (!boot_cpu_has(X86_FEATURE_CPUID) &&
            !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
                if (fpu__probe_without_cpuid())
                        setup_force_cpu_cap(X86_FEATURE_FPU);
                else
                        setup_clear_cpu_cap(X86_FEATURE_FPU);
        }

#ifndef CONFIG_MATH_EMULATION
        if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
                pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
                for (;;)
                        asm volatile("hlt");
        }
#endif
}
  77. /*
  78. * Boot time FPU feature detection code:
  79. */
/* Mask of MXCSR bits supported by the hardware, probed at boot below: */
unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);

/*
 * Probe the supported MXCSR bits via FXSAVE: the hardware reports its
 * MXCSR_MASK in the saved image, and we intersect it into
 * mxcsr_feature_mask. Without FXSR the mask ends up 0.
 */
static void __init fpu__init_system_mxcsr(void)
{
        unsigned int mask = 0;

        if (boot_cpu_has(X86_FEATURE_FXSR)) {
                /* Static because GCC does not get 16-byte stack alignment right: */
                static struct fxregs_state fxregs __initdata;

                asm volatile("fxsave %0" : "+m" (fxregs));

                mask = fxregs.mxcsr_mask;

                /*
                 * If zero then use the default features mask,
                 * which has all features set, except the
                 * denormals-are-zero feature bit:
                 */
                if (mask == 0)
                        mask = 0x0000ffbf;
        }
        mxcsr_feature_mask &= mask;
}
  100. /*
  101. * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
  102. */
/*
 * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
 */
static void __init fpu__init_system_generic(void)
{
        /*
         * Set up the legacy init FPU context. Will be updated when the
         * CPU supports XSAVE[S].
         */
        fpstate_init_user(&init_fpstate);

        /* Probe the hardware-supported MXCSR bits: */
        fpu__init_system_mxcsr();
}
  112. /*
  113. * Enforce that 'MEMBER' is the last field of 'TYPE'.
  114. *
  115. * Align the computed size with alignment of the TYPE,
  116. * because that's how C aligns structs.
  117. */
/*
 * Enforce at build time that 'MEMBER' is the last field of 'TYPE':
 * the end of MEMBER, rounded up to TYPE's alignment (which is how C
 * pads a struct), must coincide with sizeof(TYPE).
 */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
        BUILD_BUG_ON(sizeof(TYPE) != \
                     ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE)))
  121. /*
  122. * We append the 'struct fpu' to the task_struct:
  123. */
  124. static void __init fpu__init_task_struct_size(void)
  125. {
  126. int task_size = sizeof(struct task_struct);
  127. /*
  128. * Subtract off the static size of the register state.
  129. * It potentially has a bunch of padding.
  130. */
  131. task_size -= sizeof(current->thread.fpu.__fpstate.regs);
  132. /*
  133. * Add back the dynamically-calculated register state
  134. * size.
  135. */
  136. task_size += fpu_kernel_cfg.default_size;
  137. /*
  138. * We dynamically size 'struct fpu', so we require that
  139. * it be at the end of 'thread_struct' and that
  140. * 'thread_struct' be at the end of 'task_struct'. If
  141. * you hit a compile error here, check the structure to
  142. * see if something got added to the end.
  143. */
  144. CHECK_MEMBER_AT_END_OF(struct fpu, __fpstate);
  145. CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
  146. CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
  147. arch_task_struct_size = task_size;
  148. }
  149. /*
  150. * Set up the user and kernel xstate sizes based on the legacy FPU context size.
  151. *
  152. * We set this up first, and later it will be overwritten by
  153. * fpu__init_system_xstate() if the CPU knows about xstates.
  154. */
  155. static void __init fpu__init_system_xstate_size_legacy(void)
  156. {
  157. unsigned int size;
  158. /*
  159. * Note that the size configuration might be overwritten later
  160. * during fpu__init_system_xstate().
  161. */
  162. if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
  163. size = sizeof(struct swregs_state);
  164. } else if (cpu_feature_enabled(X86_FEATURE_FXSR)) {
  165. size = sizeof(struct fxregs_state);
  166. fpu_user_cfg.legacy_features = XFEATURE_MASK_FPSSE;
  167. } else {
  168. size = sizeof(struct fregs_state);
  169. fpu_user_cfg.legacy_features = XFEATURE_MASK_FP;
  170. }
  171. fpu_kernel_cfg.max_size = size;
  172. fpu_kernel_cfg.default_size = size;
  173. fpu_user_cfg.max_size = size;
  174. fpu_user_cfg.default_size = size;
  175. fpstate_reset(&current->thread.fpu);
  176. }
  177. /*
  178. * Called on the boot CPU once per system bootup, to set up the initial
  179. * FPU state that is later cloned into all processes:
  180. */
/*
 * Called on the boot CPU once per system bootup, to set up the initial
 * FPU state that is later cloned into all processes:
 *
 * NOTE: the ordering below is load-bearing — early detection must run
 * before the CPU-level init, which must run before the system-level
 * and xstate init steps.
 */
void __init fpu__init_system(void)
{
        fpstate_reset(&current->thread.fpu);
        fpu__init_system_early_generic();

        /*
         * The FPU has to be operational for some of the
         * later FPU init activities:
         */
        fpu__init_cpu();

        fpu__init_system_generic();
        fpu__init_system_xstate_size_legacy();
        fpu__init_system_xstate(fpu_kernel_cfg.max_size);
        fpu__init_task_struct_size();
}