signal.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * FPU signal frame handling routines.
  4. */
  5. #include <linux/compat.h>
  6. #include <linux/cpu.h>
  7. #include <linux/pagemap.h>
  8. #include <asm/fpu/signal.h>
  9. #include <asm/fpu/regset.h>
  10. #include <asm/fpu/xstate.h>
  11. #include <asm/sigframe.h>
  12. #include <asm/trapnr.h>
  13. #include <asm/trace/fpu.h>
  14. #include "context.h"
  15. #include "internal.h"
  16. #include "legacy.h"
  17. #include "xstate.h"
/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 *
 * Returns false only when the sigframe memory is inaccessible; a frame
 * without valid extended state is downgraded to FX-only (see below) and
 * still returns true.
 */
static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
					    struct _fpx_sw_bytes *fx_sw)
{
	/* Smallest layout that can carry an xstate: FXSAVE area + header */
	int min_xstate_size = sizeof(struct fxregs_state) +
			      sizeof(struct xstate_header);
	void __user *fpstate = fxbuf;
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &fxbuf->sw_reserved[0], sizeof(*fx_sw)))
		return false;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > current->thread.fpu.fpstate->user_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		goto setfx;

	/*
	 * Check for the presence of second magic word at the end of memory
	 * layout. This detects the case where the user just copied the legacy
	 * fpstate layout with out copying the extended state information
	 * in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size)))
		return false;

	if (likely(magic2 == FP_XSTATE_MAGIC2))
		return true;
setfx:
	trace_x86_fpu_xstate_check_failed(&current->thread.fpu);

	/* Set the parameters for fx only state */
	fx_sw->magic1 = 0;
	fx_sw->xstate_size = sizeof(struct fxregs_state);
	fx_sw->xfeatures = XFEATURE_MASK_FPSSE;
	return true;
}
/*
 * Signal frame handlers.
 */

/*
 * Write the legacy i387 fsave-style header of a 32-bit signal frame.
 *
 * On FXSR-capable CPUs the env is synthesized from the task's fxsave
 * image; otherwise only the status word is mirrored from the already
 * written frame. Returns true on success, false on a user-access fault.
 */
static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xregs_state *xsave = &tsk->thread.fpu.fpstate->regs.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_32 __user *fp = buf;

		/*
		 * If the registers are live in the CPU (no pending reload),
		 * snapshot them into the task's fpstate first so that
		 * convert_from_fxsr() operates on current values.
		 */
		fpregs_lock();
		if (!test_thread_flag(TIF_NEED_FPU_LOAD))
			fxsave(&tsk->thread.fpu.fpstate->regs.fxsave);
		fpregs_unlock();

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return false;
	} else {
		struct fregs_state __user *fp = buf;
		u32 swd;

		/* Mirror the status word the frame already carries. */
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return false;
	}

	return true;
}
  81. /*
  82. * Prepare the SW reserved portion of the fxsave memory layout, indicating
  83. * the presence of the extended state information in the memory layout
  84. * pointed to by the fpstate pointer in the sigcontext.
  85. * This is saved when ever the FP and extended state context is
  86. * saved on the user stack during the signal handler delivery to the user.
  87. */
  88. static inline void save_sw_bytes(struct _fpx_sw_bytes *sw_bytes, bool ia32_frame,
  89. struct fpstate *fpstate)
  90. {
  91. sw_bytes->magic1 = FP_XSTATE_MAGIC1;
  92. sw_bytes->extended_size = fpstate->user_size + FP_XSTATE_MAGIC2_SIZE;
  93. sw_bytes->xfeatures = fpstate->user_xfeatures;
  94. sw_bytes->xstate_size = fpstate->user_size;
  95. if (ia32_frame)
  96. sw_bytes->extended_size += sizeof(struct fregs_state);
  97. }
/*
 * Finalize the SW-reserved bytes and, for XSAVE frames, the trailing
 * FP_XSTATE_MAGIC2 word and the xfeatures bit vector in the user frame.
 * Returns true if all user accesses succeeded.
 */
static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
				      struct fpstate *fpstate)
{
	struct xregs_state __user *x = buf;
	struct _fpx_sw_bytes sw_bytes = {};
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	save_sw_bytes(&sw_bytes, ia32_frame, fpstate);
	err = __copy_to_user(&x->i387.sw_reserved, &sw_bytes, sizeof(sw_bytes));

	if (!use_xsave())
		return !err;

	/* Place the second magic word right after the xstate image. */
	err |= __put_user(FP_XSTATE_MAGIC2,
			  (__u32 __user *)(buf + fpstate->user_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);

	/*
	 * For legacy compatible, we always set FP/SSE bits in the bit
	 * vector while saving the state to the user context. This will
	 * enable us capturing any changes(during sigreturn) to
	 * the FP/SSE bits by the legacy applications which don't touch
	 * xfeatures in the xsave header.
	 *
	 * xsave aware apps can change the xfeatures in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	xfeatures |= XFEATURE_MASK_FPSSE;

	err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);

	return !err;
}
  132. static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
  133. {
  134. if (use_xsave())
  135. return xsave_to_user_sigframe(buf);
  136. if (use_fxsr())
  137. return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
  138. else
  139. return fnsave_to_user_sigframe((struct fregs_state __user *) buf);
  140. }
/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 * state is copied.
 * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 * buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 * buf != buf_fx for 32-bit frames with fxstate.
 *
 * Save it directly to the user frame with disabled page fault handler. If
 * that faults, try to clear the frame which handles the page fault.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
	struct task_struct *tsk = current;
	struct fpstate *fpstate = tsk->thread.fpu.fpstate;
	bool ia32_fxstate = (buf != buf_fx);
	int ret;

	/* The fsave-header quirk only exists for 32-bit userspace. */
	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	/* No FPU hardware: emit the soft-FPU emulated state instead. */
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		struct user_i387_ia32_struct fp;

		fpregs_soft_get(current, NULL, (struct membuf){.p = &fp,
						.left = sizeof(fp)});
		return !copy_to_user(buf, &fp, sizeof(fp));
	}

	if (!access_ok(buf, size))
		return false;

	if (use_xsave()) {
		struct xregs_state __user *xbuf = buf_fx;

		/*
		 * Clear the xsave header first, so that reserved fields are
		 * initialized to zero.
		 */
		if (__clear_user(&xbuf->header, sizeof(xbuf->header)))
			return false;
	}
retry:
	/*
	 * Load the FPU registers if they are not valid for the current task.
	 * With a valid FPU state we can attempt to save the state directly to
	 * userland's stack frame which will likely succeed. If it does not,
	 * resolve the fault in the user memory and try again.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		fpregs_restore_userregs();

	pagefault_disable();
	ret = copy_fpregs_to_sigframe(buf_fx);
	pagefault_enable();
	fpregs_unlock();

	if (ret) {
		/*
		 * Touch the frame outside the fpregs lock: __clear_user()
		 * resolves the page fault, then the save is retried.
		 */
		if (!__clear_user(buf_fx, fpstate->user_size))
			goto retry;
		return false;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && !save_fsave_header(tsk, buf))
		return false;

	if (use_fxsr() && !save_xstate_epilog(buf_fx, ia32_fxstate, fpstate))
		return false;

	return true;
}
/*
 * Restore register state from the user buffer with the mechanism matching
 * the CPU's capabilities.  For XSAVE, features enabled for userspace but
 * absent from @xrestore are explicitly brought into init state afterwards.
 * Returns 0 on success or the non-zero failure code of the restore op.
 */
static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures,
				      u64 xrestore, bool fx_only)
{
	if (use_xsave()) {
		/* Features to re-init because the frame does not carry them */
		u64 init_bv = ufeatures & ~xrestore;
		int ret;

		if (likely(!fx_only))
			ret = xrstor_from_user_sigframe(buf, xrestore);
		else
			ret = fxrstor_from_user_sigframe(buf);

		if (!ret && unlikely(init_bv))
			os_xrstor(&init_fpstate, init_bv);
		return ret;
	} else if (use_fxsr()) {
		return fxrstor_from_user_sigframe(buf);
	} else {
		return frstor_from_user_sigframe(buf);
	}
}
/*
 * Attempt to restore the FPU registers directly from user memory.
 * Pagefaults are handled and any errors returned are fatal.
 */
static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
				     bool fx_only, unsigned int size)
{
	struct fpu *fpu = &current->thread.fpu;
	int ret;

retry:
	fpregs_lock();
	/* Ensure that XFD is up to date */
	xfd_update_state(fpu->fpstate);
	pagefault_disable();
	ret = __restore_fpregs_from_user(buf, fpu->fpstate->user_xfeatures,
					 xrestore, fx_only);
	pagefault_enable();

	if (unlikely(ret)) {
		/*
		 * The above did an FPU restore operation, restricted to
		 * the user portion of the registers, and failed, but the
		 * microcode might have modified the FPU registers
		 * nevertheless.
		 *
		 * If the FPU registers do not belong to current, then
		 * invalidate the FPU register state otherwise the task
		 * might preempt current and return to user space with
		 * corrupted FPU registers.
		 */
		if (test_thread_flag(TIF_NEED_FPU_LOAD))
			__cpu_invalidate_fpregs_state();
		fpregs_unlock();

		/* Try to handle #PF, but anything else is fatal. */
		if (ret != X86_TRAP_PF)
			return false;

		/* Fault the frame in outside the fpregs lock, then retry. */
		if (!fault_in_readable(buf, size))
			goto retry;
		return false;
	}

	/*
	 * Restore supervisor states: previous context switch etc has done
	 * XSAVES and saved the supervisor states in the kernel buffer from
	 * which they can be restored now.
	 *
	 * It would be optimal to handle this with a single XRSTORS, but
	 * this does not work because the rest of the FPU registers have
	 * been restored from a user buffer directly.
	 */
	if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor())
		os_xrstor_supervisor(fpu->fpstate);

	fpregs_mark_activate();
	fpregs_unlock();
	return true;
}
/*
 * Core sigreturn restore path.  For 64-bit frames the registers are
 * restored directly from user memory; for 32-bit fxstate frames the
 * state is first staged in the kernel buffer so the legacy FP header
 * can be folded in, then restored from there.  Returns true on success.
 */
static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
			      bool ia32_fxstate)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	struct user_i387_ia32_struct env;
	bool success, fx_only = false;
	union fpregs_state *fpregs;
	unsigned int state_size;
	u64 user_xfeatures = 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;

		if (!check_xstate_in_sigframe(buf_fx, &fx_sw_user))
			return false;

		/* magic1 was zeroed when the frame was downgraded to FX-only */
		fx_only = !fx_sw_user.magic1;
		state_size = fx_sw_user.xstate_size;
		user_xfeatures = fx_sw_user.xfeatures;
	} else {
		user_xfeatures = XFEATURE_MASK_FPSSE;
		state_size = fpu->fpstate->user_size;
	}

	if (likely(!ia32_fxstate)) {
		/* Restore the FPU registers directly from user memory. */
		return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
						state_size);
	}

	/*
	 * Copy the legacy state because the FP portion of the FX frame has
	 * to be ignored for histerical raisins. The legacy state is folded
	 * in once the larger state has been copied.
	 */
	if (__copy_from_user(&env, buf, sizeof(env)))
		return false;

	/*
	 * By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
	 * not modified on context switch and that the xstate is considered
	 * to be loaded again on return to userland (overriding last_cpu avoids
	 * the optimisation).
	 */
	fpregs_lock();
	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
		/*
		 * If supervisor states are available then save the
		 * hardware state in current's fpstate so that the
		 * supervisor state is preserved. Save the full state for
		 * simplicity. There is no point in optimizing this by only
		 * saving the supervisor states and then shuffle them to
		 * the right place in memory. It's ia32 mode. Shrug.
		 */
		if (xfeatures_mask_supervisor())
			os_xsave(fpu->fpstate);
		set_thread_flag(TIF_NEED_FPU_LOAD);
	}
	__fpu_invalidate_fpregs_state(fpu);
	__cpu_invalidate_fpregs_state();
	fpregs_unlock();

	/* Stage the user frame in the kernel buffer. */
	fpregs = &fpu->fpstate->regs;
	if (use_xsave() && !fx_only) {
		if (copy_sigframe_from_user_to_xstate(tsk, buf_fx))
			return false;
	} else {
		if (__copy_from_user(&fpregs->fxsave, buf_fx,
				     sizeof(fpregs->fxsave)))
			return false;

		if (IS_ENABLED(CONFIG_X86_64)) {
			/* Reject invalid MXCSR values. */
			if (fpregs->fxsave.mxcsr & ~mxcsr_feature_mask)
				return false;
		} else {
			/* Mask invalid bits out for historical reasons (broken hardware). */
			fpregs->fxsave.mxcsr &= mxcsr_feature_mask;
		}

		/* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
		if (use_xsave())
			fpregs->xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
	}

	/* Fold the legacy FP storage */
	convert_to_fxsr(&fpregs->fxsave, &env);

	fpregs_lock();
	if (use_xsave()) {
		/*
		 * Remove all UABI feature bits not set in user_xfeatures
		 * from the memory xstate header which makes the full
		 * restore below bring them into init state. This works for
		 * fx_only mode as well because that has only FP and SSE
		 * set in user_xfeatures.
		 *
		 * Preserve supervisor states!
		 */
		u64 mask = user_xfeatures | xfeatures_mask_supervisor();

		fpregs->xsave.header.xfeatures &= mask;
		success = !os_xrstor_safe(fpu->fpstate,
					  fpu_kernel_cfg.max_features);
	} else {
		success = !fxrstor_safe(&fpregs->fxsave);
	}

	if (likely(success))
		fpregs_mark_activate();

	fpregs_unlock();
	return success;
}
  385. static inline unsigned int xstate_sigframe_size(struct fpstate *fpstate)
  386. {
  387. unsigned int size = fpstate->user_size;
  388. return use_xsave() ? size + FP_XSTATE_MAGIC2_SIZE : size;
  389. }
/*
 * Restore FPU state from a sigframe:
 */
bool fpu__restore_sig(void __user *buf, int ia32_frame)
{
	struct fpu *fpu = &current->thread.fpu;
	void __user *buf_fx = buf;
	bool ia32_fxstate = false;
	bool success = false;
	unsigned int size;

	/* A NULL fpstate pointer means: reset to clean user state. */
	if (unlikely(!buf)) {
		fpu__clear_user_states(fpu);
		return true;
	}

	size = xstate_sigframe_size(fpu->fpstate);
	ia32_frame &= (IS_ENABLED(CONFIG_X86_32) ||
		       IS_ENABLED(CONFIG_IA32_EMULATION));

	/*
	 * Only FXSR enabled systems need the FX state quirk.
	 * FRSTOR does not need it and can use the fast path.
	 */
	if (ia32_frame && use_fxsr()) {
		/* The aligned fxstate follows the legacy fsave header. */
		buf_fx = buf + sizeof(struct fregs_state);
		size += sizeof(struct fregs_state);
		ia32_fxstate = true;
	}

	if (!access_ok(buf, size))
		goto out;

	if (!IS_ENABLED(CONFIG_X86_64) && !cpu_feature_enabled(X86_FEATURE_FPU)) {
		/* No FPU hardware: feed the frame to the soft-FPU emulator. */
		success = !fpregs_soft_set(current, NULL, 0,
					   sizeof(struct user_i387_ia32_struct),
					   NULL, buf);
	} else {
		success = __fpu_restore_sig(buf, buf_fx, ia32_fxstate);
	}

out:
	/* On any failure, reset to a defined user state. */
	if (unlikely(!success))
		fpu__clear_user_states(fpu);
	return success;
}
/*
 * Carve out space for the FPU frame below @sp on the signal stack.
 *
 * Returns the new stack pointer; *buf_fx receives the 64-byte aligned
 * address of the [f|fx|x]save area and *size the total frame size.
 * For 32-bit FXSR frames the fsave header is placed below the aligned
 * area, so sp ends up sizeof(struct fregs_state) beneath *buf_fx.
 */
unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
		     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size(current->thread.fpu.fpstate);

	/* The [f|fx|x]save area itself must be 64-byte aligned. */
	*buf_fx = sp = round_down(sp - frame_size, 64);

	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct fregs_state);
		sp -= sizeof(struct fregs_state);
	}

	*size = frame_size;

	return sp;
}
  443. unsigned long __init fpu__get_fpstate_size(void)
  444. {
  445. unsigned long ret = fpu_user_cfg.max_size;
  446. if (use_xsave())
  447. ret += FP_XSTATE_MAGIC2_SIZE;
  448. /*
  449. * This space is needed on (most) 32-bit kernels, or when a 32-bit
  450. * app is running on a 64-bit kernel. To keep things simple, just
  451. * assume the worst case and always include space for 'freg_state',
  452. * even for 64-bit apps on 64-bit kernels. This wastes a bit of
  453. * space, but keeps the code simple.
  454. */
  455. if ((IS_ENABLED(CONFIG_IA32_EMULATION) ||
  456. IS_ENABLED(CONFIG_X86_32)) && use_fxsr())
  457. ret += sizeof(struct fregs_state);
  458. return ret;
  459. }