  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 1991, 1992 Linus Torvalds
  4. * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
  5. *
  6. * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
  7. * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
  8. * 2000-2002 x86-64 support by Andi Kleen
  9. */
  10. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11. #include <linux/sched.h>
  12. #include <linux/sched/task_stack.h>
  13. #include <linux/mm.h>
  14. #include <linux/smp.h>
  15. #include <linux/kernel.h>
  16. #include <linux/kstrtox.h>
  17. #include <linux/errno.h>
  18. #include <linux/wait.h>
  19. #include <linux/unistd.h>
  20. #include <linux/stddef.h>
  21. #include <linux/personality.h>
  22. #include <linux/uaccess.h>
  23. #include <linux/user-return-notifier.h>
  24. #include <linux/uprobes.h>
  25. #include <linux/context_tracking.h>
  26. #include <linux/entry-common.h>
  27. #include <linux/syscalls.h>
  28. #include <asm/processor.h>
  29. #include <asm/ucontext.h>
  30. #include <asm/fpu/signal.h>
  31. #include <asm/fpu/xstate.h>
  32. #include <asm/vdso.h>
  33. #include <asm/mce.h>
  34. #include <asm/sighandling.h>
  35. #include <asm/vm86.h>
  36. #ifdef CONFIG_X86_64
  37. #include <linux/compat.h>
  38. #include <asm/proto.h>
  39. #include <asm/ia32_unistd.h>
  40. #include <asm/fpu/xstate.h>
  41. #endif /* CONFIG_X86_64 */
  42. #include <asm/syscall.h>
  43. #include <asm/sigframe.h>
  44. #include <asm/signal.h>
#ifdef CONFIG_X86_64

/*
 * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
 * alone.  Using this generally makes no sense unless
 * user_64bit_mode(regs) would return true.
 */
static void force_valid_ss(struct pt_regs *regs)
{
	u32 ar;

	/*
	 * LAR loads the access-rights bytes of the selector's descriptor
	 * and sets ZF on success; if the selector is invalid ZF is clear
	 * and we force ar to 0 so the check below fails.
	 */
	asm volatile ("lar %[old_ss], %[ar]\n\t"
		      "jz 1f\n\t"		/* If invalid: */
		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
		      "1:"
		      : [ar] "=r" (ar)
		      : [old_ss] "rm" ((u16)regs->ss));

	/*
	 * For a valid 64-bit user context, we need DPL 3, type
	 * read-write data or read-write exp-down data, and S and P
	 * set.  We can't use VERW because VERW doesn't check the
	 * P bit.
	 */
	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
		regs->ss = __USER_DS;
}
/*
 * On 64-bit, only copy up to the reserved tail of struct sigcontext;
 * on 32-bit the whole structure is architectural.
 */
# define CONTEXT_COPY_SIZE	offsetof(struct sigcontext, reserved1)
#else
# define CONTEXT_COPY_SIZE	sizeof(struct sigcontext)
#endif
/*
 * Load user register state from the sigcontext @usc (inside a signal
 * frame) back into @regs on sigreturn.
 *
 * @regs:     kernel view of the user registers to overwrite
 * @usc:      user pointer to the sigcontext to restore from
 * @uc_flags: ucontext flags; only UC_STRICT_RESTORE_SS is examined here
 *            (x86-64 only), callers with no relevant flags pass 0
 *
 * Returns true on success, false if the user copy or the FPU state
 * restore failed; callers treat false as a bad frame and raise SIGSEGV.
 */
static bool restore_sigcontext(struct pt_regs *regs,
			       struct sigcontext __user *usc,
			       unsigned long uc_flags)
{
	struct sigcontext sc;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (copy_from_user(&sc, usc, CONTEXT_COPY_SIZE))
		return false;

#ifdef CONFIG_X86_32
	loadsegment(gs, sc.gs);
	regs->fs = sc.fs;
	regs->es = sc.es;
	regs->ds = sc.ds;
#endif /* CONFIG_X86_32 */

	regs->bx = sc.bx;
	regs->cx = sc.cx;
	regs->dx = sc.dx;
	regs->si = sc.si;
	regs->di = sc.di;
	regs->bp = sc.bp;
	regs->ax = sc.ax;
	regs->sp = sc.sp;
	regs->ip = sc.ip;

#ifdef CONFIG_X86_64
	regs->r8 = sc.r8;
	regs->r9 = sc.r9;
	regs->r10 = sc.r10;
	regs->r11 = sc.r11;
	regs->r12 = sc.r12;
	regs->r13 = sc.r13;
	regs->r14 = sc.r14;
	regs->r15 = sc.r15;
#endif /* CONFIG_X86_64 */

	/* Get CS/SS and force CPL3 */
	regs->cs = sc.cs | 0x03;
	regs->ss = sc.ss | 0x03;

	/* Only the bits in FIX_EFLAGS may be taken from userspace. */
	regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
	/* disable syscall checks */
	regs->orig_ax = -1;

#ifdef CONFIG_X86_64
	/*
	 * Fix up SS if needed for the benefit of old DOSEMU and
	 * CRIU.
	 */
	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
		force_valid_ss(regs);
#endif

	return fpu__restore_sig((void __user *)sc.fpstate,
				IS_ENABLED(CONFIG_X86_32));
}
/*
 * Write the user-visible register state from @regs, plus the fpstate
 * pointer, the old signal @mask, trap/error info and cr2, into the
 * user-space sigcontext @sc.
 *
 * Must be called inside a user_access_begin() section: every store is
 * an unsafe_put_user() that jumps to Efault on fault.
 *
 * Returns 0 on success, -EFAULT if any store faulted.
 */
static __always_inline int
__unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
			  struct pt_regs *regs, unsigned long mask)
{
#ifdef CONFIG_X86_32
	unsigned int gs;

	/* gs lives in the CPU, not in pt_regs, on 32-bit. */
	savesegment(gs, gs);

	unsafe_put_user(gs, (unsigned int __user *)&sc->gs, Efault);
	unsafe_put_user(regs->fs, (unsigned int __user *)&sc->fs, Efault);
	unsafe_put_user(regs->es, (unsigned int __user *)&sc->es, Efault);
	unsafe_put_user(regs->ds, (unsigned int __user *)&sc->ds, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(regs->di, &sc->di, Efault);
	unsafe_put_user(regs->si, &sc->si, Efault);
	unsafe_put_user(regs->bp, &sc->bp, Efault);
	unsafe_put_user(regs->sp, &sc->sp, Efault);
	unsafe_put_user(regs->bx, &sc->bx, Efault);
	unsafe_put_user(regs->dx, &sc->dx, Efault);
	unsafe_put_user(regs->cx, &sc->cx, Efault);
	unsafe_put_user(regs->ax, &sc->ax, Efault);
#ifdef CONFIG_X86_64
	unsafe_put_user(regs->r8, &sc->r8, Efault);
	unsafe_put_user(regs->r9, &sc->r9, Efault);
	unsafe_put_user(regs->r10, &sc->r10, Efault);
	unsafe_put_user(regs->r11, &sc->r11, Efault);
	unsafe_put_user(regs->r12, &sc->r12, Efault);
	unsafe_put_user(regs->r13, &sc->r13, Efault);
	unsafe_put_user(regs->r14, &sc->r14, Efault);
	unsafe_put_user(regs->r15, &sc->r15, Efault);
#endif /* CONFIG_X86_64 */

	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
	unsafe_put_user(regs->ip, &sc->ip, Efault);
#ifdef CONFIG_X86_32
	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);
#else /* !CONFIG_X86_32 */
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->cs, &sc->cs, Efault);
	unsafe_put_user(0, &sc->gs, Efault);
	unsafe_put_user(0, &sc->fs, Efault);
	unsafe_put_user(regs->ss, &sc->ss, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault);

	/* non-iBCS2 extensions.. */
	unsafe_put_user(mask, &sc->oldmask, Efault);
	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
	return 0;
Efault:
	return -EFAULT;
}
  179. #define unsafe_put_sigcontext(sc, fp, regs, set, label) \
  180. do { \
  181. if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0])) \
  182. goto label; \
  183. } while(0);
  184. #define unsafe_put_sigmask(set, frame, label) \
  185. unsafe_put_user(*(__u64 *)(set), \
  186. (__u64 __user *)&(frame)->uc.uc_sigmask, \
  187. label)
  188. /*
  189. * Set up a signal frame.
  190. */
  191. /* x86 ABI requires 16-byte alignment */
  192. #define FRAME_ALIGNMENT 16UL
  193. #define MAX_FRAME_PADDING (FRAME_ALIGNMENT - 1)
  194. /*
  195. * Determine which stack to use..
  196. */
  197. static unsigned long align_sigframe(unsigned long sp)
  198. {
  199. #ifdef CONFIG_X86_32
  200. /*
  201. * Align the stack pointer according to the i386 ABI,
  202. * i.e. so that on function entry ((sp + 4) & 15) == 0.
  203. */
  204. sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
  205. #else /* !CONFIG_X86_32 */
  206. sp = round_down(sp, FRAME_ALIGNMENT) - 8;
  207. #endif
  208. return sp;
  209. }
/*
 * Pick the user stack address for a signal frame of @frame_size bytes,
 * honouring SA_ONSTACK/sigaltstack and the x86-64 red zone, carve out
 * room for the FPU state, and copy the FPU state out to userspace.
 *
 * On success *fpstate points at the user FPU save area and the frame
 * address is returned.  On sigaltstack overflow or FPU-copy failure an
 * always-bogus address (-1) is returned so the caller's subsequent user
 * accesses fault and the task dies with SIGSEGV.
 */
static void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	/* Default to using normal stack */
	bool nested_altstack = on_sig_stack(regs->sp);
	bool entering_altstack = false;
	unsigned long math_size = 0;
	unsigned long sp = regs->sp;
	unsigned long buf_fx = 0;

	/* redzone */
	if (IS_ENABLED(CONFIG_X86_64))
		sp -= 128;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/*
		 * This checks nested_altstack via sas_ss_flags(). Sensible
		 * programs use SS_AUTODISARM, which disables that check, and
		 * programs that don't use SS_AUTODISARM get compatible.
		 */
		if (sas_ss_flags(sp) == 0) {
			sp = current->sas_ss_sp + current->sas_ss_size;
			entering_altstack = true;
		}
	} else if (IS_ENABLED(CONFIG_X86_32) &&
		   !nested_altstack &&
		   regs->ss != __USER_DS &&
		   !(ka->sa.sa_flags & SA_RESTORER) &&
		   ka->sa.sa_restorer) {
		/* This is the legacy signal stack switching. */
		sp = (unsigned long) ka->sa.sa_restorer;
		entering_altstack = true;
	}

	/* Reserve (and align) space for the FPU state above the frame. */
	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
				  &buf_fx, &math_size);
	*fpstate = (void __user *)sp;

	sp = align_sigframe(sp - frame_size);

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (unlikely((nested_altstack || entering_altstack) &&
		     !__on_sig_stack(sp))) {
		if (show_unhandled_signals && printk_ratelimit())
			pr_info("%s[%d] overflowed sigaltstack\n",
				current->comm, task_pid_nr(current));
		return (void __user *)-1L;
	}

	/* save i387 and extended state */
	if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size))
		return (void __user *)-1L;

	return (void __user *)sp;
}
#ifdef CONFIG_X86_32
/*
 * Historical int $0x80 sigreturn trampolines.  These byte sequences are
 * copied onto the signal frame purely as a signature for gdb (and very
 * old userspace); the real return path goes through the vDSO or the
 * application's SA_RESTORER.
 */
static const struct {
	u16 poplmovl;
	u32 val;
	u16 int80;
} __attribute__((packed)) retcode = {
	0xb858,		/* popl %eax; movl $..., %eax */
	__NR_sigreturn,
	0x80cd,		/* int $0x80 */
};

static const struct {
	u8  movl;
	u32 val;
	u16 int80;
	u8  pad;
} __attribute__((packed)) rt_retcode = {
	0xb8,		/* movl $..., %eax */
	__NR_rt_sigreturn,
	0x80cd,		/* int $0x80 */
	0
};
/*
 * Build a classic (non-RT) 32-bit signal frame on the user stack and
 * point the user registers at the handler.  Returns 0 on success or
 * -EFAULT if the frame could not be written.
 */
static int
__setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
	      struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_sigcontext(&frame->sc, fp, regs, set, Efault);
	/* The high 64 signals don't fit in sc.oldmask. */
	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
	if (current->mm->context.vdso)
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_sigreturn;
	else
		restorer = &frame->retcode;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;

	/* Set up to return from userspace.  */
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&retcode), (u64 *)frame->retcode, Efault);
	user_access_end();

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}
/*
 * Build a 32-bit RT signal frame (siginfo + ucontext) on the user stack
 * and point the user registers at the handler.  Returns 0 on success or
 * -EFAULT if the frame could not be written.
 */
static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_user(&frame->info, &frame->pinfo, Efault);
	unsafe_put_user(&frame->uc, &frame->puc, Efault);

	/* Create the ucontext.  */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		unsafe_put_user(UC_FP_XSTATE, &frame->uc.uc_flags, Efault);
	else
		unsafe_put_user(0, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  */
	restorer = current->mm->context.vdso +
		vdso_image_32.sym___kernel_rt_sigreturn;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	/* siginfo copy uses normal uaccess, outside the unsafe section. */
	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}
  383. #else /* !CONFIG_X86_32 */
  384. static unsigned long frame_uc_flags(struct pt_regs *regs)
  385. {
  386. unsigned long flags;
  387. if (boot_cpu_has(X86_FEATURE_XSAVE))
  388. flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
  389. else
  390. flags = UC_SIGCONTEXT_SS;
  391. if (likely(user_64bit_mode(regs)))
  392. flags |= UC_STRICT_RESTORE_SS;
  393. return flags;
  394. }
/*
 * Build a 64-bit RT signal frame on the user stack and point the user
 * registers at the handler (System V AMD64 calling convention).
 * Returns 0 on success or -EFAULT on failure.
 */
static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	unsigned long uc_flags;

	/* x86-64 should always use SA_RESTORER. */
	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/* This also works for non SA_SIGINFO handlers because they expect the
	   next argument after the signal number on the stack. */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/*
	 * Set up the CS and SS registers to run signal handlers in
	 * 64-bit mode, even if the handler happens to be interrupting
	 * 32-bit or 16-bit code.
	 *
	 * SS is subtle.  In 64-bit mode, we don't need any particular
	 * SS descriptor, but we do need SS to be valid.  It's possible
	 * that the old SS is entirely bogus -- this can happen if the
	 * signal we're trying to deliver is #GP or #SS caused by a bad
	 * SS value.  We also have a compatibility issue here: DOSEMU
	 * relies on the contents of the SS register indicating the
	 * SS value at the time of the signal, even though that code in
	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
	 * avoids relying on sigreturn to restore SS; instead it uses
	 * a trampoline.)  So we do our best: if the old SS was valid,
	 * we keep it.  Otherwise we replace it.
	 */
	regs->cs = __USER_CS;

	if (unlikely(regs->ss != __USER_DS))
		force_valid_ss(regs);

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}
  457. #endif /* CONFIG_X86_32 */
  458. #ifdef CONFIG_X86_X32_ABI
  459. static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to,
  460. const struct kernel_siginfo *from)
  461. {
  462. struct compat_siginfo new;
  463. copy_siginfo_to_external32(&new, from);
  464. if (from->si_signo == SIGCHLD) {
  465. new._sifields._sigchld_x32._utime = from->si_utime;
  466. new._sifields._sigchld_x32._stime = from->si_stime;
  467. }
  468. if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
  469. return -EFAULT;
  470. return 0;
  471. }
  472. int copy_siginfo_to_user32(struct compat_siginfo __user *to,
  473. const struct kernel_siginfo *from)
  474. {
  475. if (in_x32_syscall())
  476. return x32_copy_siginfo_to_user(to, from);
  477. return __copy_siginfo_to_user32(to, from);
  478. }
  479. #endif /* CONFIG_X86_X32_ABI */
/*
 * Build an x32 RT signal frame.  When CONFIG_X86_X32_ABI is off this
 * compiles to a stub returning 0 (the function is then never reached);
 * note the deliberate #ifdef layout keeping "return 0" outside the
 * conditional sections.
 */
static int x32_setup_rt_frame(struct ksignal *ksig,
			      compat_sigset_t *set,
			      struct pt_regs *regs)
{
#ifdef CONFIG_X86_X32_ABI
	struct rt_sigframe_x32 __user *frame;
	unsigned long uc_flags;
	void __user *restorer;
	void __user *fp = NULL;

	/* x32, like x86-64, requires SA_RESTORER. */
	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
	unsafe_put_user(0, &frame->uc.uc__pad0, Efault);
	restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (x32_copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* We use the x32 calling convention here... */
	regs->di = ksig->sig;
	regs->si = (unsigned long) &frame->info;
	regs->dx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
#endif	/* CONFIG_X86_X32_ABI */
	return 0;
#ifdef CONFIG_X86_X32_ABI
Efault:
	user_access_end();
	return -EFAULT;
#endif
}
  528. /*
  529. * Do a signal return; undo the signal stack.
  530. */
#ifdef CONFIG_X86_32
/*
 * Classic (non-RT) sigreturn: unwind the frame __setup_frame() built.
 * Restores the blocked-signal mask and the saved register state; on a
 * bad frame forces SIGSEGV via signal_fault().
 */
SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *frame;
	sigset_t set;

	/* The handler popped sig and pretcode: frame starts at sp - 8. */
	frame = (struct sigframe __user *)(regs->sp - 8);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask) ||
	    __get_user(set.sig[1], &frame->extramask[0]))
		goto badframe;

	set_current_blocked(&set);

	/*
	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
	 * Save a few cycles by skipping the __get_user.
	 */
	if (!restore_sigcontext(regs, &frame->sc, 0))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "sigreturn");

	return 0;
}
#endif /* CONFIG_X86_32 */
/*
 * RT sigreturn: unwind the rt frame built by __setup_rt_frame().
 * Restores sigmask, register state (honouring uc_flags) and the
 * alternate signal stack settings; bad frames force SIGSEGV.
 */
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	/* The handler consumed the return-address slot only. */
	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}
  579. /*
  580. * There are four different struct types for signal frame: sigframe_ia32,
  581. * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
  582. * -- the largest size. It means the size for 64-bit apps is a bit more
  583. * than needed, but this keeps the code simple.
  584. */
  585. #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
  586. # define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct sigframe_ia32)
  587. #else
  588. # define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct rt_sigframe)
  589. #endif
  590. /*
  591. * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
  592. * If a signal frame starts at an unaligned address, extra space is required.
  593. * This is the max alignment padding, conservatively.
  594. */
  595. #define MAX_XSAVE_PADDING 63UL
  596. /*
  597. * The frame data is composed of the following areas and laid out as:
  598. *
  599. * -------------------------
  600. * | alignment padding |
  601. * -------------------------
  602. * | (f)xsave frame |
  603. * -------------------------
  604. * | fsave header |
  605. * -------------------------
  606. * | alignment padding |
  607. * -------------------------
  608. * | siginfo + ucontext |
  609. * -------------------------
  610. */
/* max_frame_size tells userspace the worst case signal stack size. */
static unsigned long __ro_after_init max_frame_size;
/* Size of the default (boot-time) FPU signal-frame state. */
static unsigned int __ro_after_init fpu_default_state_size;

/*
 * Compute the worst-case signal frame size once at boot:
 * siginfo+ucontext worst case, FPU state, plus maximum alignment
 * padding for both, rounded up to the 16-byte stack alignment.
 */
static int __init init_sigframe_size(void)
{
	fpu_default_state_size = fpu__get_fpstate_size();

	max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;

	max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING;

	/* Userspace expects an aligned size. */
	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);

	pr_info("max sigframe size: %lu\n", max_frame_size);
	return 0;
}
early_initcall(init_sigframe_size);
/* Report the boot-computed worst-case signal frame size (AT_MINSIGSTKSZ). */
unsigned long get_sigframe_size(void)
{
	return max_frame_size;
}
  629. static inline int is_ia32_compat_frame(struct ksignal *ksig)
  630. {
  631. return IS_ENABLED(CONFIG_IA32_EMULATION) &&
  632. ksig->ka.sa.sa_flags & SA_IA32_ABI;
  633. }
  634. static inline int is_ia32_frame(struct ksignal *ksig)
  635. {
  636. return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
  637. }
  638. static inline int is_x32_frame(struct ksignal *ksig)
  639. {
  640. return IS_ENABLED(CONFIG_X86_X32_ABI) &&
  641. ksig->ka.sa.sa_flags & SA_X32_ABI;
  642. }
/*
 * Dispatch signal-frame construction to the ABI the handler was
 * registered under (ia32, x32 or native 64-bit).  Returns the setup
 * helper's result: 0 on success, negative on failure.
 */
static int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	int usig = ksig->sig;
	sigset_t *set = sigmask_to_save();
	/* Compat paths reinterpret the same mask storage. */
	compat_sigset_t *cset = (compat_sigset_t *) set;

	/* Perform fixup for the pre-signal frame. */
	rseq_signal_deliver(ksig, regs);

	/* Set up the stack frame */
	if (is_ia32_frame(ksig)) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			return ia32_setup_rt_frame(usig, ksig, cset, regs);
		else
			return ia32_setup_frame(usig, ksig, cset, regs);
	} else if (is_x32_frame(ksig)) {
		return x32_setup_rt_frame(ksig, cset, regs);
	} else {
		return __setup_rt_frame(ksig->sig, ksig, set, regs);
	}
}
/*
 * Deliver one signal: fix up an interrupted syscall for restart
 * semantics, build the signal frame and adjust EFLAGS/FPU state for
 * handler entry.  Errors in frame setup end up in signal_setup_done(),
 * which forces SIGSEGV.
 */
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	bool stepping, failed;
	struct fpu *fpu = &current->thread.fpu;

	if (v8086_mode(regs))
		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			/* Rewind ip over the 2-byte syscall instruction. */
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
	 * so that register information in the sigcontext is correct and
	 * then notify the tracer before entering the signal handler.
	 */
	stepping = test_thread_flag(TIF_SINGLESTEP);
	if (stepping)
		user_disable_single_step(current);

	failed = (setup_rt_frame(ksig, regs) < 0);
	if (!failed) {
		/*
		 * Clear the direction flag as per the ABI for function entry.
		 *
		 * Clear RF when entering the signal handler, because
		 * it might disable possible debug exception from the
		 * signal handler.
		 *
		 * Clear TF for the case when it wasn't set by debugger to
		 * avoid the recursive send_sigtrap() in SIGTRAP handler.
		 */
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		/*
		 * Ensure the signal handler starts with the new fpu state.
		 */
		fpu__clear_user_states(fpu);
	}
	signal_setup_done(failed, ksig, stepping);
}
/*
 * Pick the restart_syscall number matching the ABI (ia32 compat, x32
 * or native) that the interrupted syscall was issued under, so the
 * restarted syscall re-enters through the same ABI.
 */
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
#ifdef CONFIG_IA32_EMULATION
	if (current->restart_block.arch_data & TS_COMPAT)
		return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
	/* Preserve the x32 marker bit from the original syscall number. */
	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
#else
	return __NR_restart_syscall;
#endif
}
  730. /*
  731. * Note that 'init' is a special process: it doesn't get signals it doesn't
  732. * want to handle. Thus you cannot kill init even with a SIGKILL even by
  733. * mistake.
  734. */
/*
 * Entry point from the exit-to-usermode path: deliver a pending signal
 * if there is one, otherwise arrange for syscall restart and restore
 * the saved sigmask.
 */
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee! Actually deliver the signal.  */
		handle_signal(&ksig, regs);
		return;
	}

	/* Did we come from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* Restart the system call - no handlers present */
		switch (syscall_get_error(current, regs)) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/* Re-execute the 2-byte syscall instruction. */
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->ax = get_nr_restart_syscall(regs);
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}
/*
 * Report a bad signal frame encountered in @where (e.g. "rt_sigreturn")
 * and kill the task with SIGSEGV.  Frames for init-like tasks
 * (pid <= 1) are logged at emergency level.
 */
void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
{
	struct task_struct *me = current;

	if (show_unhandled_signals && printk_ratelimit()) {
		printk("%s"
			"%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
			task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
			me->comm, me->pid, where, frame,
			regs->ip, regs->sp, regs->orig_ax);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig(SIGSEGV);
}
#ifdef CONFIG_DYNAMIC_SIGFRAME

/* Whether sigaltstack sizes are validated against the real frame size. */
#ifdef CONFIG_STRICT_SIGALTSTACK_SIZE
static bool strict_sigaltstack_size __ro_after_init = true;
#else
static bool strict_sigaltstack_size __ro_after_init = false;
#endif

/* "strict_sas_size=" boot parameter: override the Kconfig default. */
static int __init strict_sas_size(char *arg)
{
	return kstrtobool(arg, &strict_sigaltstack_size) == 0;
}
__setup("strict_sas_size", strict_sas_size);
  790. /*
  791. * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512
  792. * exceeds that size already. As such programs might never use the
  793. * sigaltstack they just continued to work. While always checking against
  794. * the real size would be correct, this might be considered a regression.
  795. *
  796. * Therefore avoid the sanity check, unless enforced by kernel
  797. * configuration or command line option.
  798. *
  799. * When dynamic FPU features are supported, the check is also enforced when
  800. * the task has permissions to use dynamic features. Tasks which have no
  801. * permission are checked against the size of the non-dynamic feature set
  802. * if strict checking is enabled. This avoids forcing all tasks on the
  803. * system to allocate large sigaltstacks even if they are never going
  804. * to use a dynamic feature. As this is serialized via sighand::siglock
  805. * any permission request for a dynamic feature either happened already
  806. * or will see the newly install sigaltstack size in the permission checks.
  807. */
/*
 * Validate a sigaltstack() size request against the space a signal
 * frame can actually consume for this task (see the comment block
 * above for the policy).  Called with sighand::siglock held.
 */
bool sigaltstack_size_valid(size_t ss_size)
{
	/* Non-FPU part of the worst-case frame. */
	unsigned long fsize = max_frame_size - fpu_default_state_size;
	u64 mask;

	lockdep_assert_held(&current->sighand->siglock);

	/* Nothing to check when neither dynamic nor strict mode applies. */
	if (!fpu_state_size_dynamic() && !strict_sigaltstack_size)
		return true;

	/* Add the FPU size this task is actually permitted to use. */
	fsize += current->group_leader->thread.fpu.perm.__user_state_size;
	if (likely(ss_size > fsize))
		return true;

	if (strict_sigaltstack_size)
		return ss_size > fsize;

	/* Enforce only for tasks permitted to use dynamic features. */
	mask = current->group_leader->thread.fpu.perm.__state_perm;
	if (mask & XFEATURE_MASK_USER_DYNAMIC)
		return ss_size > fsize;

	return true;
}
  825. #endif /* CONFIG_DYNAMIC_SIGFRAME */
  826. #ifdef CONFIG_X86_X32_ABI
/*
 * x32 variant of rt_sigreturn: unwind the frame x32_setup_rt_frame()
 * built.  Only the low 64 bits of the sigmask exist in the x32 frame.
 */
COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe_x32 __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "x32 rt_sigreturn");
	return 0;
}
  850. #endif