// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/resume_user_mode.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"

extern const unsigned long sigreturn_codes[17];
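/*
 * Randomised offset of the sigreturn trampolines within the signal page:
 * chosen once in get_signal_page() and added to mm->context.sigpage when
 * setup_return() builds the handler's return address.
 */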
static unsigned long signal_return_offset;

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;
        int err = 0;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

        if (test_thread_flag(TIF_USING_IWMMXT)) {
                kframe->magic = IWMMXT_MAGIC;
                kframe->size = IWMMXT_STORAGE_SIZE;
                iwmmxt_task_copy(current_thread_info(), &kframe->storage);
        } else {
                /*
                 * For bug-compatibility with older kernels, some space
                 * has to be reserved for iWMMXt even if it's not used.
                 * Set the magic and size appropriately so that properly
                 * written userspace can skip it reliably:
                 */
                *kframe = (struct iwmmxt_sigframe) {
                        .magic = DUMMY_MAGIC,
                        .size  = IWMMXT_STORAGE_SIZE,
                };
        }

        err = __copy_to_user(frame, kframe, sizeof(*kframe));

        return err;
}

static int restore_iwmmxt_context(char __user **auxp)
{
        struct iwmmxt_sigframe __user *frame =
                (struct iwmmxt_sigframe __user *)*auxp;
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        if (__copy_from_user(kframe, frame, sizeof(*frame)))
                return -1;

        /*
         * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
         * block is discarded for compatibility with setup_sigframe() if
         * present, but we don't mandate its presence.  If some other
         * magic is here, it's not for us:
         */
        if (!test_thread_flag(TIF_USING_IWMMXT) &&
            kframe->magic != DUMMY_MAGIC)
                return 0;

        if (kframe->size != IWMMXT_STORAGE_SIZE)
                return -1;

        if (test_thread_flag(TIF_USING_IWMMXT)) {
                if (kframe->magic != IWMMXT_MAGIC)
                        return -1;
                iwmmxt_task_restore(current_thread_info(), &kframe->storage);
        }

        *auxp += IWMMXT_STORAGE_SIZE;
        return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
        struct vfp_sigframe kframe;
        int err = 0;

        memset(&kframe, 0, sizeof(kframe));
        kframe.magic = VFP_MAGIC;
        kframe.size = VFP_STORAGE_SIZE;

        err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
        if (err)
                return err;

        return __copy_to_user(frame, &kframe, sizeof(kframe));
}

static int restore_vfp_context(char __user **auxp)
{
        struct vfp_sigframe frame;
        int err;

        err = __copy_from_user(&frame, *auxp, sizeof(frame));
        if (err)
                return err;

        if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
                return -EINVAL;

        *auxp += sizeof(frame);
        return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}

#endif
/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
        struct sigcontext context;
        char __user *aux;
        sigset_t set;
        int err;

        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (err == 0)
                set_current_blocked(&set);

        err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
        if (err == 0) {
                regs->ARM_r0 = context.arm_r0;
                regs->ARM_r1 = context.arm_r1;
                regs->ARM_r2 = context.arm_r2;
                regs->ARM_r3 = context.arm_r3;
                regs->ARM_r4 = context.arm_r4;
                regs->ARM_r5 = context.arm_r5;
                regs->ARM_r6 = context.arm_r6;
                regs->ARM_r7 = context.arm_r7;
                regs->ARM_r8 = context.arm_r8;
                regs->ARM_r9 = context.arm_r9;
                regs->ARM_r10 = context.arm_r10;
                regs->ARM_fp = context.arm_fp;
                regs->ARM_ip = context.arm_ip;
                regs->ARM_sp = context.arm_sp;
                regs->ARM_lr = context.arm_lr;
                regs->ARM_pc = context.arm_pc;
                regs->ARM_cpsr = context.arm_cpsr;
        }
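        /*
         * valid_user_regs() sanitises the restored CPSR and fails if it
         * does not describe an ordinary user-mode state, so a forged
         * frame cannot be used to return to a privileged processor mode.
         */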
        err |= !valid_user_regs(regs);

        aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_IWMMXT
        if (err == 0)
                err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
        if (err == 0)
                err |= restore_vfp_context(&aux);
#endif

        return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
        struct sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be word aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct sigframe __user *)regs->ARM_sp;

        if (!access_ok(frame, sizeof (*frame)))
                goto badframe;

        if (restore_sigframe(regs, frame))
                goto badframe;

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV);
        return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be word aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct rt_sigframe __user *)regs->ARM_sp;

        if (!access_ok(frame, sizeof (*frame)))
                goto badframe;

        if (restore_sigframe(regs, &frame->sig))
                goto badframe;

        if (restore_altstack(&frame->sig.uc.uc_stack))
                goto badframe;

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV);
        return 0;
}
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
        struct aux_sigframe __user *aux;
        struct sigcontext context;
        int err = 0;

        context = (struct sigcontext) {
                .arm_r0        = regs->ARM_r0,
                .arm_r1        = regs->ARM_r1,
                .arm_r2        = regs->ARM_r2,
                .arm_r3        = regs->ARM_r3,
                .arm_r4        = regs->ARM_r4,
                .arm_r5        = regs->ARM_r5,
                .arm_r6        = regs->ARM_r6,
                .arm_r7        = regs->ARM_r7,
                .arm_r8        = regs->ARM_r8,
                .arm_r9        = regs->ARM_r9,
                .arm_r10       = regs->ARM_r10,
                .arm_fp        = regs->ARM_fp,
                .arm_ip        = regs->ARM_ip,
                .arm_sp        = regs->ARM_sp,
                .arm_lr        = regs->ARM_lr,
                .arm_pc        = regs->ARM_pc,
                .arm_cpsr      = regs->ARM_cpsr,

                .trap_no       = current->thread.trap_no,
                .error_code    = current->thread.error_code,
                .fault_address = current->thread.address,
                .oldmask       = set->sig[0],
        };

        err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));

        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

        aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_IWMMXT
        if (err == 0)
                err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
        if (err == 0)
                err |= preserve_vfp_context(&aux->vfp);
#endif
        err |= __put_user(0, &aux->end_magic);

        return err;
}

static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
        unsigned long sp = sigsp(regs->ARM_sp, ksig);
        void __user *frame;

        /*
         * ATPCS B01 mandates 8-byte alignment
         */
        frame = (void __user *)((sp - framesize) & ~7);

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(frame, framesize))
                frame = NULL;

        return frame;
}
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
             unsigned long __user *rc, void __user *frame)
{
        unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
        unsigned long handler_fdpic_GOT = 0;
        unsigned long retcode;
        unsigned int idx, thumb = 0;
        unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
        bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
                     (current->personality & FDPIC_FUNCPTRS);

        if (fdpic) {
                unsigned long __user *fdpic_func_desc =
                                (unsigned long __user *)handler;
                if (__get_user(handler, &fdpic_func_desc[0]) ||
                    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
                        return 1;
        }
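        /*
         * PSR_ENDSTATE is PSR_E_BIT on CONFIG_CPU_ENDIAN_BE8 kernels and 0
         * otherwise, so the handler is always entered with the data
         * endianness the ABI expects, whatever the interrupted code had
         * done with the E bit (already masked out of 'cpsr' above).
         */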
        cpsr |= PSR_ENDSTATE;

        /*
         * Maybe we need to deliver a 32-bit signal to a 26-bit task.
         */
        if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
                cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
        if (elf_hwcap & HWCAP_THUMB) {
                /*
                 * The LSB of the handler determines if we're going to
                 * be using THUMB or ARM mode for this signal handler.
                 */
                thumb = handler & 1;

                /*
                 * Clear the If-Then Thumb-2 execution state.  ARM spec
                 * requires this to be all 000s in ARM mode.  Snapdragon
                 * S4/Krait misbehaves on a Thumb=>ARM signal transition
                 * without this.
                 *
                 * We must do this whenever we are running on a Thumb-2
                 * capable CPU, which includes ARMv6T2.  However, we elect
                 * to always do this to simplify the code; this field is
                 * marked UNK/SBZP for older architectures.
                 */
                cpsr &= ~PSR_IT_MASK;
                if (thumb) {
                        cpsr |= PSR_T_BIT;
                } else
                        cpsr &= ~PSR_T_BIT;
        }
#endif

        if (ksig->ka.sa.sa_flags & SA_RESTORER) {
                retcode = (unsigned long)ksig->ka.sa.sa_restorer;
                if (fdpic) {
                        /*
                         * We need code to load the function descriptor.
                         * That code follows the standard sigreturn code
                         * (6 words), and is made of 3 + 2 words for each
                         * variant. The 4th copied word is the actual FD
                         * address that the assembly code expects.
                         */
                        idx = 6 + thumb * 3;
                        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                                idx += 5;
                        if (__put_user(sigreturn_codes[idx],   rc  ) ||
                            __put_user(sigreturn_codes[idx+1], rc+1) ||
                            __put_user(sigreturn_codes[idx+2], rc+2) ||
                            __put_user(retcode,                rc+3))
                                return 1;
                        goto rc_finish;
                }
        } else {
                idx = thumb << 1;
                if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                        idx += 3;

                /*
                 * Put the sigreturn code on the stack no matter which return
                 * mechanism we use in order to remain ABI compliant
                 */
                if (__put_user(sigreturn_codes[idx], rc) ||
                    __put_user(sigreturn_codes[idx+1], rc+1))
                        return 1;

rc_finish:
#ifdef CONFIG_MMU
                if (cpsr & MODE32_BIT) {
                        struct mm_struct *mm = current->mm;

                        /*
                         * 32-bit code can use the signal return page
                         * except when the MPU has protected the vectors
                         * page from PL0
                         */
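                        /*
                         * sigreturn_codes[] is indexed in words, so
                         * 'idx << 2' is the byte offset of the chosen
                         * trampoline within the signal page; adding
                         * 'thumb' sets bit 0 so a Thumb handler returns
                         * through the trampoline in Thumb state.
                         */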
                        retcode = mm->context.sigpage + signal_return_offset +
                                  (idx << 2) + thumb;
                } else
#endif
                {
                        /*
                         * Ensure that the instruction cache sees
                         * the return code written onto the stack.
                         */
                        flush_icache_range((unsigned long)rc,
                                           (unsigned long)(rc + 3));

                        retcode = ((unsigned long)rc) + thumb;
                }
        }

        regs->ARM_r0 = ksig->sig;
        regs->ARM_sp = (unsigned long)frame;
        regs->ARM_lr = retcode;
        regs->ARM_pc = handler;
        if (fdpic)
                regs->ARM_r9 = handler_fdpic_GOT;
        regs->ARM_cpsr = cpsr;

        return 0;
}

static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        /*
         * Set uc.uc_flags to a value which sc.trap_no would never have.
         */
        err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);

        err |= setup_sigframe(frame, regs, set);
        if (err == 0)
                err = setup_return(regs, ksig, frame->retcode, frame);

        return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        err |= copy_siginfo_to_user(&frame->info, &ksig->info);

        err |= __put_user(0, &frame->sig.uc.uc_flags);
        err |= __put_user(NULL, &frame->sig.uc.uc_link);

        err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
        err |= setup_sigframe(&frame->sig, regs, set);
        if (err == 0)
                err = setup_return(regs, ksig, frame->sig.retcode, frame);

        if (err == 0) {
                /*
                 * For realtime signals we must also set the second and third
                 * arguments for the signal handler.
                 *    -- Peter Maydell <[email protected]> 2000-12-06
                 */
                regs->ARM_r1 = (unsigned long)&frame->info;
                regs->ARM_r2 = (unsigned long)&frame->sig.uc;
        }

        return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int ret;

        /*
         * Perform fixup for the pre-signal frame.
         */
        rseq_signal_deliver(ksig, regs);

        /*
         * Set up the stack frame
         */
        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                ret = setup_rt_frame(ksig, oldset, regs);
        else
                ret = setup_frame(ksig, oldset, regs);

        /*
         * Check that the resulting registers are actually sane.
         */
        ret |= !valid_user_regs(regs);

        signal_setup_done(ret, ksig, 0);
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
        unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
        struct ksignal ksig;
        int restart = 0;

        /*
         * If we were from a system call, check for system call restarting...
         */
        if (syscall) {
                continue_addr = regs->ARM_pc;
                restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
                retval = regs->ARM_r0;

                /*
                 * Prepare for system call restart.  We do this here so that a
                 * debugger will see the already changed PSW.
                 */
                switch (retval) {
                case -ERESTART_RESTARTBLOCK:
                        restart -= 2;
                        fallthrough;
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        restart++;
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
                }
        }

        /*
         * Get the signal to deliver.  When running under ptrace, at this
         * point the debugger may change all our registers ...
         */
        /*
         * Depending on the signal settings we may need to revert the
         * decision to restart the system call.  But skip this if a
         * debugger has chosen to restart at a different PC.
         */
        if (get_signal(&ksig)) {
                /* handler */
                if (unlikely(restart) && regs->ARM_pc == restart_addr) {
                        if (retval == -ERESTARTNOHAND ||
                            retval == -ERESTART_RESTARTBLOCK
                            || (retval == -ERESTARTSYS
                                && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
                }
                handle_signal(&ksig, regs);
        } else {
                /* no handler */
                restore_saved_sigmask();
                if (unlikely(restart) && regs->ARM_pc == restart_addr) {
                        regs->ARM_pc = continue_addr;
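                        /*
                         * 'restart' is 1 for the plain restart cases and -1
                         * for -ERESTART_RESTARTBLOCK (the -2 above plus the
                         * shared increment).  Returning it lets the assembly
                         * caller replay the system call without coming back
                         * through the signal code; the negative value marks
                         * the restart_syscall variant.
                         */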
                        return restart;
                }
        }
        return 0;
}

asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
        /*
         * The assembly code enters us with IRQs off, but it hasn't
         * informed the tracing code of that for efficiency reasons.
         * Update the trace code with the current status.
         */
        trace_hardirqs_off();

        do {
                if (likely(thread_flags & _TIF_NEED_RESCHED)) {
                        schedule();
                } else {
                        if (unlikely(!user_mode(regs)))
                                return 0;
                        local_irq_enable();
                        if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
                                int restart = do_signal(regs, syscall);
                                if (unlikely(restart)) {
                                        /*
                                         * Restart without handlers.
                                         * Deal with it without leaving
                                         * the kernel space.
                                         */
                                        return restart;
                                }
                                syscall = 0;
                        } else if (thread_flags & _TIF_UPROBE) {
                                uprobe_notify_resume(regs);
                        } else {
                                resume_user_mode_work(regs);
                        }
                }
                local_irq_disable();
                thread_flags = read_thread_flags();
        } while (thread_flags & _TIF_WORK_MASK);
        return 0;
}

struct page *get_signal_page(void)
{
        unsigned long ptr;
        unsigned offset;
        struct page *page;
        void *addr;

        page = alloc_pages(GFP_KERNEL, 0);
        if (!page)
                return NULL;

        addr = page_address(page);

        /* Poison the entire page */
        memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
                 PAGE_SIZE / sizeof(u32));

        /* Give the signal return code some randomness */
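        /*
         * The mask keeps the offset word-aligned and within 0x200..0x9fc,
         * so the 17-word sigreturn_codes[] copy below always fits inside
         * the page.
         */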
        offset = 0x200 + (get_random_u16() & 0x7fc);
        signal_return_offset = offset;

        /* Copy signal return handlers into the page */
        memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

        /* Flush out all instructions in this page */
        ptr = (unsigned long)addr;
        flush_icache_range(ptr, ptr + PAGE_SIZE);

        return page;
}

#ifdef CONFIG_DEBUG_RSEQ
asmlinkage void do_rseq_syscall(struct pt_regs *regs)
{
        rseq_syscall(regs);
}
#endif

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 9);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 4);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x0c);
static_assert(offsetof(siginfo_t, si_uid) == 0x10);
static_assert(offsetof(siginfo_t, si_tid) == 0x0c);
static_assert(offsetof(siginfo_t, si_overrun) == 0x10);
static_assert(offsetof(siginfo_t, si_status) == 0x14);
static_assert(offsetof(siginfo_t, si_utime) == 0x18);
static_assert(offsetof(siginfo_t, si_stime) == 0x1c);
static_assert(offsetof(siginfo_t, si_value) == 0x14);
static_assert(offsetof(siginfo_t, si_int) == 0x14);
static_assert(offsetof(siginfo_t, si_ptr) == 0x14);
static_assert(offsetof(siginfo_t, si_addr) == 0x0c);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x10);
static_assert(offsetof(siginfo_t, si_lower) == 0x14);
static_assert(offsetof(siginfo_t, si_upper) == 0x18);
static_assert(offsetof(siginfo_t, si_pkey) == 0x14);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x10);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x14);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x18);
static_assert(offsetof(siginfo_t, si_band) == 0x0c);
static_assert(offsetof(siginfo_t, si_fd) == 0x10);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x0c);
static_assert(offsetof(siginfo_t, si_syscall) == 0x10);
static_assert(offsetof(siginfo_t, si_arch) == 0x14);