/* arch/loongarch/kernel/signal.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Author: Hanlu Li <[email protected]>
  4. * Huacai Chen <[email protected]>
  5. * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  6. *
  7. * Derived from MIPS:
  8. * Copyright (C) 1991, 1992 Linus Torvalds
  9. * Copyright (C) 1994 - 2000 Ralf Baechle
  10. * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  11. * Copyright (C) 2014, Imagination Technologies Ltd.
  12. */
  13. #include <linux/audit.h>
  14. #include <linux/cache.h>
  15. #include <linux/context_tracking.h>
  16. #include <linux/irqflags.h>
  17. #include <linux/sched.h>
  18. #include <linux/mm.h>
  19. #include <linux/personality.h>
  20. #include <linux/smp.h>
  21. #include <linux/kernel.h>
  22. #include <linux/signal.h>
  23. #include <linux/errno.h>
  24. #include <linux/wait.h>
  25. #include <linux/ptrace.h>
  26. #include <linux/unistd.h>
  27. #include <linux/compiler.h>
  28. #include <linux/syscalls.h>
  29. #include <linux/uaccess.h>
  30. #include <asm/asm.h>
  31. #include <asm/cacheflush.h>
  32. #include <asm/cpu-features.h>
  33. #include <asm/fpu.h>
  34. #include <asm/ucontext.h>
  35. #include <asm/vdso.h>
#ifdef DEBUG_SIG
/* Verbose signal-delivery tracing; each message is prefixed with the function name. */
# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
# define DEBUGP(fmt, args...)
#endif

/*
 * Make sure we will not lose FPU ownership: preemption is disabled so the
 * task cannot migrate or be scheduled out (which could hand the FPU to
 * another task), and pagefaults are disabled so the user-memory copies
 * inside the critical section cannot sleep — they fail fast instead and
 * the callers retry (see protected_{save,restore}_fpu_context()).
 */
#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
  44. /* Assembly functions to move context to/from the FPU */
  45. extern asmlinkage int
  46. _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
  47. extern asmlinkage int
  48. _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
/* Signal frame pushed on the user stack by setup_rt_frame(). */
struct rt_sigframe {
	struct siginfo rs_info;		/* siginfo handed to the handler */
	struct ucontext rs_uctx;	/* saved user context (mask, stack, mcontext) */
};

/* Location and total size (header + payload + padding) of one extcontext record. */
struct _ctx_layout {
	struct sctx_info *addr;
	unsigned int size;
};

/* Computed layout of all extended-context records for one signal frame. */
struct extctx_layout {
	unsigned long size;		/* total bytes of all records below */
	unsigned int flags;		/* SC_* flags, see handle_flags() */
	struct _ctx_layout fpu;		/* FPU record; addr == NULL if absent */
	struct _ctx_layout end;		/* terminating "end" record */
};
  63. static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
  64. {
  65. return (void __user *)((char *)info + sizeof(struct sctx_info));
  66. }
  67. /*
  68. * Thread saved context copy to/from a signal context presumed to be on the
  69. * user stack, and therefore accessed with appropriate macros from uaccess.h.
  70. */
/*
 * Copy the FPU state saved in current->thread into the user-stack signal
 * context.  Uses raw __put_user(); the frame was range-checked with
 * access_ok() in setup_rt_frame().  Returns 0 on success, non-zero if a
 * user write faulted.
 */
static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		/* Element 0 of fpr[i] — presumably the scalar 64-bit part. */
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &regs[i]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
/*
 * Restore the FPU state in current->thread from the user-stack signal
 * context.  Mirror of copy_fpu_to_sigcontext(); returns 0 on success,
 * non-zero if a user read faulted.  Note set_fpr64() is still executed
 * with a stale fpr_val when __get_user() fails — the caller discards the
 * whole restore in that case.
 */
static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs = (uint64_t *)&ctx->regs;
	uint64_t __user *fcc = &ctx->fcc;
	uint32_t __user *fcsr = &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[i]);
		/* Element 0 of fpr[i] — presumably the scalar 64-bit part. */
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}
  103. /*
  104. * Wrappers for the assembly _{save,restore}_fp_context functions.
  105. */
  106. static int save_hw_fpu_context(struct fpu_context __user *ctx)
  107. {
  108. uint64_t __user *regs = (uint64_t *)&ctx->regs;
  109. uint64_t __user *fcc = &ctx->fcc;
  110. uint32_t __user *fcsr = &ctx->fcsr;
  111. return _save_fp_context(regs, fcc, fcsr);
  112. }
  113. static int restore_hw_fpu_context(struct fpu_context __user *ctx)
  114. {
  115. uint64_t __user *regs = (uint64_t *)&ctx->regs;
  116. uint64_t __user *fcc = &ctx->fcc;
  117. uint32_t __user *fcsr = &ctx->fcsr;
  118. return _restore_fp_context(regs, fcc, fcsr);
  119. }
/*
 * Check the user-supplied FCSR for exception causes that are both set and
 * enabled (the signal handler may have written them).  If found, clear
 * them in the user's FCSR and report SIGFPE so the exception is delivered
 * as a signal rather than re-trapping after sigreturn.
 *
 * Returns a __get_user()/__put_user() fault error, SIGFPE if a pending
 * enabled exception was found, or 0 otherwise.
 */
static int fcsr_pending(unsigned int __user *fcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fcsr);
	/* Map each Enable bit onto its Cause bit, 24 positions up. */
	enabled = ((csr & FPU_CSR_ALL_E) << 24);
	/*
	 * If the signal handler set some FPU exceptions, clear it and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}
  137. /*
  138. * Helper routines
  139. */
/*
 * Save the FPU context into the signal frame.  The copy runs with
 * pagefaults disabled (lock_fpu_owner()), so it can fail if the target
 * page is not resident; in that case the destination is touched with
 * plain __put_user() (pagefaults allowed) to fault it in, and the whole
 * save is retried.  Returns 0 on success or a fault error.
 */
static int protected_save_fpu_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc = &fpu_ctx->fcc;
	uint32_t __user *fcsr = &fpu_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			/* Our state is live in the FPU: dump it directly. */
			err = save_hw_fpu_context(fpu_ctx);
		else
			/* State was already spilled to the thread struct. */
			err = copy_fpu_to_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		/* Tag the record so parse_extcontext() can find it. */
		err |= __put_user(FPU_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->fpu.size, &info->size);
		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[31]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}
/*
 * Restore the FPU context from the signal frame.  Same fault-in-and-retry
 * protocol as protected_save_fpu_context(), but reading: if the copy under
 * pagefault_disable() fails, the source is touched with __get_user() and
 * the restore is retried.  Returns a fault error, SIGFPE if fcsr_pending()
 * found an enabled pending FP exception, or 0.
 */
static int protected_restore_fpu_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc = &fpu_ctx->fcc;
	uint32_t __user *fcsr = &fpu_ctx->fcsr;

	/* Clear (and report) any FP exception the handler left pending. */
	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			/* Our state is live in the FPU: load it directly. */
			err = restore_hw_fpu_context(fpu_ctx);
		else
			/* Restore into the thread struct instead. */
			err = copy_fpu_from_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[31]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}
/*
 * Fill in the sigcontext on the user stack: PC (csr_era), flags, general
 * registers, the optional FPU extcontext, and the terminating "end"
 * record whose zero magic stops parse_extcontext().  Returns 0 on
 * success, non-zero on a user-access fault.
 */
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
			    struct extctx_layout *extctx)
{
	int i, err = 0;
	struct sctx_info __user *info;

	err |= __put_user(regs->csr_era, &sc->sc_pc);
	err |= __put_user(extctx->flags, &sc->sc_flags);

	/* Slot 0 is stored as constant zero (restore skips it likewise). */
	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

	if (extctx->fpu.addr)
		err |= protected_save_fpu_context(extctx);

	/* Set the "end" magic */
	info = (struct sctx_info *)extctx->end.addr;
	err |= __put_user(0, &info->magic);
	err |= __put_user(0, &info->size);

	return err;
}
  217. static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
  218. {
  219. int err = 0;
  220. unsigned int magic, size;
  221. struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;
  222. while(1) {
  223. err |= __get_user(magic, &info->magic);
  224. err |= __get_user(size, &info->size);
  225. if (err)
  226. return err;
  227. switch (magic) {
  228. case 0: /* END */
  229. goto done;
  230. case FPU_CTX_MAGIC:
  231. if (size < (sizeof(struct sctx_info) +
  232. sizeof(struct fpu_context)))
  233. goto invalid;
  234. extctx->fpu.addr = info;
  235. break;
  236. default:
  237. goto invalid;
  238. }
  239. info = (struct sctx_info *)((char *)info + size);
  240. }
  241. done:
  242. return 0;
  243. invalid:
  244. return -EINVAL;
  245. }
/*
 * Restore CPU state from a sigcontext written by setup_sigcontext() and
 * possibly modified by the signal handler.  Returns 0, a negative
 * fault/-EINVAL error, or a positive signal number (SIGFPE) that the
 * caller must force on the task.
 */
static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int i, err = 0;
	struct extctx_layout extctx;

	memset(&extctx, 0, sizeof(struct extctx_layout));

	err = __get_user(extctx.flags, &sc->sc_flags);
	if (err)
		goto bad;

	/* Locate the extcontext records (e.g. extctx.fpu.addr). */
	err = parse_extcontext(sc, &extctx);
	if (err)
		goto bad;

	conditional_used_math(extctx.flags & SC_USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (!(extctx.flags & SC_USED_FP))
		lose_fpu(0);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->csr_era, &sc->sc_pc);
	/* Skip slot 0: setup_sigcontext() stores it as constant zero. */
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	if (extctx.fpu.addr)
		err |= protected_restore_fpu_context(&extctx);

bad:
	return err;
}
  274. static unsigned int handle_flags(void)
  275. {
  276. unsigned int flags = 0;
  277. flags = used_math() ? SC_USED_FP : 0;
  278. switch (current->thread.error_code) {
  279. case 1:
  280. flags |= SC_ADDRERR_RD;
  281. break;
  282. case 2:
  283. flags |= SC_ADDRERR_WR;
  284. break;
  285. }
  286. return flags;
  287. }
/*
 * Reserve space for one extended-context record below 'base' on the
 * downward-growing stack: the payload start is rounded down to
 * max(align, 16), then an sctx_info header is placed immediately below
 * it.  Records the header address and the total consumed size (header +
 * payload + alignment padding) in *layout, accumulates it into
 * extctx->size, and returns the new (lower) stack top.
 */
static unsigned long extframe_alloc(struct extctx_layout *extctx,
				    struct _ctx_layout *layout,
				    size_t size, unsigned int align, unsigned long base)
{
	unsigned long new_base = base - size;

	new_base = round_down(new_base, (align < 16 ? 16 : align));
	new_base -= sizeof(struct sctx_info);

	layout->addr = (void *)new_base;
	layout->size = (unsigned int)(base - new_base);
	extctx->size += layout->size;

	return new_base;
}
/*
 * Lay out the extended-context area below 'sp'.  Growing downward, the
 * terminating "end" record is placed first (highest address), then the
 * FPU record if the task has used FP and the CPU has an FPU.  Returns
 * the new (lower) stack pointer.
 */
static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
{
	unsigned long new_sp = sp;

	memset(extctx, 0, sizeof(struct extctx_layout));

	extctx->flags = handle_flags();

	/* Grow down, alloc "end" context info first. */
	new_sp -= sizeof(struct sctx_info);
	extctx->end.addr = (void *)new_sp;
	extctx->end.size = (unsigned int)sizeof(struct sctx_info);
	extctx->size += extctx->end.size;

	if (extctx->flags & SC_USED_FP) {
		if (cpu_has_fpu)
			new_sp = extframe_alloc(extctx, &extctx->fpu,
				sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
	}

	return new_sp;
}
  317. void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
  318. struct extctx_layout *extctx)
  319. {
  320. unsigned long sp;
  321. /* Default to using normal stack */
  322. sp = regs->regs[3];
  323. /*
  324. * If we are on the alternate signal stack and would overflow it, don't.
  325. * Return an always-bogus address instead so we will die with SIGSEGV.
  326. */
  327. if (on_sig_stack(sp) &&
  328. !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
  329. return (void __user __force *)(-1UL);
  330. sp = sigsp(sp, ksig);
  331. sp = round_down(sp, 16);
  332. sp = setup_extcontext(extctx, sp);
  333. sp -= sizeof(struct rt_sigframe);
  334. if (!IS_ALIGNED(sp, 16))
  335. BUG();
  336. return (void __user *)sp;
  337. }
/*
 * Unwind a signal frame built by setup_rt_frame(): restore the blocked
 * signal mask, the register/FPU context and the alternate-stack
 * settings, then resume the interrupted code.  Any failure kills the
 * task with SIGSEGV.  (The old "swap in the new signal mask, and wait
 * for a signal" wording described sigsuspend, not sigreturn.)
 */
asmlinkage long sys_rt_sigreturn(void)
{
	int sig;
	sigset_t set;
	struct pt_regs *regs;
	struct rt_sigframe __user *frame;

	regs = current_pt_regs();
	/* User $sp ($r3) still points at the frame pushed by setup_rt_frame(). */
	frame = (struct rt_sigframe __user *)regs->regs[3];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	/* May return < 0 on fault, or a signal (SIGFPE) to force. */
	sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	regs->regs[0] = 0; /* No syscall restarting */
	if (restore_altstack(&frame->rs_uctx.uc_stack))
		goto badframe;

	/* Return the restored a0 so the interrupted context's value survives. */
	return regs->regs[4];

badframe:
	force_sig(SIGSEGV);
	return 0;
}
/*
 * Build the rt signal frame on the user stack and redirect the saved
 * registers so that returning to user mode enters the handler.
 *
 * @sig_return: user address of the sigreturn trampoline (vDSO).
 * Returns 0 on success or -EFAULT if the frame could not be written.
 */
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	int err = 0;
	struct extctx_layout extctx;
	struct rt_sigframe __user *frame;

	frame = get_sigframe(ksig, regs, &extctx);
	/* Covers the frame plus the extcontext area laid out above it. */
	if (!access_ok(frame, sizeof(*frame) + extctx.size))
		return -EFAULT;

	/* Create siginfo. */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->rs_uctx.uc_flags);
	err |= __put_user(NULL, &frame->rs_uctx.uc_link);
	err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
	err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
	err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = pointer to siginfo
	 *   a2 = pointer to ucontext
	 *
	 * csr_era points to the signal handler, $r3 (sp) points to
	 * the struct rt_sigframe.
	 */
	regs->regs[4] = ksig->sig;
	regs->regs[5] = (unsigned long) &frame->rs_info;
	regs->regs[6] = (unsigned long) &frame->rs_uctx;
	regs->regs[3] = (unsigned long) frame;
	regs->regs[1] = (unsigned long) sig_return;	/* ra: handler returns via trampoline */
	regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->csr_era, regs->regs[1]);

	return 0;
}
/*
 * Deliver one signal: fix up an interrupted system call according to its
 * return value and the handler's SA_RESTART flag, then build the signal
 * frame with the vDSO sigreturn trampoline as return address.
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;
	sigset_t *oldset = sigmask_to_save();
	void *vdso = current->mm->context.vdso;

	/* Are we from a system call? (non-zero regs[0] marks that; cleared below) */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			/* Never restarted across a handler. */
			regs->regs[4] = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[4] = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			/* Re-issue the syscall: restore a0, back PC up one insn. */
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	rseq_signal_deliver(ksig, regs);

	ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}
/*
 * Called on the exit-to-user path: deliver a pending signal if there is
 * one, otherwise perform syscall restarting and put the saved sigmask
 * back.
 */
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal. */
		handle_signal(&ksig, regs);
		return;
	}

	/* Are we from a system call? (non-zero regs[0] marks that; cleared below) */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/* No handler ran: just re-issue the syscall. */
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
			break;
		case -ERESTART_RESTARTBLOCK:
			/* Restart via sys_restart_syscall; regs[11] is presumably
			 * the syscall-number register ($a7) — verify vs. ABI. */
			regs->regs[4] = regs->orig_a0;
			regs->regs[11] = __NR_restart_syscall;
			regs->csr_era -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}