// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
        struct siginfo info;
        struct ucontext uc;
};

struct frame_record {
        u64 fp;
        u64 lr;
};

struct rt_sigframe_user_layout {
        struct rt_sigframe __user *sigframe;
        struct frame_record __user *next_frame;

        unsigned long size;     /* size of allocated sigframe data */
        unsigned long limit;    /* largest allowed size */

        unsigned long fpsimd_offset;
        unsigned long esr_offset;
        unsigned long sve_offset;
        unsigned long za_offset;
        unsigned long extra_offset;
        unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
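
/*
 * A sketch of the resulting user stack, as constructed by get_sigframe()
 * and setup_sigframe() below (illustrative only, not ABI documentation):
 *
 *      higher addresses
 *        original sp (or the sigaltstack top, see sigsp())
 *        struct frame_record { fp; lr; }    <- user->next_frame
 *        struct rt_sigframe {               <- user->sigframe == new sp
 *                struct siginfo info;
 *                struct ucontext uc;
 *        };
 *      lower addresses
 *
 * uc.uc_mcontext.__reserved[] holds a chain of { magic, size } records
 * (fpsimd, esr, sve, za, ...) closed by a { 0, 0 } terminator; if space
 * runs out, an extra_context record redirects the parser to additional
 * space later in the same frame.
 */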

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
        const size_t reserved_size =
                sizeof(user->sigframe->uc.uc_mcontext.__reserved);

        memset(user, 0, sizeof(*user));
        user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
        user->limit = user->size + reserved_size;

        user->limit -= TERMINATOR_SIZE;
        user->limit -= EXTRA_CONTEXT_SIZE;

        /* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
        return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
                            unsigned long *offset, size_t size, bool extend)
{
        size_t padded_size = round_up(size, 16);

        if (padded_size > user->limit - user->size &&
            !user->extra_offset &&
            extend) {
                int ret;

                user->limit += EXTRA_CONTEXT_SIZE;
                ret = __sigframe_alloc(user, &user->extra_offset,
                                       sizeof(struct extra_context), false);
                if (ret) {
                        user->limit -= EXTRA_CONTEXT_SIZE;
                        return ret;
                }

                /* Reserve space for the __reserved[] terminator */
                user->size += TERMINATOR_SIZE;

                /*
                 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
                 * the terminator:
                 */
                user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
        }

        /* Still not enough space? Bad luck! */
        if (padded_size > user->limit - user->size)
                return -ENOMEM;

        *offset = user->size;
        user->size += padded_size;

        return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
                          unsigned long *offset, size_t size)
{
        return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
        int ret;

        /* Un-reserve the space reserved for the terminator: */
        user->limit += TERMINATOR_SIZE;

        ret = sigframe_alloc(user, &user->end_offset,
                             sizeof(struct _aarch64_ctx));
        if (ret)
                return ret;

        /* Prevent further allocation: */
        user->limit = user->size;
        return 0;
}

static void __user *apply_user_offset(
        struct rt_sigframe_user_layout const *user, unsigned long offset)
{
        char __user *base = (char __user *)user->sigframe;

        return base + offset;
}
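
/*
 * Typical use of the two helpers above, as seen in setup_sigframe_layout()
 * and setup_sigframe() later in this file. Illustrative sketch only;
 * "foo_context" and FOO_MAGIC are hypothetical names:
 *
 *      unsigned long foo_offset;
 *      int err;
 *
 *      err = sigframe_alloc(user, &foo_offset, sizeof(struct foo_context));
 *      ...
 *      struct foo_context __user *ctx = apply_user_offset(user, foo_offset);
 *      __put_user_error(FOO_MAGIC, &ctx->head.magic, err);
 */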

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
        struct user_fpsimd_state const *fpsimd =
                &current->thread.uw.fpsimd_state;
        int err;

        /* copy the FP and status/control registers */
        err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
        __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
        __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

        /* copy the magic/size information */
        __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
        __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

        return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
        struct user_fpsimd_state fpsimd;
        __u32 magic, size;
        int err = 0;

        /* check the magic/size information */
        __get_user_error(magic, &ctx->head.magic, err);
        __get_user_error(size, &ctx->head.size, err);
        if (err)
                return -EFAULT;
        if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
                return -EINVAL;

        /* copy the FP and status/control registers */
        err = __copy_from_user(fpsimd.vregs, ctx->vregs,
                               sizeof(fpsimd.vregs));
        __get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
        __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

        clear_thread_flag(TIF_SVE);

        /* load the hardware registers from the fpsimd_state structure */
        if (!err)
                fpsimd_update_current_state(&fpsimd);

        return err ? -EFAULT : 0;
}

struct user_ctxs {
        struct fpsimd_context __user *fpsimd;
        struct sve_context __user *sve;
        struct za_context __user *za;
};

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
        int err = 0;
        u16 reserved[ARRAY_SIZE(ctx->__reserved)];
        u16 flags = 0;
        unsigned int vl = task_get_sve_vl(current);
        unsigned int vq = 0;

        if (thread_sm_enabled(&current->thread)) {
                vl = task_get_sme_vl(current);
                vq = sve_vq_from_vl(vl);
                flags |= SVE_SIG_FLAG_SM;
        } else if (test_thread_flag(TIF_SVE)) {
                vq = sve_vq_from_vl(vl);
        }

        memset(reserved, 0, sizeof(reserved));

        __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
        __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
                         &ctx->head.size, err);
        __put_user_error(vl, &ctx->vl, err);
        __put_user_error(flags, &ctx->flags, err);
        BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
        err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

        if (vq) {
                /*
                 * This assumes that the SVE state has already been saved to
                 * the task struct by calling the function
                 * fpsimd_signal_preserve_current_state().
                 */
                err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
                                      current->thread.sve_state,
                                      SVE_SIG_REGS_SIZE(vq));
        }

        return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
        int err;
        unsigned int vl, vq;
        struct user_fpsimd_state fpsimd;
        struct sve_context sve;

        if (__copy_from_user(&sve, user->sve, sizeof(sve)))
                return -EFAULT;

        if (sve.flags & SVE_SIG_FLAG_SM) {
                if (!system_supports_sme())
                        return -EINVAL;

                vl = task_get_sme_vl(current);
        } else {
                /*
                 * An SME-only system uses SVE for streaming mode, so it
                 * can have an SVE-formatted context with a zero VL and
                 * no payload data.
                 */
                if (!system_supports_sve() && !system_supports_sme())
                        return -EINVAL;

                vl = task_get_sve_vl(current);
        }

        if (sve.vl != vl)
                return -EINVAL;

        if (sve.head.size <= sizeof(*user->sve)) {
                clear_thread_flag(TIF_SVE);
                current->thread.svcr &= ~SVCR_SM_MASK;
                goto fpsimd_only;
        }

        vq = sve_vq_from_vl(sve.vl);

        if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
                return -EINVAL;

        /*
         * Careful: we are about to __copy_from_user() directly into
         * thread.sve_state with preemption enabled, so protection is
         * needed to prevent a racing context switch from writing stale
         * registers back over the new data.
         */

        fpsimd_flush_task_state(current);
        /* From now, fpsimd_thread_switch() won't touch thread.sve_state */

        sve_alloc(current, true);
        if (!current->thread.sve_state) {
                clear_thread_flag(TIF_SVE);
                return -ENOMEM;
        }

        err = __copy_from_user(current->thread.sve_state,
                               (char __user const *)user->sve +
                                        SVE_SIG_REGS_OFFSET,
                               SVE_SIG_REGS_SIZE(vq));
        if (err)
                return -EFAULT;

        if (sve.flags & SVE_SIG_FLAG_SM)
                current->thread.svcr |= SVCR_SM_MASK;
        else
                set_thread_flag(TIF_SVE);

fpsimd_only:
        /* copy the FP and status/control registers */
        /* restore_sigframe() already checked that user->fpsimd != NULL. */
        err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
                               sizeof(fpsimd.vregs));
        __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
        __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

        /* load the hardware registers from the fpsimd_state structure */
        if (!err)
                fpsimd_update_current_state(&fpsimd);

        return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
        WARN_ON_ONCE(1);
        return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_za_context(struct za_context __user *ctx)
{
        int err = 0;
        u16 reserved[ARRAY_SIZE(ctx->__reserved)];
        unsigned int vl = task_get_sme_vl(current);
        unsigned int vq;

        if (thread_za_enabled(&current->thread))
                vq = sve_vq_from_vl(vl);
        else
                vq = 0;

        memset(reserved, 0, sizeof(reserved));

        __put_user_error(ZA_MAGIC, &ctx->head.magic, err);
        __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
                         &ctx->head.size, err);
        __put_user_error(vl, &ctx->vl, err);
        BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
        err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

        if (vq) {
                /*
                 * This assumes that the ZA state has already been saved to
                 * the task struct by calling the function
                 * fpsimd_signal_preserve_current_state().
                 */
                err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
                                      current->thread.za_state,
                                      ZA_SIG_REGS_SIZE(vq));
        }

        return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
        int err;
        unsigned int vq;
        struct za_context za;

        if (__copy_from_user(&za, user->za, sizeof(za)))
                return -EFAULT;

        if (za.vl != task_get_sme_vl(current))
                return -EINVAL;

        if (za.head.size <= sizeof(*user->za)) {
                current->thread.svcr &= ~SVCR_ZA_MASK;
                return 0;
        }

        vq = sve_vq_from_vl(za.vl);

        if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
                return -EINVAL;

        /*
         * Careful: we are about to __copy_from_user() directly into
         * thread.za_state with preemption enabled, so protection is
         * needed to prevent a racing context switch from writing stale
         * registers back over the new data.
         */

        fpsimd_flush_task_state(current);
        /* From now, fpsimd_thread_switch() won't touch thread.za_state */

        sme_alloc(current, true);
        if (!current->thread.za_state) {
                current->thread.svcr &= ~SVCR_ZA_MASK;
                clear_thread_flag(TIF_SME);
                return -ENOMEM;
        }

        err = __copy_from_user(current->thread.za_state,
                               (char __user const *)user->za +
                                        ZA_SIG_REGS_OFFSET,
                               ZA_SIG_REGS_SIZE(vq));
        if (err)
                return -EFAULT;

        set_thread_flag(TIF_SME);
        current->thread.svcr |= SVCR_ZA_MASK;

        return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

static int parse_user_sigframe(struct user_ctxs *user,
                               struct rt_sigframe __user *sf)
{
        struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
        struct _aarch64_ctx __user *head;
        char __user *base = (char __user *)&sc->__reserved;
        size_t offset = 0;
        size_t limit = sizeof(sc->__reserved);
        bool have_extra_context = false;
        char const __user *const sfp = (char const __user *)sf;

        user->fpsimd = NULL;
        user->sve = NULL;
        user->za = NULL;

        if (!IS_ALIGNED((unsigned long)base, 16))
                goto invalid;

        while (1) {
                int err = 0;
                u32 magic, size;
                char const __user *userp;
                struct extra_context const __user *extra;
                u64 extra_datap;
                u32 extra_size;
                struct _aarch64_ctx const __user *end;
                u32 end_magic, end_size;

                if (limit - offset < sizeof(*head))
                        goto invalid;

                if (!IS_ALIGNED(offset, 16))
                        goto invalid;

                head = (struct _aarch64_ctx __user *)(base + offset);
                __get_user_error(magic, &head->magic, err);
                __get_user_error(size, &head->size, err);
                if (err)
                        return err;

                if (limit - offset < size)
                        goto invalid;

                switch (magic) {
                case 0:
                        if (size)
                                goto invalid;

                        goto done;

                case FPSIMD_MAGIC:
                        if (!system_supports_fpsimd())
                                goto invalid;
                        if (user->fpsimd)
                                goto invalid;

                        if (size < sizeof(*user->fpsimd))
                                goto invalid;

                        user->fpsimd = (struct fpsimd_context __user *)head;
                        break;

                case ESR_MAGIC:
                        /* ignore */
                        break;

                case SVE_MAGIC:
                        if (!system_supports_sve() && !system_supports_sme())
                                goto invalid;

                        if (user->sve)
                                goto invalid;

                        if (size < sizeof(*user->sve))
                                goto invalid;

                        user->sve = (struct sve_context __user *)head;
                        break;

                case ZA_MAGIC:
                        if (!system_supports_sme())
                                goto invalid;

                        if (user->za)
                                goto invalid;

                        if (size < sizeof(*user->za))
                                goto invalid;

                        user->za = (struct za_context __user *)head;
                        break;

                case EXTRA_MAGIC:
                        if (have_extra_context)
                                goto invalid;

                        if (size < sizeof(*extra))
                                goto invalid;

                        userp = (char const __user *)head;

                        extra = (struct extra_context const __user *)userp;
                        userp += size;

                        __get_user_error(extra_datap, &extra->datap, err);
                        __get_user_error(extra_size, &extra->size, err);
                        if (err)
                                return err;

                        /* Check for the dummy terminator in __reserved[]: */

                        if (limit - offset - size < TERMINATOR_SIZE)
                                goto invalid;

                        end = (struct _aarch64_ctx const __user *)userp;
                        userp += TERMINATOR_SIZE;

                        __get_user_error(end_magic, &end->magic, err);
                        __get_user_error(end_size, &end->size, err);
                        if (err)
                                return err;

                        if (end_magic || end_size)
                                goto invalid;

                        /* Prevent looping/repeated parsing of extra_context */
                        have_extra_context = true;

                        base = (__force void __user *)extra_datap;
                        if (!IS_ALIGNED((unsigned long)base, 16))
                                goto invalid;

                        if (!IS_ALIGNED(extra_size, 16))
                                goto invalid;

                        if (base != userp)
                                goto invalid;

                        /* Reject "unreasonably large" frames: */
                        if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
                                goto invalid;

                        /*
                         * Ignore trailing terminator in __reserved[]
                         * and start parsing extra data:
                         */
                        offset = 0;
                        limit = extra_size;

                        if (!access_ok(base, limit))
                                goto invalid;

                        continue;

                default:
                        goto invalid;
                }

                if (size < sizeof(*head))
                        goto invalid;

                if (limit - offset < size)
                        goto invalid;

                offset += size;
        }

done:
        return 0;

invalid:
        return -EINVAL;
}
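
/*
 * For reference, user space sees the same { magic, size } chain that is
 * parsed above. A minimal sketch of walking it from a signal handler
 * (user-space code, assuming the uapi <asm/sigcontext.h> definitions;
 * error checking elided):
 *
 *      static void handler(int sig, siginfo_t *info, void *ucp)
 *      {
 *              ucontext_t *uc = ucp;
 *              struct _aarch64_ctx *head =
 *                      (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *              struct fpsimd_context *fpsimd = NULL;
 *
 *              while (head->magic) {
 *                      if (head->magic == FPSIMD_MAGIC)
 *                              fpsimd = (struct fpsimd_context *)head;
 *                      head = (struct _aarch64_ctx *)
 *                              ((char *)head + head->size);
 *              }
 *      }
 *
 * A walker that understands EXTRA_MAGIC would additionally follow
 * extra->datap, mirroring the redirection handled above.
 */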

static int restore_sigframe(struct pt_regs *regs,
                            struct rt_sigframe __user *sf)
{
        sigset_t set;
        int i, err;
        struct user_ctxs user;

        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (err == 0)
                set_current_blocked(&set);

        for (i = 0; i < 31; i++)
                __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
                                 err);
        __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
        __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
        __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

        /*
         * Avoid sys_rt_sigreturn() restarting.
         */
        forget_syscall(regs);

        err |= !valid_user_regs(&regs->user_regs, current);
        if (err == 0)
                err = parse_user_sigframe(&user, sf);

        if (err == 0 && system_supports_fpsimd()) {
                if (!user.fpsimd)
                        return -EINVAL;

                if (user.sve)
                        err = restore_sve_fpsimd_context(&user);
                else
                        err = restore_fpsimd_context(user.fpsimd);
        }

        if (err == 0 && system_supports_sme() && user.za)
                err = restore_za_context(&user);

        return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
        struct pt_regs *regs = current_pt_regs();
        struct rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 128-bit boundary, 'sp' should
         * be 16-byte aligned here.
         */
        if (regs->sp & 15)
                goto badframe;

        frame = (struct rt_sigframe __user *)regs->sp;

        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;

        if (restore_sigframe(regs, frame))
                goto badframe;

        if (restore_altstack(&frame->uc.uc_stack))
                goto badframe;

        return regs->regs[0];

badframe:
        arm64_notify_segfault(regs->sp);
        return 0;
}
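
/*
 * User space does not normally call rt_sigreturn() itself: setup_return()
 * below points the handler's return address at a trampoline (the vDSO
 * sigtramp unless SA_RESTORER is set), which issues this syscall. A plain
 * registration is enough to exercise the whole round trip (illustrative
 * user-space sketch; handler() is assumed to be defined elsewhere):
 *
 *      struct sigaction sa = {
 *              .sa_sigaction = handler,
 *              .sa_flags = SA_SIGINFO,
 *      };
 *
 *      sigaction(SIGUSR1, &sa, NULL);
 *      raise(SIGUSR1);  // returning from handler() runs the trampoline,
 *                       // which lands in sys_rt_sigreturn() above
 */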

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
                                 bool add_all)
{
        int err;

        if (system_supports_fpsimd()) {
                err = sigframe_alloc(user, &user->fpsimd_offset,
                                     sizeof(struct fpsimd_context));
                if (err)
                        return err;
        }

        /* fault information, if valid */
        if (add_all || current->thread.fault_code) {
                err = sigframe_alloc(user, &user->esr_offset,
                                     sizeof(struct esr_context));
                if (err)
                        return err;
        }

        if (system_supports_sve() || system_supports_sme()) {
                unsigned int vq = 0;

                if (add_all || test_thread_flag(TIF_SVE) ||
                    thread_sm_enabled(&current->thread)) {
                        int vl = max(sve_max_vl(), sme_max_vl());

                        if (!add_all)
                                vl = thread_get_cur_vl(&current->thread);

                        vq = sve_vq_from_vl(vl);
                }

                err = sigframe_alloc(user, &user->sve_offset,
                                     SVE_SIG_CONTEXT_SIZE(vq));
                if (err)
                        return err;
        }

        if (system_supports_sme()) {
                unsigned int vl;
                unsigned int vq = 0;

                if (add_all)
                        vl = sme_max_vl();
                else
                        vl = task_get_sme_vl(current);

                if (thread_za_enabled(&current->thread))
                        vq = sve_vq_from_vl(vl);

                err = sigframe_alloc(user, &user->za_offset,
                                     ZA_SIG_CONTEXT_SIZE(vq));
                if (err)
                        return err;
        }

        return sigframe_alloc_end(user);
}

static int setup_sigframe(struct rt_sigframe_user_layout *user,
                          struct pt_regs *regs, sigset_t *set)
{
        int i, err = 0;
        struct rt_sigframe __user *sf = user->sigframe;

        /* set up the stack frame for unwinding */
        __put_user_error(regs->regs[29], &user->next_frame->fp, err);
        __put_user_error(regs->regs[30], &user->next_frame->lr, err);

        for (i = 0; i < 31; i++)
                __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
                                 err);
        __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
        __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
        __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

        __put_user_error(current->thread.fault_address,
                         &sf->uc.uc_mcontext.fault_address, err);

        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

        if (err == 0 && system_supports_fpsimd()) {
                struct fpsimd_context __user *fpsimd_ctx =
                        apply_user_offset(user, user->fpsimd_offset);
                err |= preserve_fpsimd_context(fpsimd_ctx);
        }

        /* fault information, if valid */
        if (err == 0 && user->esr_offset) {
                struct esr_context __user *esr_ctx =
                        apply_user_offset(user, user->esr_offset);

                __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
                __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
                __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
        }

        /* Scalable Vector Extension state (including streaming), if present */
        if ((system_supports_sve() || system_supports_sme()) &&
            err == 0 && user->sve_offset) {
                struct sve_context __user *sve_ctx =
                        apply_user_offset(user, user->sve_offset);
                err |= preserve_sve_context(sve_ctx);
        }

        /* ZA state if present */
        if (system_supports_sme() && err == 0 && user->za_offset) {
                struct za_context __user *za_ctx =
                        apply_user_offset(user, user->za_offset);
                err |= preserve_za_context(za_ctx);
        }

        if (err == 0 && user->extra_offset) {
                char __user *sfp = (char __user *)user->sigframe;
                char __user *userp =
                        apply_user_offset(user, user->extra_offset);

                struct extra_context __user *extra;
                struct _aarch64_ctx __user *end;
                u64 extra_datap;
                u32 extra_size;

                extra = (struct extra_context __user *)userp;
                userp += EXTRA_CONTEXT_SIZE;

                end = (struct _aarch64_ctx __user *)userp;
                userp += TERMINATOR_SIZE;

                /*
                 * extra_datap is just written to the signal frame.
                 * The value gets cast back to a void __user *
                 * during sigreturn.
                 */
                extra_datap = (__force u64)userp;
                extra_size = sfp + round_up(user->size, 16) - userp;

                __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
                __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
                __put_user_error(extra_datap, &extra->datap, err);
                __put_user_error(extra_size, &extra->size, err);

                /* Add the terminator */
                __put_user_error(0, &end->magic, err);
                __put_user_error(0, &end->size, err);
        }

        /* set the "end" magic */
        if (err == 0) {
                struct _aarch64_ctx __user *end =
                        apply_user_offset(user, user->end_offset);

                __put_user_error(0, &end->magic, err);
                __put_user_error(0, &end->size, err);
        }

        return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
                        struct ksignal *ksig, struct pt_regs *regs)
{
        unsigned long sp, sp_top;
        int err;

        init_user_layout(user);
        err = setup_sigframe_layout(user, false);
        if (err)
                return err;

        sp = sp_top = sigsp(regs->sp, ksig);

        sp = round_down(sp - sizeof(struct frame_record), 16);
        user->next_frame = (struct frame_record __user *)sp;

        sp = round_down(sp, 16) - sigframe_size(user);
        user->sigframe = (struct rt_sigframe __user *)sp;

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(user->sigframe, sp_top - sp))
                return -EFAULT;

        return 0;
}

static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                         struct rt_sigframe_user_layout *user, int usig)
{
        __sigrestore_t sigtramp;

        regs->regs[0] = usig;
        regs->sp = (unsigned long)user->sigframe;
        regs->regs[29] = (unsigned long)&user->next_frame->fp;
        regs->pc = (unsigned long)ka->sa.sa_handler;

        /*
         * Signal delivery is a (wacky) indirect function call in
         * userspace, so simulate the same setting of BTYPE as a BLR
         * <register containing the signal handler entry point>.
         * Signal delivery to a location in a PROT_BTI guarded page
         * that is not a function entry point will now trigger a
         * SIGILL in userspace.
         *
         * If the signal handler entry point is not in a PROT_BTI
         * guarded page, this is harmless.
         */
        if (system_supports_bti()) {
                regs->pstate &= ~PSR_BTYPE_MASK;
                regs->pstate |= PSR_BTYPE_C;
        }

        /* TCO (Tag Check Override) always cleared for signal handlers */
        regs->pstate &= ~PSR_TCO_BIT;

        /* Signal handlers are invoked with ZA and streaming mode disabled */
        if (system_supports_sme()) {
                /*
                 * If we were in streaming mode the saved register
                 * state was SVE but we will exit SM and use the
                 * FPSIMD register state - flush the saved FPSIMD
                 * register state in case it gets loaded.
                 */
                if (current->thread.svcr & SVCR_SM_MASK)
                        memset(&current->thread.uw.fpsimd_state, 0,
                               sizeof(current->thread.uw.fpsimd_state));

                current->thread.svcr &= ~(SVCR_ZA_MASK |
                                          SVCR_SM_MASK);
                sme_smstop();
        }

        if (ka->sa.sa_flags & SA_RESTORER)
                sigtramp = ka->sa.sa_restorer;
        else
                sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

        regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
                          struct pt_regs *regs)
{
        struct rt_sigframe_user_layout user;
        struct rt_sigframe __user *frame;
        int err = 0;

        fpsimd_signal_preserve_current_state();

        if (get_sigframe(&user, ksig, regs))
                return 1;

        frame = user.sigframe;

        __put_user_error(0, &frame->uc.uc_flags, err);
        __put_user_error(NULL, &frame->uc.uc_link, err);

        err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
        err |= setup_sigframe(&user, regs, set);
        if (err == 0) {
                setup_return(regs, &ksig->ka, &user, usig);
                if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
                        err |= copy_siginfo_to_user(&frame->info, &ksig->info);
                        regs->regs[1] = (unsigned long)&frame->info;
                        regs->regs[2] = (unsigned long)&frame->uc;
                }
        }

        return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
        if (is_compat_task())
                compat_setup_restart_syscall(regs);
        else
                regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int usig = ksig->sig;
        int ret;

        rseq_signal_deliver(ksig, regs);

        /*
         * Set up the stack frame
         */
        if (is_compat_task()) {
                if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                        ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
                else
                        ret = compat_setup_frame(usig, ksig, oldset, regs);
        } else {
                ret = setup_rt_frame(usig, ksig, oldset, regs);
        }

        /*
         * Check that the resulting registers are actually sane.
         */
        ret |= !valid_user_regs(&regs->user_regs, current);

        /* Step into the signal handler if we are stepping */
        signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
        unsigned long continue_addr = 0, restart_addr = 0;
        int retval = 0;
        struct ksignal ksig;
        bool syscall = in_syscall(regs);

        /*
         * If we were from a system call, check for system call restarting...
         */
        if (syscall) {
                continue_addr = regs->pc;
                restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
                retval = regs->regs[0];

                /*
                 * Avoid additional syscall restarting via ret_to_user.
                 */
                forget_syscall(regs);

                /*
                 * Prepare for system call restart. We do this here so that a
                 * debugger will see the already changed PC.
                 */
                switch (retval) {
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                case -ERESTART_RESTARTBLOCK:
                        regs->regs[0] = regs->orig_x0;
                        regs->pc = restart_addr;
                        break;
                }
        }

        /*
         * Get the signal to deliver. When running under ptrace, at this point
         * the debugger may change all of our registers.
         */
        if (get_signal(&ksig)) {
                /*
                 * Depending on the signal settings, we may need to revert the
                 * decision to restart the system call, but skip this if a
                 * debugger has chosen to restart at a different PC.
                 */
                if (regs->pc == restart_addr &&
                    (retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK ||
                     (retval == -ERESTARTSYS &&
                      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
                        syscall_set_return_value(current, regs, -EINTR, 0);
                        regs->pc = continue_addr;
                }

                handle_signal(&ksig, regs);
                return;
        }

        /*
         * Handle restarting a different system call. As above, if a debugger
         * has chosen to restart at a different PC, ignore the restart.
         */
        if (syscall && regs->pc == restart_addr) {
                if (retval == -ERESTART_RESTARTBLOCK)
                        setup_restart_syscall(regs);
                user_rewind_single_step(current);
        }

        restore_saved_sigmask();
}
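
/*
 * The retval checks above implement the user-visible SA_RESTART contract.
 * Illustrative user-space sketch (handler(), fd, buf and len are assumed
 * to exist):
 *
 *      struct sigaction sa = { .sa_handler = handler };
 *
 *      sa.sa_flags = 0;
 *      sigaction(SIGALRM, &sa, NULL);
 *      read(fd, buf, len);   // interrupted: fails with errno == EINTR
 *                            // (the -ERESTARTSYS case without SA_RESTART)
 *
 *      sa.sa_flags = SA_RESTART;
 *      sigaction(SIGALRM, &sa, NULL);
 *      read(fd, buf, len);   // interrupted: transparently restarted
 */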

void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
        do {
                if (thread_flags & _TIF_NEED_RESCHED) {
                        /* Unmask Debug and SError for the next task */
                        local_daif_restore(DAIF_PROCCTX_NOIRQ);

                        schedule();
                } else {
                        local_daif_restore(DAIF_PROCCTX);

                        if (thread_flags & _TIF_UPROBE)
                                uprobe_notify_resume(regs);

                        if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
                                clear_thread_flag(TIF_MTE_ASYNC_FAULT);
                                send_sig_fault(SIGSEGV, SEGV_MTEAERR,
                                               (void __user *)NULL, current);
                        }

                        if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                                do_signal(regs);

                        if (thread_flags & _TIF_NOTIFY_RESUME)
                                resume_user_mode_work(regs);

                        if (thread_flags & _TIF_FOREIGN_FPSTATE)
                                fpsimd_restore_current_state();
                }

                local_daif_mask();
                thread_flags = read_thread_flags();
        } while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
        struct rt_sigframe_user_layout user;

        init_user_layout(&user);

        /*
         * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
         * be big enough, but it's our best guess:
         */
        if (WARN_ON(setup_sigframe_layout(&user, true)))
                return;

        signal_minsigstksz = sigframe_size(&user) +
                round_up(sizeof(struct frame_record), 16) +
                16; /* max alignment padding */
}
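
/*
 * User space can read the value computed here via the auxiliary vector
 * and size an alternate signal stack from it. A minimal sketch (user-space
 * code, assuming a libc that provides getauxval() and defines
 * AT_MINSIGSTKSZ):
 *
 *      size_t sz = getauxval(AT_MINSIGSTKSZ);
 *      if (sz < SIGSTKSZ)
 *              sz = SIGSTKSZ;
 *
 *      stack_t ss = { .ss_sp = malloc(sz), .ss_size = sz };
 *      sigaltstack(&ss, NULL);
 */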

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 9);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);