signal_32.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
 * Copyright (C) 1997 David S. Miller ([email protected])
 *
 * Derived from "arch/i386/kernel/signal.c"
 * Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include <asm/syscalls_32.h>
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif

#include "signal.h"
#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end. We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs. This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
#define unsafe_put_sigset_t	unsafe_put_compat_sigset
#define unsafe_get_sigset_t	unsafe_get_compat_sigset

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	for (i = 0; i <= PT_RESULT; i++) {
		/* Force user to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		unsafe_put_user(val, &frame->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}
static __always_inline int
__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

#define unsafe_put_sigset_t(uset, set, label) do {			\
	sigset_t __user *__us = uset;					\
	const sigset_t *__s = set;					\
									\
	unsafe_copy_to_user(__us, __s, sizeof(*__us), label);		\
} while (0)

#define unsafe_get_sigset_t	unsafe_get_user_sigset

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}

static __always_inline
int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);

	/* copy from orig_r3 (the word after the MSR) up to the end */
	unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			      GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);
	return 0;

failed:
	return 1;
}
#endif

#define unsafe_save_general_regs(regs, frame, label) do {	\
	if (__unsafe_save_general_regs(regs, frame))		\
		goto label;					\
} while (0)

#define unsafe_restore_general_regs(regs, frame, label) do {	\
	if (__unsafe_restore_general_regs(regs, frame))		\
		goto label;					\
} while (0)
/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int	abigap[56];
};
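
/*
 * A rough sketch of the resulting user stack for a non-RT signal,
 * from high to low addresses (see the comment above struct sigframe):
 *
 *	original GPR1 (old user stack pointer)
 *	abigap[56]		(ABI save area)
 *	mctx			(all the register values)
 *	sctx			(the sigcontext)
 *	gap of __SIGNAL_FRAMESIZE bytes
 *	new GPR1 -> back chain word pointing at the old GPR1
 */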
/*
 * When we have rt signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 * (the +16 is to get the siginfo and ucontext in the same
 * positions as in older kernels).
 *
 * Each of these things must be a multiple of 16 bytes in size.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int	abigap[56];
};
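
/*
 * The minimum frame size is the worst case of the two layouts above,
 * each including its stack gap; the generic code uses this to size the
 * minimum signal stack advertised to userspace (AT_MINSIGSTKSZ, going
 * by the function's role elsewhere in the tree).
 */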
unsigned long get_min_sigframe_size_32(void)
{
	return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
		   sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
}
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}
static __always_inline int
__unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* save general registers */
	unsafe_save_general_regs(regs, frame, failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
			failed);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace. Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	/* We need to write 0 to the top 32 bits of the MSR in the tm frame
	 * so that we can check it on restore to see if TM is active
	 */
	if (tm_frame)
		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx))	\
		goto label;						\
} while (0)
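
/*
 * A note on the convention used above: the MSR word saved at
 * mc_gregs[PT_MSR] doubles as a validity mask. MSR_VEC, MSR_VSX and
 * MSR_SPE are set only when the corresponding register blocks in
 * mc_vregs/mc_vsregs were actually written, and the restore paths
 * below check the same bits to decide what to pull back in.
 */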
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static void prepare_save_tm_user_regs(void)
{
	WARN_ON(tm_suspend_disabled);

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
}

static __always_inline int
save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
			 struct mcontext __user *tm_frame, unsigned long msr)
{
	/* Save both sets of general registers */
	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
	unsafe_save_general_regs(regs, tm_frame, failed);

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext. This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);

	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		if (msr & MSR_VEC)
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.vr_state,
					    ELF_NVRREG * sizeof(vector128), failed);
		else
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.ckvr_state,
					    ELF_NVRREG * sizeof(vector128), failed);

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	unsafe_put_user(current->thread.ckvrsave,
			(u32 __user *)&frame->mc_vregs[32], failed);
	if (msr & MSR_VEC)
		unsafe_put_user(current->thread.vrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
	else
		unsafe_put_user(current->thread.ckvrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);

	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
	if (msr & MSR_FP)
		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
	else
		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);

	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace. Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
		if (msr & MSR_VSX)
			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
		else
			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);

		msr |= MSR_VSX;
	}

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
#else
static void prepare_save_tm_user_regs(void) { }

static __always_inline int
save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
			 struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
#endif

#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
		goto label;						\
} while (0)
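
/*
 * In the TM case, "frame" receives the checkpointed (pre-transaction,
 * "ck") state and "tm_frame" receives the live transactional state;
 * when a given facility was not in use inside the transaction (its MSR
 * bit is clear), both frames get the checkpointed copy.
 */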
/*
 * Restore the current user register values from the user stack
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	unsafe_restore_general_regs(regs, sr, failed);
	set_trap_norestart(regs);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX register upper halves from the stack to
		 * a local buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

#ifdef CONFIG_SPE
	/*
	 * Force the process to reload the spe registers from
	 * current->thread when it next does spe instructions.
	 * Since this is user ABI, we must enforce the sizing.
	 */
	BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
	regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
				      sizeof(current->thread.spe), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(&current->thread.spe, 0, sizeof(current->thread.spe));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();
	return 0;

failed:
	user_read_access_end();
	return 1;
}
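
/*
 * Note that the MSR is never restored wholesale from the user frame:
 * only the LE bit (and, in the TM path below, the TS bits) are taken
 * from it, which keeps userspace from smuggling privileged MSR bits
 * back in through sigreturn.
 */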
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	unsigned long msr, msr_hi;
	int i;

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;

	unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
	unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);

	/* Restore the previous little-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.ckvrsave,
			(u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);

	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX register upper halves from the stack to
		 * a local buffer, then write this out to the thread_struct
		 */
		unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}

	user_read_access_end();

	if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
		return 1;

	unsafe_restore_general_regs(regs, tm_sr, failed);

	/* restore altivec registers from the stack */
	if (msr & MSR_VEC)
		unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave,
			(u32 __user *)&tm_sr->mc_vregs[32], failed);

	unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);

	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX register upper halves from the stack to
		 * a local buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	}

	/* Get the top half of the MSR from the user context */
	unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
	msr_hi <<= 32;

	user_read_access_end();

	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));

	/* Now, recheckpoint. This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs. After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
	}
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs_set_return_msr(regs, regs->msr | MSR_VEC);
	}

	preempt_enable();
	return 0;

failed:
	user_read_access_end();
	return 1;
}
#else
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	return 0;
}
#endif

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Set up the sigreturn trampoline: either in the vDSO or on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
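		/*
		 * The trampoline was just written through the data cache;
		 * flush it to memory and invalidate the icache line so the
		 * CPU cannot fetch stale instructions when it returns here.
		 */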
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
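	/*
	 * Sketching the 32-bit rt signal calling convention as used here:
	 * r1 = new stack frame, r3 = signal number, r4 = &siginfo,
	 * r5 = &ucontext, r6 = frame address.
	 */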
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}
/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		    struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	user_access_end();

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
		mcp = (struct mcontext __user *)(u64)cmcp;
	}
#else
	unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
	user_read_access_end();

	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
	unsafe_get_user(cmcp, &ucp->uc_regs, failed);

	user_read_access_end();

	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
#endif
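
/*
 * do_setcontext() and do_setcontext_tm() are the common back ends used
 * by swapcontext and rt_sigreturn below: they install the signal mask
 * from the ucontext and then restore the register state it points at
 * via uc_regs.
 */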
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext. No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject a new context that sets the MSR VSX bits but
	 * doesn't provide the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		prepare_save_user_regs(ctx_has_vsx_region);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;
		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_readable((char __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0)) {
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

failed:
	user_write_access_end();
	return -EFAULT;
}
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
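	/*
	 * This undoes the newsp = frame - (__SIGNAL_FRAMESIZE + 16)
	 * computation in handle_rt_signal32(): gpr[1] is the stack pointer
	 * the handler ran with, so the frame sits that far above it.
	 */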
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, this includes any transactional state created
	 * within it. We only check for suspended as we can never be
	 * active in the kernel; if we are active, there is nothing better
	 * to do than go ahead and Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're
			 * in a transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad Thing at RFID.
		 */
		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return. But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it. -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}

#ifdef CONFIG_PPC32
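/*
 * PPC32 only: apply a list of debug operations (single-step and
 * branch-trace enables) and then install the given context, so a
 * debugger can update debug state and return to the debuggee in a
 * single syscall.
 */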
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
		int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;
		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_readable((char __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);
		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return. But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it. -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp = NULL;
	unsigned long long msr_hi = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

	mcp = (struct mcontext __user *)&sf->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
#endif
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else {
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		if (restore_user_regs(regs, sr, 1)) {
			signal_fault(current, regs, "sys_sigreturn", sr);
			force_sig(SIGSEGV);
			return 0;
		}
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "sys_sigreturn", sc);
	force_sig(SIGSEGV);
	return 0;
}