ptrace.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1992 Ross Biro
  7. * Copyright (C) Linus Torvalds
  8. * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
  9. * Copyright (C) 1996 David S. Miller
  10. * Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected]
  11. * Copyright (C) 1999 MIPS Technologies, Inc.
  12. * Copyright (C) 2000 Ulf Carlsson
  13. *
  14. * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15. * binaries.
  16. */
  17. #include <linux/compiler.h>
  18. #include <linux/context_tracking.h>
  19. #include <linux/elf.h>
  20. #include <linux/kernel.h>
  21. #include <linux/sched.h>
  22. #include <linux/sched/task_stack.h>
  23. #include <linux/mm.h>
  24. #include <linux/errno.h>
  25. #include <linux/ptrace.h>
  26. #include <linux/regset.h>
  27. #include <linux/smp.h>
  28. #include <linux/security.h>
  29. #include <linux/stddef.h>
  30. #include <linux/audit.h>
  31. #include <linux/seccomp.h>
  32. #include <linux/ftrace.h>
  33. #include <asm/byteorder.h>
  34. #include <asm/cpu.h>
  35. #include <asm/cpu-info.h>
  36. #include <asm/dsp.h>
  37. #include <asm/fpu.h>
  38. #include <asm/mipsregs.h>
  39. #include <asm/mipsmtregs.h>
  40. #include <asm/page.h>
  41. #include <asm/processor.h>
  42. #include <asm/syscall.h>
  43. #include <linux/uaccess.h>
  44. #include <asm/bootinfo.h>
  45. #include <asm/reg.h>
  46. #define CREATE_TRACE_POINTS
  47. #include <trace/events/syscalls.h>
  48. /*
  49. * Called by kernel/ptrace.c when detaching..
  50. *
  51. * Make sure single step bits etc are not set.
  52. */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
  58. /*
  59. * Read a general register set. We always use the 64-bit format, even
  60. * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  61. * Registers are sign extended to fill the available space.
  62. */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	/* 32 GPRs + lo/hi/epc/badvaddr/status/cause = 38 doublewords. */
	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	/*
	 * Each value is widened/sign-extended to 64 bits before the copy-out,
	 * per the format described in the comment above.
	 * NOTE(review): individual __put_user() results are ignored; the
	 * range was validated by access_ok() above.
	 */
	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i],
			   (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}
  80. /*
  81. * Write a general register set. As for PTRACE_GETREGS, we always use
  82. * the 64-bit format. On a 32-bit kernel only the lower order half
  83. * (according to endianness) will be used.
  84. */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	/* Same 38-doubleword layout as ptrace_getregs(). */
	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	/* Only GPRs, lo, hi and epc are writable by the tracer. */
	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written. */

	/* System call number may have been changed */
	mips_syscall_update_nr(child, regs);

	return 0;
}
/*
 * Copy the thread's hardware watch registers out to the tracer's
 * struct pt_watch_regs buffer (PTRACE_GET_WATCH_REGS).
 */
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

	/*
	 * WATCH_STYLE names the union member of struct pt_watch_regs that
	 * matches this kernel's ABI; the in-kernel state is always kept in
	 * the mips3264 layout.
	 */
#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		/* Expose only the byte-mask and I/R/W bits of watchhi. */
		__put_user(child->thread.watch.mips3264.watchhi[i] &
			   (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	/*
	 * Zero-fill the unused slots; 8 is the array length in
	 * struct pt_watch_regs — TODO confirm against asm/ptrace.h.
	 */
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}
/*
 * Install hardware watch registers supplied by the tracer
 * (PTRACE_SET_WATCH_REGS). All values are validated before any
 * thread state is modified, so a failure leaves the child untouched.
 */
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
		/* The watch address must lie within the user address range. */
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		/* Only the byte-mask bits of watchhi may be supplied. */
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/*
		 * NOTE(review): the historical comment here said "Set the G
		 * bit", but no G bit is set at this point; only the validated
		 * mask bits are stored — presumably G is applied when the
		 * hardware registers are loaded. Verify against
		 * mips_install_watch_registers().
		 */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	/* Load the registers on context switch only if any watch is armed. */
	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}
  181. /* regset get/set implementations */
  182. #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
  183. static int gpr32_get(struct task_struct *target,
  184. const struct user_regset *regset,
  185. struct membuf to)
  186. {
  187. struct pt_regs *regs = task_pt_regs(target);
  188. u32 uregs[ELF_NGREG] = {};
  189. mips_dump_regs32(uregs, regs);
  190. return membuf_write(&to, uregs, sizeof(uregs));
  191. }
  192. static int gpr32_set(struct task_struct *target,
  193. const struct user_regset *regset,
  194. unsigned int pos, unsigned int count,
  195. const void *kbuf, const void __user *ubuf)
  196. {
  197. struct pt_regs *regs = task_pt_regs(target);
  198. u32 uregs[ELF_NGREG];
  199. unsigned start, num_regs, i;
  200. int err;
  201. start = pos / sizeof(u32);
  202. num_regs = count / sizeof(u32);
  203. if (start + num_regs > ELF_NGREG)
  204. return -EIO;
  205. err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
  206. sizeof(uregs));
  207. if (err)
  208. return err;
  209. for (i = start; i < num_regs; i++) {
  210. /*
  211. * Cast all values to signed here so that if this is a 64-bit
  212. * kernel, the supplied 32-bit values will be sign extended.
  213. */
  214. switch (i) {
  215. case MIPS32_EF_R1 ... MIPS32_EF_R25:
  216. /* k0/k1 are ignored. */
  217. case MIPS32_EF_R28 ... MIPS32_EF_R31:
  218. regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
  219. break;
  220. case MIPS32_EF_LO:
  221. regs->lo = (s32)uregs[i];
  222. break;
  223. case MIPS32_EF_HI:
  224. regs->hi = (s32)uregs[i];
  225. break;
  226. case MIPS32_EF_CP0_EPC:
  227. regs->cp0_epc = (s32)uregs[i];
  228. break;
  229. }
  230. }
  231. /* System call number may have been changed */
  232. mips_syscall_update_nr(target, regs);
  233. return 0;
  234. }
  235. #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
  236. #ifdef CONFIG_64BIT
  237. static int gpr64_get(struct task_struct *target,
  238. const struct user_regset *regset,
  239. struct membuf to)
  240. {
  241. struct pt_regs *regs = task_pt_regs(target);
  242. u64 uregs[ELF_NGREG] = {};
  243. mips_dump_regs64(uregs, regs);
  244. return membuf_write(&to, uregs, sizeof(uregs));
  245. }
  246. static int gpr64_set(struct task_struct *target,
  247. const struct user_regset *regset,
  248. unsigned int pos, unsigned int count,
  249. const void *kbuf, const void __user *ubuf)
  250. {
  251. struct pt_regs *regs = task_pt_regs(target);
  252. u64 uregs[ELF_NGREG];
  253. unsigned start, num_regs, i;
  254. int err;
  255. start = pos / sizeof(u64);
  256. num_regs = count / sizeof(u64);
  257. if (start + num_regs > ELF_NGREG)
  258. return -EIO;
  259. err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
  260. sizeof(uregs));
  261. if (err)
  262. return err;
  263. for (i = start; i < num_regs; i++) {
  264. switch (i) {
  265. case MIPS64_EF_R1 ... MIPS64_EF_R25:
  266. /* k0/k1 are ignored. */
  267. case MIPS64_EF_R28 ... MIPS64_EF_R31:
  268. regs->regs[i - MIPS64_EF_R0] = uregs[i];
  269. break;
  270. case MIPS64_EF_LO:
  271. regs->lo = uregs[i];
  272. break;
  273. case MIPS64_EF_HI:
  274. regs->hi = uregs[i];
  275. break;
  276. case MIPS64_EF_CP0_EPC:
  277. regs->cp0_epc = uregs[i];
  278. break;
  279. }
  280. }
  281. /* System call number may have been changed */
  282. mips_syscall_update_nr(target, regs);
  283. return 0;
  284. }
  285. #endif /* CONFIG_64BIT */
  286. #ifdef CONFIG_MIPS_FP_SUPPORT
  287. /*
  288. * Poke at FCSR according to its mask. Set the Cause bits even
  289. * if a corresponding Enable bit is set. This will be noticed at
  290. * the time the thread is switched to and SIGFPE thrown accordingly.
  291. */
  292. static void ptrace_setfcr31(struct task_struct *child, u32 value)
  293. {
  294. u32 fcr31;
  295. u32 mask;
  296. fcr31 = child->thread.fpu.fcr31;
  297. mask = boot_cpu_data.fpu_msk31;
  298. child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
  299. }
/*
 * Copy the FP context out to the tracer: 32 doubleword FP registers
 * followed by FCSR and FIR (33 doublewords total).
 */
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		/* FP never used: report all-ones for every register. */
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	/* data is __u32 *, so +64/+65 land just past the 32 doublewords. */
	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}
/*
 * Write the FP context from the tracer's buffer: 32 doubleword FP
 * registers followed by FCSR. FIR is read-only and ignored.
 */
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	/* Make sure the child has an FP context before poking at it. */
	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	/* FCSR word sits right after the 32 doublewords (data is __u32 *). */
	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written. */

	return 0;
}
  337. /*
  338. * Copy the floating-point context to the supplied NT_PRFPREG buffer,
  339. * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
  340. * correspond 1:1 to buffer slots. Only general registers are copied.
  341. */
  342. static void fpr_get_fpa(struct task_struct *target,
  343. struct membuf *to)
  344. {
  345. membuf_write(to, &target->thread.fpu,
  346. NUM_FPU_REGS * sizeof(elf_fpreg_t));
  347. }
  348. /*
  349. * Copy the floating-point context to the supplied NT_PRFPREG buffer,
  350. * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
  351. * general register slots are copied to buffer slots. Only general
  352. * registers are copied.
  353. */
  354. static void fpr_get_msa(struct task_struct *target, struct membuf *to)
  355. {
  356. unsigned int i;
  357. BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
  358. for (i = 0; i < NUM_FPU_REGS; i++)
  359. membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
  360. }
  361. /*
  362. * Copy the floating-point context to the supplied NT_PRFPREG buffer.
  363. * Choose the appropriate helper for general registers, and then copy
  364. * the FCSR and FIR registers separately.
  365. */
  366. static int fpr_get(struct task_struct *target,
  367. const struct user_regset *regset,
  368. struct membuf to)
  369. {
  370. if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
  371. fpr_get_fpa(target, &to);
  372. else
  373. fpr_get_msa(target, &to);
  374. membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
  375. membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
  376. return 0;
  377. }
  378. /*
  379. * Copy the supplied NT_PRFPREG buffer to the floating-point context,
  380. * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
  381. * context's general register slots. Only general registers are copied.
  382. */
  383. static int fpr_set_fpa(struct task_struct *target,
  384. unsigned int *pos, unsigned int *count,
  385. const void **kbuf, const void __user **ubuf)
  386. {
  387. return user_regset_copyin(pos, count, kbuf, ubuf,
  388. &target->thread.fpu,
  389. 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
  390. }
  391. /*
  392. * Copy the supplied NT_PRFPREG buffer to the floating-point context,
  393. * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
  394. * bits only of FP context's general register slots. Only general
  395. * registers are copied.
  396. */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	/*
	 * Pull one 64-bit slot at a time and deposit it into the low half
	 * of the corresponding vector register; stop early once the
	 * supplied buffer is exhausted (*count reaches 0).
	 */
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}
  415. /*
  416. * Copy the supplied NT_PRFPREG buffer to the floating-point context.
  417. * Choose the appropriate helper for general registers, and then copy
  418. * the FCSR register separately. Ignore the incoming FIR register
  419. * contents though, as the register is read-only.
  420. *
  421. * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
  422. * which is supposed to have been guaranteed by the kernel before
  423. * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
  424. * so that we can safely avoid preinitializing temporaries for
  425. * partial register writes.
  426. */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	/* Regset layout: NUM_FPU_REGS general registers, then FCSR, then FIR. */
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	/* Whole-slot writes only, per the header comment above. */
	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Ensure an FP context exists before any of it is overwritten. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* Anything left after the general registers starts with FCSR. */
	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	/* Consume (and discard) any trailing FIR data: FIR is read-only. */
	if (count > 0)
		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						fir_pos,
						fir_pos + sizeof(u32));

	return err;
}
  460. /* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
  461. static int fp_mode_get(struct task_struct *target,
  462. const struct user_regset *regset,
  463. struct membuf to)
  464. {
  465. return membuf_store(&to, (int)mips_get_process_fp_mode(target));
  466. }
  467. /*
  468. * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
  469. *
  470. * We optimize for the case where `count % sizeof(int) == 0', which
  471. * is supposed to have been guaranteed by the kernel before calling
  472. * us, e.g. in `ptrace_regset'. We enforce that requirement, so
  473. * that we can safely avoid preinitializing temporaries for partial
  474. * mode writes.
  475. */
static int fp_mode_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int fp_mode;
	int err;

	/* Whole-word writes only, per the header comment above. */
	BUG_ON(count % sizeof(int));

	/* The regset is a single int; reject anything past it. */
	if (pos + count > sizeof(fp_mode))
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
				 sizeof(fp_mode));
	if (err)
		return err;

	/* count == 0 here means the caller supplied an empty write; skip. */
	if (count > 0)
		err = mips_set_process_fp_mode(target, fp_mode);

	return err;
}
  494. #endif /* CONFIG_MIPS_FP_SUPPORT */
  495. #ifdef CONFIG_CPU_HAS_MSA
/*
 * Control-register tail of the NT_MIPS_MSA regset, following the
 * NUM_FPU_REGS vector registers. Field meanings per msa_get():
 * fir/msair come from boot_cpu_data, fcsr/msacsr from the thread.
 */
struct msa_control_regs {
	unsigned int fir;	/* FP implementation register (read-only) */
	unsigned int fcsr;	/* FP control/status */
	unsigned int msair;	/* MSA implementation register (read-only) */
	unsigned int msacsr;	/* MSA control/status */
};
/*
 * Emit all NUM_FPU_REGS vector-register slots, copying the first
 * @live_sz bytes of each from the thread's FP context and padding the
 * remainder of each regset->size slot with 0xff bytes.
 */
static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to,
			  unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	/* Padding is emitted in sizeof(fill) chunks, so it must divide. */
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}
/*
 * Copy the MSA context to the supplied NT_MIPS_MSA buffer: the vector
 * registers (padded with 0xff where no live context exists) followed
 * by the control registers.
 */
static int msa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	const struct msa_control_regs ctrl_regs = {
		.fir = boot_cpu_data.fpu_id,
		.fcsr = target->thread.fpu.fcr31,
		.msair = boot_cpu_data.msa_id,
		.msacsr = target->thread.fpu.msacsr,
	};

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or MSA, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to,
			      sizeof(target->thread.fpu.fpr[0]));
	}

	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
}
/*
 * Write the MSA context from the supplied NT_MIPS_MSA buffer: the
 * vector registers followed by the control registers. Exception cause
 * bits in FCSR/MSACSR are masked off rather than accepted from the
 * tracer.
 */
static int msa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	struct msa_control_regs ctrl_regs;
	unsigned int cp_sz;
	int i, err, start;

	/* Ensure an FP context exists before any of it is overwritten. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			/* Excess buffer bytes per slot are skipped, not copied. */
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	if (!err)
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
					 wr_size, wr_size + sizeof(ctrl_regs));
	if (!err) {
		/* Strip exception-cause bits; fir/msair are read-only and ignored. */
		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
	}

	return err;
}
  581. #endif /* CONFIG_CPU_HAS_MSA */
  582. #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
  583. /*
  584. * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
  585. */
  586. static int dsp32_get(struct task_struct *target,
  587. const struct user_regset *regset,
  588. struct membuf to)
  589. {
  590. u32 dspregs[NUM_DSP_REGS + 1];
  591. unsigned int i;
  592. BUG_ON(to.left % sizeof(u32));
  593. if (!cpu_has_dsp)
  594. return -EIO;
  595. for (i = 0; i < NUM_DSP_REGS; i++)
  596. dspregs[i] = target->thread.dsp.dspr[i];
  597. dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
  598. return membuf_write(&to, dspregs, sizeof(dspregs));
  599. }
  600. /*
  601. * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
  602. */
  603. static int dsp32_set(struct task_struct *target,
  604. const struct user_regset *regset,
  605. unsigned int pos, unsigned int count,
  606. const void *kbuf, const void __user *ubuf)
  607. {
  608. unsigned int start, num_regs, i;
  609. u32 dspregs[NUM_DSP_REGS + 1];
  610. int err;
  611. BUG_ON(count % sizeof(u32));
  612. if (!cpu_has_dsp)
  613. return -EIO;
  614. start = pos / sizeof(u32);
  615. num_regs = count / sizeof(u32);
  616. if (start + num_regs > NUM_DSP_REGS + 1)
  617. return -EIO;
  618. err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
  619. sizeof(dspregs));
  620. if (err)
  621. return err;
  622. for (i = start; i < num_regs; i++)
  623. switch (i) {
  624. case 0 ... NUM_DSP_REGS - 1:
  625. target->thread.dsp.dspr[i] = (s32)dspregs[i];
  626. break;
  627. case NUM_DSP_REGS:
  628. target->thread.dsp.dspcontrol = (s32)dspregs[i];
  629. break;
  630. }
  631. return 0;
  632. }
  633. #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
  634. #ifdef CONFIG_64BIT
  635. /*
  636. * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
  637. */
  638. static int dsp64_get(struct task_struct *target,
  639. const struct user_regset *regset,
  640. struct membuf to)
  641. {
  642. u64 dspregs[NUM_DSP_REGS + 1];
  643. unsigned int i;
  644. BUG_ON(to.left % sizeof(u64));
  645. if (!cpu_has_dsp)
  646. return -EIO;
  647. for (i = 0; i < NUM_DSP_REGS; i++)
  648. dspregs[i] = target->thread.dsp.dspr[i];
  649. dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
  650. return membuf_write(&to, dspregs, sizeof(dspregs));
  651. }
  652. /*
  653. * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
  654. */
  655. static int dsp64_set(struct task_struct *target,
  656. const struct user_regset *regset,
  657. unsigned int pos, unsigned int count,
  658. const void *kbuf, const void __user *ubuf)
  659. {
  660. unsigned int start, num_regs, i;
  661. u64 dspregs[NUM_DSP_REGS + 1];
  662. int err;
  663. BUG_ON(count % sizeof(u64));
  664. if (!cpu_has_dsp)
  665. return -EIO;
  666. start = pos / sizeof(u64);
  667. num_regs = count / sizeof(u64);
  668. if (start + num_regs > NUM_DSP_REGS + 1)
  669. return -EIO;
  670. err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
  671. sizeof(dspregs));
  672. if (err)
  673. return err;
  674. for (i = start; i < num_regs; i++)
  675. switch (i) {
  676. case 0 ... NUM_DSP_REGS - 1:
  677. target->thread.dsp.dspr[i] = dspregs[i];
  678. break;
  679. case NUM_DSP_REGS:
  680. target->thread.dsp.dspcontrol = dspregs[i];
  681. break;
  682. }
  683. return 0;
  684. }
  685. #endif /* CONFIG_64BIT */
  686. /*
  687. * Determine whether the DSP context is present.
  688. */
  689. static int dsp_active(struct task_struct *target,
  690. const struct user_regset *regset)
  691. {
  692. return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
  693. }
/* Index of each regset within the user_regset arrays defined below. */
enum mips_regset {
	REGSET_GPR,
	REGSET_DSP,
#ifdef CONFIG_MIPS_FP_SUPPORT
	REGSET_FPR,
	REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
	REGSET_MSA,
#endif
};
/* Name -> byte offset mapping for one member of struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(reg, r) { \
	.name = #reg, \
	.offset = offsetof(struct pt_regs, r) \
}

/* Sentinel terminating regoffset_table. */
#define REG_OFFSET_END { \
	.name = NULL, \
	.offset = 0 \
}

/*
 * Lookup table used by regs_query_register_offset(); terminated by
 * REG_OFFSET_END (name == NULL).
 */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};
  769. /**
  770. * regs_query_register_offset() - query register offset from its name
  771. * @name: the name of a register
  772. *
  773. * regs_query_register_offset() returns the offset of a register in struct
  774. * pt_regs from its name. If the name is invalid, this returns -EINVAL;
  775. */
  776. int regs_query_register_offset(const char *name)
  777. {
  778. const struct pt_regs_offset *roff;
  779. for (roff = regoffset_table; roff->name != NULL; roff++)
  780. if (!strcmp(roff->name, name))
  781. return roff->offset;
  782. return -EINVAL;
  783. }
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

/*
 * Register sets exported to userspace (core-dump notes and the
 * PTRACE_GETREGSET/SETREGSET interface) for tasks using the 32-bit
 * register model.
 */
static const struct user_regset mips_regsets[] = {
	/* General purpose registers, 32 bits each. */
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.regset_get	= gpr32_get,
		.set		= gpr32_set,
	},
	/* DSP ASE accumulators plus the DSP control word. */
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= dsp32_get,
		.set		= dsp32_set,
		/* Exposed only when dsp_active() reports the set applies. */
		.active		= dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	/* Floating point registers. */
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	/* FP mode: a single int. */
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	/* MSA vector registers (16 bytes each) plus one extra slot. */
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

/* Regset view handed out for 32-bit tasks. */
static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT

/*
 * Register sets exported to userspace for tasks using the 64-bit
 * register model (shared by the n64 and n32 views below).
 */
static const struct user_regset mips64_regsets[] = {
	/* General purpose registers, 64 bits each. */
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.regset_get	= gpr64_get,
		.set		= gpr64_set,
	},
	/* DSP ASE accumulators plus the DSP control word. */
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= dsp64_get,
		.set		= dsp64_set,
		/* Exposed only when dsp_active() reports the set applies. */
		.active		= dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	/* FP mode: a single int. */
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
	/* Floating point registers. */
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	/* MSA vector registers (16 bytes each) plus one extra slot. */
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

/* Regset view for native 64-bit (n64) tasks. */
static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

/*
 * n32 tasks use the 64-bit register sets but are distinguished by the
 * EF_MIPS_ABI2 ELF flag in the view.
 */
static const struct user_regset_view user_mipsn32_view = {
	.name		= "mipsn32",
	.e_flags	= EF_MIPS_ABI2,
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */
/*
 * task_user_regset_view() - pick the regset view matching @task's ABI.
 *
 * On a pure 32-bit kernel there is only one choice. On a 64-bit kernel
 * the task's thread flags select the o32 view (TIF_32BIT_REGS), the n32
 * view (TIF_32BIT_ADDR), or the native 64-bit view.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	/* o32 compat task: 32-bit register model. */
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	/* n32 compat task: 64-bit registers, 32-bit addresses. */
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}
/*
 * arch_ptrace() - MIPS-specific handling of ptrace(2) requests.
 * @child:   the traced task
 * @request: the ptrace request code
 * @addr:    request-specific address (a register index for
 *           PTRACE_PEEKUSR/PTRACE_POKEUSR, a user pointer for the
 *           watch-register requests)
 * @data:    request-specific datum or user-space pointer
 *
 * Requests not handled here fall through to the generic ptrace_request().
 * Returns 0 on success or a negative errno.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	/* User-space views of the raw argument words. */
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		/* @addr selects a pseudo-register by index. */
		switch (addr) {
		case 0 ... 31:			/* general purpose registers */
			tmp = regs->regs[addr];
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {	/* FP registers */
			union fpureg *fregs;

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		}
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
#endif
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {	/* DSP accumulators */
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = dregs[addr - DSP_BASE];
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		/* Copy the fetched value out to the tracer. */
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	/* Write @data into the pseudo-register selected by @addr. */
	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:			/* general purpose registers */
			regs->regs[addr] = data;
			/* System call number may have been changed */
			if (addr == 2)
				mips_syscall_update_nr(child, regs);
			else if (addr == 4 &&
				 mips_syscall_is_indirect(child, regs))
				mips_syscall_update_nr(child, regs);
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {	/* FP registers */
			union fpureg *fregs = get_fpu_regs(child);

			/* Make sure the FP context exists before writing it. */
			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
#endif
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {	/* DSP accumulators */
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;
#endif
	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		/* Anything else is handled by the generic code. */
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}
/*
 * Notification of system call entry
 * - triggered by current->work.syscall_trace
 *
 * Returns the (possibly rewritten) syscall number to execute, or -1 if
 * the syscall should be skipped.
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	/* We may arrive with RCU still in "user" context. */
	user_exit();

	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (ptrace_report_syscall_entry(regs))
			return -1;
		/* The tracer may have rewritten the syscall number. */
		syscall = current_thread_info()->syscall;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		/* Build the seccomp_data snapshot the filter inspects. */
		sd.nr = syscall;
		sd.arch = syscall_get_arch(current);
		syscall_get_arguments(current, regs, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
		/* seccomp may also have changed the syscall number. */
		syscall = current_thread_info()->syscall;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	/* a0-a3 live in regs[4..7] per the MIPS calling convention. */
	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return syscall;
}
/*
 * Notification of system call exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, 0);

	/* Re-enter RCU "user" context before returning to userspace. */
	user_enter();
}
  1195. }