ptrace.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2001 - 2007 Tensilica Inc.
  7. *
  8. * Joe Taylor <[email protected], [email protected]>
  9. * Chris Zankel <[email protected]>
  10. * Scott Foehner<[email protected]>,
  11. * Kevin Chea
  12. * Marc Gauthier<[email protected]> <[email protected]>
  13. */
  14. #include <linux/audit.h>
  15. #include <linux/errno.h>
  16. #include <linux/hw_breakpoint.h>
  17. #include <linux/kernel.h>
  18. #include <linux/mm.h>
  19. #include <linux/perf_event.h>
  20. #include <linux/ptrace.h>
  21. #include <linux/regset.h>
  22. #include <linux/sched.h>
  23. #include <linux/sched/task_stack.h>
  24. #include <linux/seccomp.h>
  25. #include <linux/security.h>
  26. #include <linux/signal.h>
  27. #include <linux/smp.h>
  28. #include <linux/uaccess.h>
  29. #define CREATE_TRACE_POINTS
  30. #include <trace/events/syscalls.h>
  31. #include <asm/coprocessor.h>
  32. #include <asm/elf.h>
  33. #include <asm/page.h>
  34. #include <asm/ptrace.h>
/*
 * regset get() handler for the general-purpose register set
 * (NT_PRSTATUS): copy the traced task's saved pt_regs into a
 * struct user_pt_regs in the layout userspace expects.
 */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	struct user_pt_regs newregs = {
		.pc = regs->pc,
		/*
		 * PS.EXCM is only set in the saved regs for exception
		 * handling convenience; it is never set while the user
		 * task runs, so mask it out of the reported PS.
		 */
		.ps = regs->ps & ~(1 << PS_EXCM_BIT),
		.lbeg = regs->lbeg,
		.lend = regs->lend,
		.lcount = regs->lcount,
		.sar = regs->sar,
		.threadptr = regs->threadptr,
		.windowbase = regs->windowbase,
		.windowstart = regs->windowstart,
		.syscall = regs->syscall,
	};

	/*
	 * areg[] is stored rotated by windowbase panes (4 registers of
	 * 4 bytes each, hence the *4 and *16 factors); un-rotate it so
	 * newregs.a[] is in canonical a0.. order.
	 */
	memcpy(newregs.a,
	       regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4,
	       regs->windowbase * 16);
	memcpy(newregs.a + regs->windowbase * 4,
	       regs->areg,
	       (WSBITS - regs->windowbase) * 16);

	return membuf_write(&to, &newregs, sizeof(newregs));
}
/*
 * regset set() handler for the general-purpose register set:
 * validate a user-supplied struct user_pt_regs and write it back
 * into the traced task's pt_regs.
 *
 * Returns 0 on success, -EINVAL for an out-of-range windowbase, or
 * the error from user_regset_copyin().
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = {0};
	struct pt_regs *regs;
	/* Only the CALLINC and OWB fields of PS may be changed from
	 * userspace; all other PS bits are preserved. */
	const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	/* windowbase counts 4-register panes; reject out-of-range values
	 * before they are used to index areg[] below. */
	if (newregs.windowbase >= XCHAL_NUM_AREGS / 4)
		return -EINVAL;

	regs = task_pt_regs(target);
	regs->pc = newregs.pc;
	regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask);
	regs->lbeg = newregs.lbeg;
	regs->lend = newregs.lend;
	regs->lcount = newregs.lcount;
	regs->sar = newregs.sar;
	regs->threadptr = newregs.threadptr;
	/* Syscall number 0 means "leave the current value alone". */
	if (newregs.syscall)
		regs->syscall = newregs.syscall;

	if (newregs.windowbase != regs->windowbase ||
	    newregs.windowstart != regs->windowstart) {
		u32 rotws, wmask;

		/* Rotate windowstart so bit 0 corresponds to the new
		 * windowbase, and drop the current frame's own bit. */
		rotws = (((newregs.windowstart |
			   (newregs.windowstart << WSBITS)) >>
			  newregs.windowbase) &
			 ((1 << WSBITS) - 1)) & ~1;
		/* NOTE(review): wmask appears to pack a live-frame count
		 * in the upper bits with the low 4 rotated windowstart
		 * bits (bit 0 always set) - confirm against the window
		 * overflow/underflow handlers before relying on this. */
		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
			(rotws & 0xF) | 1;
		regs->windowbase = newregs.windowbase;
		regs->windowstart = newregs.windowstart;
		regs->wmask = wmask;
	}

	/* Re-rotate the flat a0.. image back into the windowbase-rotated
	 * layout used by pt_regs.areg[] (inverse of gpr_get()). */
	memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4,
	       newregs.a, newregs.windowbase * 16);
	memcpy(regs->areg, newregs.a + newregs.windowbase * 4,
	       (WSBITS - newregs.windowbase) * 16);

	return 0;
}
  103. static int tie_get(struct task_struct *target,
  104. const struct user_regset *regset,
  105. struct membuf to)
  106. {
  107. int ret;
  108. struct pt_regs *regs = task_pt_regs(target);
  109. struct thread_info *ti = task_thread_info(target);
  110. elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);
  111. if (!newregs)
  112. return -ENOMEM;
  113. newregs->opt = regs->xtregs_opt;
  114. newregs->user = ti->xtregs_user;
  115. #if XTENSA_HAVE_COPROCESSORS
  116. /* Flush all coprocessor registers to memory. */
  117. coprocessor_flush_all(ti);
  118. newregs->cp0 = ti->xtregs_cp.cp0;
  119. newregs->cp1 = ti->xtregs_cp.cp1;
  120. newregs->cp2 = ti->xtregs_cp.cp2;
  121. newregs->cp3 = ti->xtregs_cp.cp3;
  122. newregs->cp4 = ti->xtregs_cp.cp4;
  123. newregs->cp5 = ti->xtregs_cp.cp5;
  124. newregs->cp6 = ti->xtregs_cp.cp6;
  125. newregs->cp7 = ti->xtregs_cp.cp7;
  126. #endif
  127. ret = membuf_write(&to, newregs, sizeof(*newregs));
  128. kfree(newregs);
  129. return ret;
  130. }
  131. static int tie_set(struct task_struct *target,
  132. const struct user_regset *regset,
  133. unsigned int pos, unsigned int count,
  134. const void *kbuf, const void __user *ubuf)
  135. {
  136. int ret;
  137. struct pt_regs *regs = task_pt_regs(target);
  138. struct thread_info *ti = task_thread_info(target);
  139. elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);
  140. if (!newregs)
  141. return -ENOMEM;
  142. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  143. newregs, 0, -1);
  144. if (ret)
  145. goto exit;
  146. regs->xtregs_opt = newregs->opt;
  147. ti->xtregs_user = newregs->user;
  148. #if XTENSA_HAVE_COPROCESSORS
  149. /* Flush all coprocessors before we overwrite them. */
  150. coprocessor_flush_release_all(ti);
  151. ti->xtregs_cp.cp0 = newregs->cp0;
  152. ti->xtregs_cp.cp1 = newregs->cp1;
  153. ti->xtregs_cp.cp2 = newregs->cp2;
  154. ti->xtregs_cp.cp3 = newregs->cp3;
  155. ti->xtregs_cp.cp4 = newregs->cp4;
  156. ti->xtregs_cp.cp5 = newregs->cp5;
  157. ti->xtregs_cp.cp6 = newregs->cp6;
  158. ti->xtregs_cp.cp7 = newregs->cp7;
  159. #endif
  160. exit:
  161. kfree(newregs);
  162. return ret;
  163. }
/* Indices of the entries in xtensa_regsets[]. */
enum xtensa_regset {
	REGSET_GPR,
	REGSET_TIE,
};

static const struct user_regset xtensa_regsets[] = {
	/* General-purpose registers, exported as NT_PRSTATUS. */
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = gpr_get,
		.set = gpr_set,
	},
	/* TIE/coprocessor state, exported under the NT_PRFPREG note. */
	[REGSET_TIE] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(elf_xtregs_t) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = tie_get,
		.set = tie_set,
	},
};

/* The single regset view used by all xtensa tasks. */
static const struct user_regset_view user_xtensa_view = {
	.name = "xtensa",
	.e_machine = EM_XTENSA,
	.regsets = xtensa_regsets,
	.n = ARRAY_SIZE(xtensa_regsets)
};
/* Every xtensa task uses the same regset view. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_xtensa_view;
}
/* Request single-stepping for @child by setting TIF_SINGLESTEP. */
void user_enable_single_step(struct task_struct *child)
{
	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/* Cancel single-stepping for @child by clearing TIF_SINGLESTEP. */
void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
 * Called by kernel/ptrace.c when detaching to disable single stepping.
 * No architecture-specific cleanup is needed here on xtensa.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}
/* PTRACE_GETREGS: copy the GPR regset to userspace at @uregs. */
static int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR,
				   0, sizeof(xtensa_gregset_t), uregs);
}
/* PTRACE_SETREGS: load the GPR regset from userspace at @uregs. */
static int ptrace_setregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR,
				     0, sizeof(xtensa_gregset_t), uregs);
}
/* PTRACE_GETXTREGS: copy the TIE/coprocessor regset to userspace. */
static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE,
				   0, sizeof(elf_xtregs_t), uregs);
}
/* PTRACE_SETXTREGS: load the TIE/coprocessor regset from userspace. */
static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE,
				     0, sizeof(elf_xtregs_t), uregs);
}
  231. static int ptrace_peekusr(struct task_struct *child, long regno,
  232. long __user *ret)
  233. {
  234. struct pt_regs *regs;
  235. unsigned long tmp;
  236. regs = task_pt_regs(child);
  237. tmp = 0; /* Default return value. */
  238. switch(regno) {
  239. case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
  240. tmp = regs->areg[regno - REG_AR_BASE];
  241. break;
  242. case REG_A_BASE ... REG_A_BASE + 15:
  243. tmp = regs->areg[regno - REG_A_BASE];
  244. break;
  245. case REG_PC:
  246. tmp = regs->pc;
  247. break;
  248. case REG_PS:
  249. /* Note: PS.EXCM is not set while user task is running;
  250. * its being set in regs is for exception handling
  251. * convenience.
  252. */
  253. tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
  254. break;
  255. case REG_WB:
  256. break; /* tmp = 0 */
  257. case REG_WS:
  258. {
  259. unsigned long wb = regs->windowbase;
  260. unsigned long ws = regs->windowstart;
  261. tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
  262. ((1 << WSBITS) - 1);
  263. break;
  264. }
  265. case REG_LBEG:
  266. tmp = regs->lbeg;
  267. break;
  268. case REG_LEND:
  269. tmp = regs->lend;
  270. break;
  271. case REG_LCOUNT:
  272. tmp = regs->lcount;
  273. break;
  274. case REG_SAR:
  275. tmp = regs->sar;
  276. break;
  277. case SYSCALL_NR:
  278. tmp = regs->syscall;
  279. break;
  280. default:
  281. return -EIO;
  282. }
  283. return put_user(tmp, ret);
  284. }
  285. static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
  286. {
  287. struct pt_regs *regs;
  288. regs = task_pt_regs(child);
  289. switch (regno) {
  290. case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
  291. regs->areg[regno - REG_AR_BASE] = val;
  292. break;
  293. case REG_A_BASE ... REG_A_BASE + 15:
  294. regs->areg[regno - REG_A_BASE] = val;
  295. break;
  296. case REG_PC:
  297. regs->pc = val;
  298. break;
  299. case SYSCALL_NR:
  300. regs->syscall = val;
  301. break;
  302. default:
  303. return -EIO;
  304. }
  305. return 0;
  306. }
  307. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  308. static void ptrace_hbptriggered(struct perf_event *bp,
  309. struct perf_sample_data *data,
  310. struct pt_regs *regs)
  311. {
  312. int i;
  313. struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
  314. if (bp->attr.bp_type & HW_BREAKPOINT_X) {
  315. for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
  316. if (current->thread.ptrace_bp[i] == bp)
  317. break;
  318. i <<= 1;
  319. } else {
  320. for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
  321. if (current->thread.ptrace_wp[i] == bp)
  322. break;
  323. i = (i << 1) | 1;
  324. }
  325. force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
  326. }
  327. static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
  328. {
  329. struct perf_event_attr attr;
  330. ptrace_breakpoint_init(&attr);
  331. /* Initialise fields to sane defaults. */
  332. attr.bp_addr = 0;
  333. attr.bp_len = 1;
  334. attr.bp_type = type;
  335. attr.disabled = 1;
  336. return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
  337. tsk);
  338. }
  339. /*
  340. * Address bit 0 choose instruction (0) or data (1) break register, bits
  341. * 31..1 are the register number.
  342. * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
  343. * address (0) and control (1).
 * Instruction breakpoint control word is 0 to clear breakpoint, 1 to set.
 * Data breakpoint control word bit 31 is 'trigger on store', bit 30 is
 * 'trigger on load', bits 29..0 are length. Length 0 is used to clear a
  347. * breakpoint. To set a breakpoint length must be a power of 2 in the range
  348. * 1..64 and the address must be length-aligned.
  349. */
  350. static long ptrace_gethbpregs(struct task_struct *child, long addr,
  351. long __user *datap)
  352. {
  353. struct perf_event *bp;
  354. u32 user_data[2] = {0};
  355. bool dbreak = addr & 1;
  356. unsigned idx = addr >> 1;
  357. if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
  358. (dbreak && idx >= XCHAL_NUM_DBREAK))
  359. return -EINVAL;
  360. if (dbreak)
  361. bp = child->thread.ptrace_wp[idx];
  362. else
  363. bp = child->thread.ptrace_bp[idx];
  364. if (bp) {
  365. user_data[0] = bp->attr.bp_addr;
  366. user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
  367. if (dbreak) {
  368. if (bp->attr.bp_type & HW_BREAKPOINT_R)
  369. user_data[1] |= DBREAKC_LOAD_MASK;
  370. if (bp->attr.bp_type & HW_BREAKPOINT_W)
  371. user_data[1] |= DBREAKC_STOR_MASK;
  372. }
  373. }
  374. if (copy_to_user(datap, user_data, sizeof(user_data)))
  375. return -EFAULT;
  376. return 0;
  377. }
/*
 * PTRACE_SETHBPREGS: program one hardware break/watchpoint slot from
 * the two-word {address, control} buffer at @datap (layout described
 * in the comment above).  The backing perf event is created lazily
 * the first time a slot is used and modified in place afterwards.
 */
static long ptrace_sethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	u32 user_data[2];
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;
	int bp_type = 0;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (copy_from_user(user_data, datap, sizeof(user_data)))
		return -EFAULT;

	if (dbreak) {
		/* Derive the perf breakpoint type from the load/store
		 * flag bits in the control word. */
		bp = child->thread.ptrace_wp[idx];
		if (user_data[1] & DBREAKC_LOAD_MASK)
			bp_type |= HW_BREAKPOINT_R;
		if (user_data[1] & DBREAKC_STOR_MASK)
			bp_type |= HW_BREAKPOINT_W;
	} else {
		bp = child->thread.ptrace_bp[idx];
		bp_type = HW_BREAKPOINT_X;
	}

	if (!bp) {
		/* bp_type may be 0 when clearing a data breakpoint;
		 * use HW_BREAKPOINT_RW as a placeholder type for the
		 * new (initially disabled) event. */
		bp = ptrace_hbp_create(child,
				       bp_type ? bp_type : HW_BREAKPOINT_RW);
		if (IS_ERR(bp))
			return PTR_ERR(bp);
		if (dbreak)
			child->thread.ptrace_wp[idx] = bp;
		else
			child->thread.ptrace_bp[idx] = bp;
	}

	attr = bp->attr;
	attr.bp_addr = user_data[0];
	/* Strip the load/store flag bits; what remains is the length. */
	attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
	attr.bp_type = bp_type;
	/* Length 0 means "clear": keep the event but disable it. */
	attr.disabled = !attr.bp_len;

	return modify_user_hw_breakpoint(bp, &attr);
}
  419. #endif
/*
 * Arch-specific ptrace dispatcher: handle the xtensa-specific
 * requests and forward everything else to the generic
 * ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:	/* read register specified by addr. */
		ret = ptrace_peekusr(child, addr, datap);
		break;

	case PTRACE_POKEUSR:	/* write register specified by addr. */
		ret = ptrace_pokeusr(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	case PTRACE_GETXTREGS:
		ret = ptrace_getxregs(child, datap);
		break;

	case PTRACE_SETXTREGS:
		ret = ptrace_setxregs(child, datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr, datap);
		break;

	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
void do_syscall_trace_leave(struct pt_regs *regs);

/*
 * Syscall-entry tracing hook.
 *
 * Returns 1 when the syscall should proceed, 0 when it must be
 * skipped (the tracer cancelled it or seccomp rejected it).
 */
int do_syscall_trace_enter(struct pt_regs *regs)
{
	/* a2 presumably carries the syscall return value (cf.
	 * regs_return_value() in the leave path); preload -ENOSYS
	 * for a cancelled syscall. */
	if (regs->syscall == NO_SYSCALL)
		regs->areg[2] = -ENOSYS;

	/* Let an attached tracer observe (and possibly cancel) entry. */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    ptrace_report_syscall_entry(regs)) {
		regs->areg[2] = -ENOSYS;
		regs->syscall = NO_SYSCALL;
		return 0;
	}

	/* The syscall body will be skipped, so run the exit hook here
	 * to keep entry/exit reporting balanced. */
	if (regs->syscall == NO_SYSCALL ||
	    secure_computing() == -1) {
		do_syscall_trace_leave(regs);
		return 0;
	}

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall_get_nr(current, regs));

	audit_syscall_entry(regs->syscall, regs->areg[6],
			    regs->areg[3], regs->areg[4],
			    regs->areg[5]);

	return 1;
}
/*
 * Syscall-exit tracing hook: audit exit, fire the exit tracepoint,
 * and report to the tracer (flagging single-step when
 * TIF_SINGLESTEP is set).
 */
void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, step);
}