ptrace.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/arch/arm/kernel/ptrace.c
  4. *
  5. * By Ross Biro 1/23/92
  6. * edited by Linus Torvalds
  7. * ARM modifications Copyright (C) 2000 Russell King
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/sched/signal.h>
  11. #include <linux/sched/task_stack.h>
  12. #include <linux/mm.h>
  13. #include <linux/elf.h>
  14. #include <linux/smp.h>
  15. #include <linux/ptrace.h>
  16. #include <linux/user.h>
  17. #include <linux/security.h>
  18. #include <linux/init.h>
  19. #include <linux/signal.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/perf_event.h>
  22. #include <linux/hw_breakpoint.h>
  23. #include <linux/regset.h>
  24. #include <linux/audit.h>
  25. #include <linux/unistd.h>
  26. #include <asm/syscall.h>
  27. #include <asm/traps.h>
  28. #define CREATE_TRACE_POINTS
  29. #include <trace/events/syscalls.h>
  30. #define REG_PC 15
  31. #define REG_PSR 16
  32. /*
  33. * does not yet catch signals sent when the child dies.
  34. * in exit.c or in signal.c.
  35. */
  36. #if 0
  37. /*
  38. * Breakpoint SWI instruction: SWI &9F0001
  39. */
  40. #define BREAKINST_ARM 0xef9f0001
  41. #define BREAKINST_THUMB 0xdf00 /* fill this in later */
  42. #else
  43. /*
  44. * New breakpoints - use an undefined instruction. The ARM architecture
  45. * reference manual guarantees that the following instruction space
  46. * will produce an undefined instruction exception on all CPUs:
  47. *
  48. * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
  49. * Thumb: 1101 1110 xxxx xxxx
  50. */
  51. #define BREAKINST_ARM 0xe7f001f0
  52. #define BREAKINST_THUMB 0xde01
  53. #endif
/* One entry of the register-name <-> pt_regs-offset map below. */
struct pt_regs_offset {
	const char *name;	/* user-visible register name, e.g. "r0" */
	int offset;		/* byte offset of the register in struct pt_regs */
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Lookup table used by the regs_query_* helpers; NULL-name terminated. */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};
  82. /**
  83. * regs_query_register_offset() - query register offset from its name
  84. * @name: the name of a register
  85. *
  86. * regs_query_register_offset() returns the offset of a register in struct
  87. * pt_regs from its name. If the name is invalid, this returns -EINVAL;
  88. */
  89. int regs_query_register_offset(const char *name)
  90. {
  91. const struct pt_regs_offset *roff;
  92. for (roff = regoffset_table; roff->name != NULL; roff++)
  93. if (!strcmp(roff->name, name))
  94. return roff->offset;
  95. return -EINVAL;
  96. }
  97. /**
  98. * regs_query_register_name() - query register name from its offset
  99. * @offset: the offset of a register in struct pt_regs.
  100. *
  101. * regs_query_register_name() returns the name of a register from its
  102. * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
  103. */
  104. const char *regs_query_register_name(unsigned int offset)
  105. {
  106. const struct pt_regs_offset *roff;
  107. for (roff = regoffset_table; roff->name != NULL; roff++)
  108. if (roff->offset == offset)
  109. return roff->name;
  110. return NULL;
  111. }
  112. /**
  113. * regs_within_kernel_stack() - check the address in the stack
  114. * @regs: pt_regs which contains kernel stack pointer.
  115. * @addr: address which is checked.
  116. *
  117. * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
  118. * If @addr is within the kernel stack, it returns true. If not, returns false.
  119. */
  120. bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
  121. {
  122. return ((addr & ~(THREAD_SIZE - 1)) ==
  123. (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
  124. }
  125. /**
  126. * regs_get_kernel_stack_nth() - get Nth entry of the stack
  127. * @regs: pt_regs which contains kernel stack pointer.
  128. * @n: stack entry number.
  129. *
  130. * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
  131. * is specified by @regs. If the @n th entry is NOT in the kernel stack,
  132. * this returns 0.
  133. */
  134. unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
  135. {
  136. unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
  137. addr += n;
  138. if (regs_within_kernel_stack(regs, (unsigned long)addr))
  139. return *addr;
  140. else
  141. return 0;
  142. }
/*
 * this routine will get a word off of the processes privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	/* Read register @offset from the saved user context on the kernel stack. */
	return task_pt_regs(task)->uregs[offset];
}
  153. /*
  154. * this routine will put a word on the processes privileged stack.
  155. * the offset is how far from the base addr as stored in the THREAD.
  156. * this routine assumes that all the privileged stacks are in our
  157. * data space.
  158. */
  159. static inline int
  160. put_user_reg(struct task_struct *task, int offset, long data)
  161. {
  162. struct pt_regs newregs, *regs = task_pt_regs(task);
  163. int ret = -EINVAL;
  164. newregs = *regs;
  165. newregs.uregs[offset] = data;
  166. if (valid_user_regs(&newregs)) {
  167. regs->uregs[offset] = data;
  168. ret = 0;
  169. }
  170. return ret;
  171. }
/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 * Reports the trap to the tracer as SIGTRAP/TRAP_BRKPT with the
 * faulting instruction address.
 */
void ptrace_break(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_BRKPT,
			(void __user *)instruction_pointer(regs));
}
/* Undef-instruction hook callback: deliver the breakpoint signal. */
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(regs);
	return 0;	/* handled */
}

/* ARM-state breakpoint: BREAKINST_ARM with the condition field ignored. */
static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,		/* only in ARM (not Thumb) state */
	.fn		= break_trap,
};

/* Thumb-state breakpoint: BREAKINST_THUMB. */
static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0x0000de01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,	/* only in Thumb state */
	.fn		= break_trap,
};

/* 32-bit Thumb-2 breakpoint encoding. */
static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,	/* only in Thumb state */
	.fn		= break_trap,
};

/* Register all breakpoint encodings with the undef-instruction handler. */
static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
  221. /*
  222. * Read the word at offset "off" into the "struct user". We
  223. * actually access the pt_regs stored on the kernel stack.
  224. */
  225. static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
  226. unsigned long __user *ret)
  227. {
  228. unsigned long tmp;
  229. if (off & 3)
  230. return -EIO;
  231. tmp = 0;
  232. if (off == PT_TEXT_ADDR)
  233. tmp = tsk->mm->start_code;
  234. else if (off == PT_DATA_ADDR)
  235. tmp = tsk->mm->start_data;
  236. else if (off == PT_TEXT_END_ADDR)
  237. tmp = tsk->mm->end_code;
  238. else if (off < sizeof(struct pt_regs))
  239. tmp = get_user_reg(tsk, off >> 2);
  240. else if (off >= sizeof(struct user))
  241. return -EIO;
  242. return put_user(tmp, ret);
  243. }
  244. /*
  245. * Write the word at offset "off" into "struct user". We
  246. * actually access the pt_regs stored on the kernel stack.
  247. */
  248. static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
  249. unsigned long val)
  250. {
  251. if (off & 3 || off >= sizeof(struct user))
  252. return -EIO;
  253. if (off >= sizeof(struct pt_regs))
  254. return 0;
  255. return put_user_reg(tsk, off >> 2, val);
  256. }
  257. #ifdef CONFIG_IWMMXT
/*
 * Get the child iWMMXt state.
 * Returns -ENODATA if the task has never used iWMMXt, -EFAULT on a bad
 * user pointer, 0 on success.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 * Returns -EACCES if the task has never used iWMMXt, -EFAULT on a bad
 * user pointer, 0 on success.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}
  282. #endif
  283. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  284. /*
  285. * Convert a virtual register number into an index for a thread_info
  286. * breakpoint array. Breakpoints are identified using positive numbers
  287. * whilst watchpoints are negative. The registers are laid out as pairs
  288. * of (address, control), each pair mapping to a unique hw_breakpoint struct.
  289. * Register 0 is reserved for describing resource information.
  290. */
  291. static int ptrace_hbp_num_to_idx(long num)
  292. {
  293. if (num < 0)
  294. num = (ARM_MAX_BRP << 1) - num;
  295. return (num - 1) >> 1;
  296. }
  297. /*
  298. * Returns the virtual register number for the address of the
  299. * breakpoint at index idx.
  300. */
  301. static long ptrace_hbp_idx_to_num(int idx)
  302. {
  303. long mid = ARM_MAX_BRP << 1;
  304. long num = (idx << 1) + 1;
  305. return num > mid ? mid - num : num;
  306. }
/*
 * Handle hitting a HW-breakpoint.
 * Finds which debug slot of the current task fired and forwards a
 * SIGTRAP to the tracee carrying the virtual register number (si_errno)
 * and the trigger address.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;

	/* Locate the slot holding this perf event; 0 if not found. */
	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}
/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	/* Only the pointers are cleared; the parent's events stay registered. */
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}
  332. /*
  333. * Unregister breakpoints from this task and reset the pointers in
  334. * the thread_struct.
  335. */
  336. void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
  337. {
  338. int i;
  339. struct thread_struct *t = &tsk->thread;
  340. for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
  341. if (t->debug.hbp[i]) {
  342. unregister_hw_breakpoint(t->debug.hbp[i]);
  343. t->debug.hbp[i] = NULL;
  344. }
  345. }
  346. }
  347. static u32 ptrace_get_hbp_resource_info(void)
  348. {
  349. u8 num_brps, num_wrps, debug_arch, wp_len;
  350. u32 reg = 0;
  351. num_brps = hw_breakpoint_slots(TYPE_INST);
  352. num_wrps = hw_breakpoint_slots(TYPE_DATA);
  353. debug_arch = arch_get_debug_arch();
  354. wp_len = arch_get_max_wp_len();
  355. reg |= debug_arch;
  356. reg <<= 8;
  357. reg |= wp_len;
  358. reg <<= 8;
  359. reg |= num_wrps;
  360. reg <<= 8;
  361. reg |= num_brps;
  362. return reg;
  363. }
/*
 * Create a disabled, zero-address hardware breakpoint of the given
 * generic @type (HW_BREAKPOINT_X or HW_BREAKPOINT_RW) for @tsk.
 * The debugger fills in the real address/control via PTRACE_SETHBPREGS.
 * Returns the perf event or an ERR_PTR on failure.
 */
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}
/*
 * PTRACE_GETHBPREGS: read one virtual debug register into *@data.
 * num == 0 returns the resource-info word; odd numbers return the
 * breakpoint address, even numbers the encoded control register.
 * An empty slot reads as 0.
 */
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			/* Unused slot: report zero. */
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;
out:
	return ret;
}
/*
 * PTRACE_SETHBPREGS: write one virtual debug register from *@data.
 * num == 0 (the resource-info register) is read-only and silently
 * succeeds.  Positive numbers address breakpoints, negative numbers
 * watchpoints; odd numbers set the address, even numbers the control
 * word.  A perf event is created lazily for an empty slot.
 */
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;	/* resource register is read-only */
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		/* First touch of this slot: allocate a disabled event. */
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr = user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		/* The requested type must match the slot's implied type. */
		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
  468. #endif
/* regset get/set implementations */

/* Copy the target's saved general-purpose registers into @to. */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	return membuf_write(&to, task_pt_regs(target), sizeof(struct pt_regs));
}

/*
 * Update the target's general-purpose registers from user/kernel buffer.
 * The update is validated on a copy and rejected with -EINVAL if it would
 * produce an illegal user-mode state.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs = *task_pt_regs(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}
/* Copy the target's FPA (legacy FP emulation) state into @to. */
static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	return membuf_write(&to, &task_thread_info(target)->fpstate,
			    sizeof(struct user_fp));
}

/* Update the target's FPA state and mark FP coprocessors as in use. */
static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	/* Writing FP state implies the task now uses CP1/CP2. */
	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &thread->fpstate,
				  0, sizeof(struct user_fp));
}
  511. #ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_fp is divided into three chunks:
 *
 * 16 or 32 real VFP registers (d0-d15 or d0-31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct.  The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	/* Flush live hardware state into the thread structure first. */
	vfp_sync_hwstate(thread);

	membuf_write(&to, vfp->fpregs, sizeof(vfp->fpregs));
	/* Fake registers the kernel doesn't have read as zero. */
	membuf_zero(&to, user_fpscr_offset - sizeof(vfp->fpregs));
	return membuf_store(&to, vfp->fpscr);
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);
	new_vfp = thread->vfpstate.hard;

	/* Real registers. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	/* Skip the fake-register gap between fpregs and fpscr. */
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					user_fpregs_offset + sizeof(new_vfp.fpregs),
					user_fpscr_offset);
	if (ret)
		return ret;

	/* FPSCR word. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	/* Only commit once every copyin has succeeded. */
	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
  580. #endif /* CONFIG_VFP */
/* Indices into arm_regsets[]. */
enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

/* The single regset view exported for ARM tasks (ELF core dumps, ptrace). */
static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}
/*
 * Arch-specific ptrace dispatcher: handles ARM requests and falls back
 * to the generic ptrace_request() for everything else.
 * @addr and @data are interpreted per request (offset, register number,
 * or user buffer pointer).
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_GPR,
					  0, sizeof(struct pt_regs),
					  datap);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_GPR,
					    0, sizeof(struct pt_regs),
					    datap);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_FPR,
					  0, sizeof(union fp_state),
					  datap);
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_FPR,
					    0, sizeof(union fp_state),
					    datap);
		break;

#ifdef CONFIG_IWMMXT
	case PTRACE_GETWMMXREGS:
		ret = ptrace_getwmmxregs(child, datap);
		break;

	case PTRACE_SETWMMXREGS:
		ret = ptrace_setwmmxregs(child, datap);
		break;
#endif

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value[0],
			       datap);
		break;

	case PTRACE_SET_SYSCALL:
		/* -1 means "skip the syscall"; otherwise mask to ABI range. */
		if (data != -1)
			data &= __NR_SYSCALL_MASK;
		task_thread_info(child)->abi_syscall = data;
		ret = 0;
		break;

#ifdef CONFIG_VFP
	case PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_VFP,
					  0, ARM_VFPREGS_SIZE,
					  datap);
		break;

	case PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_VFP,
					    0, ARM_VFPREGS_SIZE,
					    datap);
		break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr,
					(unsigned long __user *)data);
		break;

	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr,
					(unsigned long __user *)data);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
/* Direction flag passed to report_syscall() via ARM_ip. */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

/*
 * Notify the tracer of a syscall entry or exit.  If the tracer cancels
 * the syscall on entry, abi_syscall is set to -1 so no syscall runs.
 */
static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	unsigned long ip;

	/*
	 * IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, =1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		ptrace_report_syscall_exit(regs, 0);
	else if (ptrace_report_syscall_entry(regs))
		current_thread_info()->abi_syscall = -1;

	/* Restore the real IP the tracer may have observed. */
	regs->ARM_ip = ip;
}
/*
 * Syscall-entry tracing hook: runs ptrace reporting, seccomp, tracepoints
 * and audit, in that order.  Returns the (possibly changed) syscall
 * number, or -1 if the syscall should be skipped.
 */
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	int scno;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing() == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(syscall_get_nr(current, regs));
#endif

	/* Tracer or seccomp may have changed syscall. */
	scno = syscall_get_nr(current, regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, scno);

	audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
			    regs->ARM_r3);

	return scno;
}
/*
 * Syscall-exit tracing hook: audits first, then fires the exit
 * tracepoint and notifies the ptrace tracer.
 */
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread.  This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);
}