  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * User-space Probes (UProbes) for powerpc
  4. *
  5. * Copyright IBM Corporation, 2007-2012
  6. *
  7. * Adapted from the x86 port by Ananth N Mavinakayanahalli <[email protected]>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/sched.h>
  11. #include <linux/ptrace.h>
  12. #include <linux/uprobes.h>
  13. #include <linux/uaccess.h>
  14. #include <linux/kdebug.h>
  15. #include <asm/sstep.h>
  16. #include <asm/inst.h>
  17. #define UPROBE_TRAP_NR UINT_MAX
  18. /**
  19. * is_trap_insn - check if the instruction is a trap variant
  20. * @insn: instruction to be checked.
  21. * Returns true if @insn is a trap variant.
  22. */
  23. bool is_trap_insn(uprobe_opcode_t *insn)
  24. {
  25. return (is_trap(*insn));
  26. }
  27. /**
  28. * arch_uprobe_analyze_insn
  29. * @mm: the probed address space.
  30. * @arch_uprobe: the probepoint information.
  31. * @addr: vaddr to probe.
  32. * Return 0 on success or a -ve number on error.
  33. */
  34. int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
  35. struct mm_struct *mm, unsigned long addr)
  36. {
  37. if (addr & 0x03)
  38. return -EINVAL;
  39. if (cpu_has_feature(CPU_FTR_ARCH_31) &&
  40. ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
  41. (addr & 0x3f) == 60) {
  42. pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
  43. return -EINVAL;
  44. }
  45. if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) {
  46. pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n");
  47. return -ENOTSUPP;
  48. }
  49. return 0;
  50. }
  51. /*
  52. * arch_uprobe_pre_xol - prepare to execute out of line.
  53. * @auprobe: the probepoint information.
  54. * @regs: reflects the saved user state of current task.
  55. */
  56. int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
  57. {
  58. struct arch_uprobe_task *autask = &current->utask->autask;
  59. autask->saved_trap_nr = current->thread.trap_nr;
  60. current->thread.trap_nr = UPROBE_TRAP_NR;
  61. regs_set_return_ip(regs, current->utask->xol_vaddr);
  62. user_enable_single_step(current);
  63. return 0;
  64. }
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 *
 * Return the address of the breakpoint instruction. On powerpc the NIP
 * still points at the trap instruction itself (no PC advance on trap),
 * so the saved instruction pointer is the swbp address.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
  75. /*
  76. * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
  77. * then detect the case where a singlestepped instruction jumps back to its
  78. * own address. It is assumed that anything like do_page_fault/do_trap/etc
  79. * sets thread.trap_nr != UINT_MAX.
  80. *
  81. * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
  82. * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
  83. * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
  84. */
  85. bool arch_uprobe_xol_was_trapped(struct task_struct *t)
  86. {
  87. if (t->thread.trap_nr != UPROBE_TRAP_NR)
  88. return true;
  89. return false;
  90. }
/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* pre_xol() set the sentinel; a real trap would have replaced it. */
	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);

	/* Restore the trap number saved by arch_uprobe_pre_xol(). */
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	/*
	 * On powerpc, except for loads and stores, most instructions
	 * including ones that alter code flow (branches, calls, returns)
	 * are emulated in the kernel. We get here only if the emulation
	 * support doesn't exist and have to fix-up the next instruction
	 * to be executed.
	 */
	/* Resume at the instruction after the probed one (4 or 8 bytes away). */
	regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));

	user_disable_single_step(current);
	return 0;
}
  114. /* callback routine for handling exceptions. */
  115. int arch_uprobe_exception_notify(struct notifier_block *self,
  116. unsigned long val, void *data)
  117. {
  118. struct die_args *args = data;
  119. struct pt_regs *regs = args->regs;
  120. /* regs == NULL is a kernel bug */
  121. if (WARN_ON(!regs))
  122. return NOTIFY_DONE;
  123. /* We are only interested in userspace traps */
  124. if (!user_mode(regs))
  125. return NOTIFY_DONE;
  126. switch (val) {
  127. case DIE_BPT:
  128. if (uprobe_pre_sstep_notifier(regs))
  129. return NOTIFY_STOP;
  130. break;
  131. case DIE_SSTEP:
  132. if (uprobe_post_sstep_notifier(regs))
  133. return NOTIFY_STOP;
  134. break;
  135. default:
  136. break;
  137. }
  138. return NOTIFY_DONE;
  139. }
  140. /*
  141. * This function gets called when XOL instruction either gets trapped or
  142. * the thread has a fatal signal, so reset the instruction pointer to its
  143. * probed address.
  144. */
  145. void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
  146. {
  147. struct uprobe_task *utask = current->utask;
  148. current->thread.trap_nr = utask->autask.saved_trap_nr;
  149. instruction_pointer_set(regs, utask->vaddr);
  150. user_disable_single_step(current);
  151. }
  152. /*
  153. * See if the instruction can be emulated.
  154. * Returns true if instruction was emulated, false otherwise.
  155. */
  156. bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
  157. {
  158. int ret;
  159. /*
  160. * emulate_step() returns 1 if the insn was successfully emulated.
  161. * For all other cases, we need to single-step in hardware.
  162. */
  163. ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
  164. if (ret > 0)
  165. return true;
  166. return false;
  167. }
  168. unsigned long
  169. arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
  170. {
  171. unsigned long orig_ret_vaddr;
  172. orig_ret_vaddr = regs->link;
  173. /* Replace the return addr with trampoline addr */
  174. regs->link = trampoline_vaddr;
  175. return orig_ret_vaddr;
  176. }
  177. bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
  178. struct pt_regs *regs)
  179. {
  180. if (ctx == RP_CHECK_CHAIN_CALL)
  181. return regs->gpr[1] <= ret->stack;
  182. else
  183. return regs->gpr[1] < ret->stack;
  184. }