// SPDX-License-Identifier: GPL-2.0-only

#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

#include "decode-insn.h"

#define UPROBE_TRAP_NR	UINT_MAX

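/*
 * Check whether the instruction at a probe point is the uprobes
 * breakpoint. With the C extension the breakpoint is the 16-bit
 * c.ebreak, so only the low halfword is compared; otherwise the
 * full 32-bit ebreak is matched.
 */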
bool is_swbp_insn(uprobe_opcode_t *insn)
{
#ifdef CONFIG_RISCV_ISA_C
	return (*insn & 0xffff) == UPROBE_SWBP_INSN;
#else
	return *insn == UPROBE_SWBP_INSN;
#endif
}

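/*
 * On RISC-V the breakpoint trap leaves pc at the ebreak itself, so the
 * probed address is simply the current instruction pointer.
 */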
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

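/*
 * Called when a probe is installed: record the length (2 or 4 bytes)
 * of the original instruction and ask the shared decoder whether it
 * can be single-stepped out of line (INSN_GOOD), must be simulated in
 * the kernel (INSN_GOOD_NO_SLOT), or cannot be probed at all
 * (INSN_REJECTED).
 */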
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t opcode;

	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);

	auprobe->insn_size = GET_INSN_LENGTH(opcode);

	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
		auprobe->simulate = false;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

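/*
 * Prepare to single-step the original instruction out of line: save
 * bad_cause, mark the thread as mid-step with the UPROBE_TRAP_NR
 * sentinel, and point pc at the XOL slot.
 */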
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	instruction_pointer_set(regs, utask->xol_vaddr);

	return 0;
}

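/*
 * The out-of-line step has completed: restore bad_cause and resume at
 * the instruction following the probed one at its original address.
 */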
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
	current->thread.bad_cause = utask->autask.saved_cause;

	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	return 0;
}

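/*
 * If anything other than the expected breakpoint fired while stepping
 * in the XOL slot, bad_cause no longer holds the UPROBE_TRAP_NR
 * sentinel set by arch_uprobe_pre_xol().
 */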
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.bad_cause != UPROBE_TRAP_NR)
		return true;

	return false;
}

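/*
 * Instructions that cannot be stepped out of line (e.g. pc-relative
 * ones) are simulated instead; returning true tells the core layer to
 * skip the XOL single step.
 */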
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

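/*
 * Abandon an in-progress out-of-line step: restore bad_cause and
 * rewind pc so the pending signal is handled at the probe address.
 */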
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.bad_cause = utask->autask.saved_cause;
	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);
}

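/*
 * A pending return-probe instance is dead once the stack has been
 * unwound past the sp recorded when its return address was hijacked.
 * For a chained call the comparison is inclusive, since the new frame
 * may start at the same sp.
 */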
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

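/*
 * RISC-V keeps the return address in the ra register rather than on
 * the stack, so hijacking it is a simple register swap: hand back the
 * original ra and redirect the register to the uretprobe trampoline.
 */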
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long ra;

	ra = regs->ra;
	regs->ra = trampoline_vaddr;

	return ra;
}

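/*
 * Unused on RISC-V: breakpoint and single-step events are dispatched
 * straight from the trap handlers below instead of via the notifier
 * chain.
 */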
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

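/*
 * Entered from the ebreak trap handler; returns true if the breakpoint
 * belonged to uprobes and has been consumed.
 */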
bool uprobe_breakpoint_handler(struct pt_regs *regs)
{
	if (uprobe_pre_sstep_notifier(regs))
		return true;

	return false;
}

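/*
 * Completion side of the step: the ebreak planted after the copied
 * instruction in the XOL slot (see arch_uprobe_copy_ixol() below)
 * traps here once the stepped instruction has executed.
 */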
bool uprobe_single_step_handler(struct pt_regs *regs)
{
	if (uprobe_post_sstep_notifier(regs))
		return true;

	return false;
}

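/*
 * Populate the XOL slot: copy the original instruction into the slot
 * page and plant a 32-bit ebreak right behind it, since RISC-V has no
 * hardware single-step to return control after the instruction runs.
 */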
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add an ebreak behind the copied opcode to simulate a single step */
	if (vaddr) {
		dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
	}

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page() but it needs a vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}