kprobes.c 9.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412
  1. // SPDX-License-Identifier: GPL-2.0+
  2. #define pr_fmt(fmt) "kprobes: " fmt
  3. #include <linux/kprobes.h>
  4. #include <linux/extable.h>
  5. #include <linux/slab.h>
  6. #include <linux/stop_machine.h>
  7. #include <asm/ptrace.h>
  8. #include <linux/uaccess.h>
  9. #include <asm/sections.h>
  10. #include <asm/cacheflush.h>
  11. #include "decode-insn.h"
  12. DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
  13. DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
  14. static void __kprobes
  15. post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
/*
 * Argument bundle for patch_text_cb(), executed on every online CPU
 * under stop_machine().
 */
struct csky_insn_patch {
	kprobe_opcode_t *addr;	/* kernel text address to patch */
	u32 opcode;		/* replacement opcode; patch_text_cb() stores
				 * only the low 16 bits of it */
	atomic_t cpu_count;	/* rendezvous counter for all online CPUs */
};
  21. static int __kprobes patch_text_cb(void *priv)
  22. {
  23. struct csky_insn_patch *param = priv;
  24. unsigned int addr = (unsigned int)param->addr;
  25. if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
  26. *(u16 *) addr = cpu_to_le16(param->opcode);
  27. dcache_wb_range(addr, addr + 2);
  28. atomic_inc(&param->cpu_count);
  29. } else {
  30. while (atomic_read(&param->cpu_count) <= num_online_cpus())
  31. cpu_relax();
  32. }
  33. icache_inv_range(addr, addr + 2);
  34. return 0;
  35. }
  36. static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
  37. {
  38. struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };
  39. return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
  40. }
  41. static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
  42. {
  43. unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
  44. p->ainsn.api.restore = (unsigned long)p->addr + offset;
  45. patch_text(p->ainsn.api.insn, p->opcode);
  46. }
/* Simulated instructions have no slot, hence no resume address. */
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	p->ainsn.api.restore = 0;
}
/*
 * Emulate the probed instruction in software (no out-of-line step),
 * then complete probe handling as if the step had finished.
 */
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
	post_kprobe_handler(kcb, regs);
}
  58. int __kprobes arch_prepare_kprobe(struct kprobe *p)
  59. {
  60. unsigned long probe_addr = (unsigned long)p->addr;
  61. if (probe_addr & 0x1)
  62. return -EILSEQ;
  63. /* copy instruction */
  64. p->opcode = le32_to_cpu(*p->addr);
  65. /* decode instruction */
  66. switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
  67. case INSN_REJECTED: /* insn not supported */
  68. return -EINVAL;
  69. case INSN_GOOD_NO_SLOT: /* insn need simulation */
  70. p->ainsn.api.insn = NULL;
  71. break;
  72. case INSN_GOOD: /* instruction uses slot */
  73. p->ainsn.api.insn = get_insn_slot();
  74. if (!p->ainsn.api.insn)
  75. return -ENOMEM;
  76. break;
  77. }
  78. /* prepare the instruction */
  79. if (p->ainsn.api.insn)
  80. arch_prepare_ss_slot(p);
  81. else
  82. arch_prepare_simulate(p);
  83. return 0;
  84. }
/* install breakpoint in text: overwrite the probed insn with USR_BKPT */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, USR_BKPT);
}
/* remove breakpoint from text: write back the original opcode */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}
  95. void __kprobes arch_remove_kprobe(struct kprobe *p)
  96. {
  97. if (p->ainsn.api.insn) {
  98. free_insn_slot(p->ainsn.api.insn, 0);
  99. p->ainsn.api.insn = NULL;
  100. }
  101. }
/* Stash the active kprobe and its status so a nested hit can be handled. */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
/* Pop the stashed kprobe state after a nested probe completes. */
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
/* Mark @p as the kprobe currently being handled on this CPU. */
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupt on local CPU, there is a chance of
 * interrupt occurrence in the period of exception return and start of
 * out-of-line single-step, that result in wrongly single stepping
 * into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_sr = regs->sr;
	/* Clear bit 6 of the saved PSR — presumably the interrupt-enable
	 * (IE) bit; confirm against the C-SKY architecture manual. */
	regs->sr &= ~BIT(6);
}
/* Restore the PSR saved by kprobes_save_local_irqflag(). */
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->sr = kcb->saved_sr;
}
  135. static void __kprobes
  136. set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
  137. {
  138. unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
  139. kcb->ss_ctx.ss_pending = true;
  140. kcb->ss_ctx.match_addr = addr + offset;
  141. }
/* Disarm the single-step context set up by set_ss_context(). */
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}
  147. #define TRACE_MODE_SI BIT(14)
  148. #define TRACE_MODE_MASK ~(0x3 << 14)
  149. #define TRACE_MODE_RUN 0
/*
 * Start handling a probe hit: either single-step the copied instruction
 * out of line, or simulate it in software.
 *
 * @reenter: non-zero when this probe hit occurred inside another
 *           kprobe's handler; the active probe is stacked first.
 *
 * Note the ordering in the slot path: the IRQ flag must be saved (and
 * interrupts disabled) before the PSR trace-mode bits are rewritten.
 */
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot, p);	/* mark pending ss */

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		/* Switch the PSR trace mode to single-instruction step. */
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
		/* Resume execution at the out-of-line slot. */
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
  175. static int __kprobes reenter_kprobe(struct kprobe *p,
  176. struct pt_regs *regs,
  177. struct kprobe_ctlblk *kcb)
  178. {
  179. switch (kcb->kprobe_status) {
  180. case KPROBE_HIT_SSDONE:
  181. case KPROBE_HIT_ACTIVE:
  182. kprobes_inc_nmissed_count(p);
  183. setup_singlestep(p, regs, kcb, 1);
  184. break;
  185. case KPROBE_HIT_SS:
  186. case KPROBE_REENTER:
  187. pr_warn("Failed to recover from reentered kprobes.\n");
  188. dump_kprobe(p);
  189. BUG();
  190. break;
  191. default:
  192. WARN_ON(1);
  193. return 0;
  194. }
  195. return 1;
  196. }
  197. static void __kprobes
  198. post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
  199. {
  200. struct kprobe *cur = kprobe_running();
  201. if (!cur)
  202. return;
  203. /* return addr restore if non-branching insn */
  204. if (cur->ainsn.api.restore != 0)
  205. regs->pc = cur->ainsn.api.restore;
  206. /* restore back original saved kprobe variables and continue */
  207. if (kcb->kprobe_status == KPROBE_REENTER) {
  208. restore_previous_kprobe(kcb);
  209. return;
  210. }
  211. /* call post handler */
  212. kcb->kprobe_status = KPROBE_HIT_SSDONE;
  213. if (cur->post_handler) {
  214. /* post_handler can hit breakpoint and single step
  215. * again, so we enable D-flag for recursive exception.
  216. */
  217. cur->post_handler(cur, regs, 0);
  218. }
  219. reset_current_kprobe();
  220. }
  221. int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
  222. {
  223. struct kprobe *cur = kprobe_running();
  224. struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
  225. switch (kcb->kprobe_status) {
  226. case KPROBE_HIT_SS:
  227. case KPROBE_REENTER:
  228. /*
  229. * We are here because the instruction being single
  230. * stepped caused a page fault. We reset the current
  231. * kprobe and the ip points back to the probe address
  232. * and allow the page fault handler to continue as a
  233. * normal page fault.
  234. */
  235. regs->pc = (unsigned long) cur->addr;
  236. BUG_ON(!instruction_pointer(regs));
  237. if (kcb->kprobe_status == KPROBE_REENTER)
  238. restore_previous_kprobe(kcb);
  239. else
  240. reset_current_kprobe();
  241. break;
  242. case KPROBE_HIT_ACTIVE:
  243. case KPROBE_HIT_SSDONE:
  244. /*
  245. * In case the user-specified fault handler returned
  246. * zero, try to fix up.
  247. */
  248. if (fixup_exception(regs))
  249. return 1;
  250. }
  251. return 0;
  252. }
  253. int __kprobes
  254. kprobe_breakpoint_handler(struct pt_regs *regs)
  255. {
  256. struct kprobe *p, *cur_kprobe;
  257. struct kprobe_ctlblk *kcb;
  258. unsigned long addr = instruction_pointer(regs);
  259. kcb = get_kprobe_ctlblk();
  260. cur_kprobe = kprobe_running();
  261. p = get_kprobe((kprobe_opcode_t *) addr);
  262. if (p) {
  263. if (cur_kprobe) {
  264. if (reenter_kprobe(p, regs, kcb))
  265. return 1;
  266. } else {
  267. /* Probe hit */
  268. set_current_kprobe(p);
  269. kcb->kprobe_status = KPROBE_HIT_ACTIVE;
  270. /*
  271. * If we have no pre-handler or it returned 0, we
  272. * continue with normal processing. If we have a
  273. * pre-handler and it returned non-zero, it will
  274. * modify the execution path and no need to single
  275. * stepping. Let's just reset current kprobe and exit.
  276. *
  277. * pre_handler can hit a breakpoint and can step thru
  278. * before return.
  279. */
  280. if (!p->pre_handler || !p->pre_handler(p, regs))
  281. setup_singlestep(p, regs, kcb, 0);
  282. else
  283. reset_current_kprobe();
  284. }
  285. return 1;
  286. }
  287. /*
  288. * The breakpoint instruction was removed right
  289. * after we hit it. Another cpu has removed
  290. * either a probepoint or a debugger breakpoint
  291. * at this address. In either case, no further
  292. * handling of this interrupt is appropriate.
  293. * Return back to original instruction, and continue.
  294. */
  295. return 0;
  296. }
  297. int __kprobes
  298. kprobe_single_step_handler(struct pt_regs *regs)
  299. {
  300. struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
  301. if ((kcb->ss_ctx.ss_pending)
  302. && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
  303. clear_ss_context(kcb); /* clear pending ss */
  304. kprobes_restore_local_irqflag(kcb, regs);
  305. regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
  306. post_kprobe_handler(kcb, regs);
  307. return 1;
  308. }
  309. return 0;
  310. }
  311. /*
  312. * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
  313. * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
  314. */
  315. int __init arch_populate_kprobe_blacklist(void)
  316. {
  317. int ret;
  318. ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
  319. (unsigned long)__irqentry_text_end);
  320. return ret;
  321. }
/* Called from the kretprobe trampoline; returns the real return address. */
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, NULL);
}
/*
 * Save the real return address in @ri, then divert lr so the probed
 * function returns into the kretprobe trampoline instead.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->lr;
	ri->fp = NULL;
	regs->lr = (unsigned long) &__kretprobe_trampoline;
}
/* No arch-specific trampoline kprobes on C-SKY. */
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
/* Nothing arch-specific to initialize. */
int __init arch_init_kprobes(void)
{
	return 0;
}