// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <[email protected]>
 */
#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/irq.h>
#include <asm/patching.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

#include "decode-insn.h"

#ifdef CONFIG_RKP
#include <linux/rkp.h>
#endif
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->ainsn.api.insn;

	/*
	 * Prepare insn slot, Mark Rutland points out it depends on a couple of
	 * subtleties:
	 *
	 * - That the I-cache maintenance for these instructions is complete
	 *   *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync()
	 *   ensures this, but just omits causing a Context-Synchronization-Event
	 *   on all CPUs).
	 *
	 * - That the kprobe BRK results in an exception (and consequently a
	 *   Context-Synchronization-Event), which ensures that the CPU will
	 *   fetch the single-step slot instructions *after* this, ensuring that
	 *   the new instructions are used.
	 *
	 * Normally an ISB would be placed after patching to guarantee that the
	 * I-cache maintenance is observed on all CPUs; however, the single-step
	 * slot is only reached from the BRK exception handler, so it is
	 * unnecessary to generate a Context-Synchronization-Event via ISB again.
	 */
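	/*
	 * Resulting slot layout (sketch):
	 *
	 *   addr[0]: copy of the probed instruction (p->opcode)
	 *   addr[1]: BRK64_OPCODE_KPROBES_SS, which traps back into the
	 *            kernel once the copied instruction has executed
	 */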
	aarch64_insn_patch_text_nosync(addr, p->opcode);
	aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);

	/*
	 * Needs restoring of return address after single-stepping out of line.
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out of line; no need to adjust the PC */
	p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(p, kcb, regs);
}
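/*
 * Validate and decode the probed instruction: reject unaligned addresses and
 * addresses covered by the exception tables, then either allocate an
 * out-of-line single-step slot for the instruction or arrange for it to be
 * simulated in C.
 */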
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
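/*
 * Allocate the page that holds the out-of-line single-step slots. The page
 * is mapped read-only-executable up front; slot contents are written via the
 * text patching code rather than through this mapping. On CONFIG_RKP kernels
 * (Samsung's Real-time Kernel Protection) the page is additionally reported
 * to the uH hypervisor.
 */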
void *alloc_insn_page(void)
{
	void *p = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
			GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
			NUMA_NO_NODE, __builtin_return_address(0));

#ifdef CONFIG_RKP
	uh_call(UH_APP_RKP, RKP_KPROBE_PAGE, (u64)p, 4096, 0, 0);
#endif
	return p;
}
/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = BRK64_OPCODE_KPROBES;

	aarch64_insn_patch_text(&addr, &insn, 1);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;

	aarch64_insn_patch_text(&addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
/*
 * Mask all of DAIF while executing the instruction out-of-line, to keep things
 * simple and avoid nesting exceptions. Interrupts do have to be disabled since
 * the kprobe state is per-CPU and doesn't get migrated.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	regs->pstate |= DAIF_MASK;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->pstate &= ~DAIF_MASK;
	regs->pstate |= kcb->saved_irqflag;
}
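/*
 * Begin executing the probed instruction: either point the PC at the
 * single-step slot (the copied instruction followed by a BRK) or, when no
 * slot exists, simulate the instruction and run post-processing immediately.
 */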
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		kprobes_save_local_irqflag(kcb, regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
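/*
 * A kprobe fired while another kprobe's handler was running. Stepping the
 * new probe as a reentrant probe is fine; recursing while already
 * single-stepping (KPROBE_HIT_SS/KPROBE_REENTER) is unrecoverable.
 */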
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address if the stepped insn was non-branching */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore the previously saved kprobe state and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)
		cur->post_handler(cur, regs, 0);

	reset_current_kprobe();
}
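/*
 * Called from the fault-handling code when a fault is taken while a kprobe
 * is active: faults raised by the single-stepped instruction rewind the PC
 * to the probe address, while faults taken from a kprobe handler are given
 * to the exception fixup tables.
 */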
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back to the probe address and
		 * allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}
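/*
 * Entry point from the kprobe BRK: look up the kprobe registered for the
 * trapping PC, run its pre-handler and, unless the pre-handler diverted
 * execution, set up the single-step.
 */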
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path, and there is no need
			 * for single-stepping. Let's just reset the current
			 * kprobe and exit.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
}
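/*
 * BRK handler for the single-step slot: we land here from the BRK placed at
 * slot[1], i.e. immediately after the copied instruction has executed.
 */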
static int __kprobes
kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);

		return DBG_HOOK_HANDLED;
	}

	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

static struct break_hook kprobes_break_ss_hook = {
	.imm = KPROBES_BRK_SS_IMM,
	.fn = kprobe_breakpoint_ss_handler,
};
static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kprobes_break_hook = {
	.imm = KPROBES_BRK_IMM,
	.fn = kprobe_breakpoint_handler,
};
/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
					(unsigned long)__idmap_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
}
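/*
 * At function entry, record the real return address and frame pointer, then
 * divert x30 to the trampoline so that the function's return traps into
 * trampoline_probe_handler() above.
 */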
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
	ri->fp = (void *)regs->regs[29];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
int __init arch_init_kprobes(void)
{
	register_kernel_break_hook(&kprobes_break_hook);
	register_kernel_break_hook(&kprobes_break_ss_hook);

	return 0;
}
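
/*
 * Usage sketch (illustrative only, not part of this file): a minimal client
 * of this arch backend, modeled on samples/kprobes/kprobe_example.c. The
 * probed symbol "kernel_clone" and the handler/function names are assumptions
 * chosen for the example; any kprobe-able kernel symbol works.
 *
 *	#include <linux/kprobes.h>
 *	#include <linux/module.h>
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "kernel_clone",	// assumed probe target
 *	};
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre: pc = 0x%lx\n", instruction_pointer(regs));
 *		return 0;	// 0: proceed to single-step/simulation
 *	}
 *
 *	static int __init kp_example_init(void)
 *	{
 *		kp.pre_handler = handler_pre;
 *		return register_kprobe(&kp);	// arms the BRK via arch_arm_kprobe()
 *	}
 *
 *	static void __exit kp_example_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 *
 *	module_init(kp_example_init);
 *	module_exit(kp_example_exit);
 *	MODULE_LICENSE("GPL");
 */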