// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"
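
/*
 * Recover the original instruction bytes when @addr falls inside the
 * JMP32 that an optprobe wrote over the probed site. Returns @addr if
 * no jump-optimized kprobe covers it, 0 if the copy fails, and the
 * address of @buf (holding the recovered bytes) otherwise.
 */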
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op is optimized or is queued for unoptimizing */
			if (list_empty(&op->list) || optprobe_queued_unopt(op))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes at @addr have been
	 * overwritten by the jump destination address and must be recovered
	 * from the op->optinsn.copied_insn buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
				     MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		/* Restore the first opcode byte saved when the probe was armed */
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 with CLAC (0f 01 ca) */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	/* movabs $val, %rdi (REX.W prefix + 0xbf opcode) */
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	/* mov $val, %eax */
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}
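
/*
 * The detour template below is copied into each optprobe's instruction
 * slot. It builds a 'struct pt_regs' frame on the stack, hands it to
 * optimized_callback() together with the optprobe, and then restores
 * the registers. The NOPs at the _clac, _val and _call markers are
 * patched at preparation time with CLAC, a load of the optprobe pointer
 * into the first argument register, and the call to optimized_callback(),
 * respectively.
 */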
asm (
	".pushsection .rodata\n"
	"optprobe_template_func:\n"
	".global optprobe_template_entry\n"
	"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
	"	pushq $" __stringify(__KERNEL_DS) "\n"
	/* Save the 'sp - 8', this will be fixed later. */
	"	pushq %rsp\n"
	"	pushfq\n"
	".global optprobe_template_clac\n"
	"optprobe_template_clac:\n"
	ASM_NOP3
	SAVE_REGS_STRING
	"	movq %rsp, %rsi\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Copy 'regs->flags' into 'regs->ss'. */
	"	movq 18*8(%rsp), %rdx\n"
	"	movq %rdx, 20*8(%rsp)\n"
	RESTORE_REGS_STRING
	/* Skip 'regs->flags' and 'regs->sp'. */
	"	addq $16, %rsp\n"
	/* And pop flags register from 'regs->ss'. */
	"	popfq\n"
#else /* CONFIG_X86_32 */
	"	pushl %ss\n"
	/* Save the 'sp - 4', this will be fixed later. */
	"	pushl %esp\n"
	"	pushfl\n"
	".global optprobe_template_clac\n"
	"optprobe_template_clac:\n"
	ASM_NOP3
	SAVE_REGS_STRING
	"	movl %esp, %edx\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Copy 'regs->flags' into 'regs->ss'. */
	"	movl 14*4(%esp), %edx\n"
	"	movl %edx, 16*4(%esp)\n"
	RESTORE_REGS_STRING
	/* Skip 'regs->flags' and 'regs->sp'. */
	"	addl $8, %esp\n"
	/* And pop flags register from 'regs->ss'. */
	"	popfl\n"
#endif
	".global optprobe_template_end\n"
	"optprobe_template_end:\n"
	".popsection\n");

void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)
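
/*
 * Layout of a prepared instruction slot:
 *
 *	slot + 0			detour template (TMPL_END_IDX bytes)
 *	slot + TMPL_END_IDX		copy of the displaced instruction(s),
 *					op->optinsn.size bytes
 *	slot + TMPL_END_IDX + size	JMP32 back to kp->addr + op->optinsn.size
 */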

/* Optimized kprobe callback function: called from the detour code */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

		/* Adjust stack pointer */
		regs->sp += sizeof(long);
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
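
/*
 * Copy at least JMP32_INSN_SIZE bytes of instructions from @src into
 * @dest, relocating them for execution at @real. Fails with -EINVAL if
 * an instruction cannot be copied or safely executed out of line, and
 * with -EBUSY if the range is owned by ftrace, alternatives, jump
 * labels or static calls.
 */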
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}

	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	/*
	 * Opcode 0xff encodes JMP r/m (near) as ModRM.reg == 4 and JMP m
	 * (far) as ModRM.reg == 5; masking with '& 6' matches both.
	 * Opcode 0xea is the direct far (segment-based) jump.
	 */
	return ((insn->opcode.bytes[0] == 0xff &&
		 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to an __x86_indirect_thunk_* stub is treated as an indirect
	 * jump. Note that even with CONFIG_RETPOLINE=y, a kernel built with
	 * an older gcc may still emit plain indirect jumps, so this check is
	 * added on top of the indirect-jump check rather than replacing it.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr < (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		int ret;

		if (search_exception_tables(addr))
			/*
			 * Since fixup code may jump back into this function,
			 * we cannot optimize a kprobe within it.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;

		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
		if (ret < 0)
			return 0;
#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
#endif
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Ensure no instruction jumps into the displaced bytes */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disarmed(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;

	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}
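
/* Free the slot of a probe that was live: dirty == 1 records the removal for perf. */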
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the instructions that the displacing jump will overwrite.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify that the address gap is within the 2GB range reachable
	 * by a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note: len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is
	 * also used in __arch_remove_optimized_kprobe().
	 */

	/* We have to use text_poke() for instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}
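
/*
 * After optimization, control flow at the probed site looks like this
 * (illustrative sketch; each rel32 is computed per probe):
 *
 *	kp->addr:	e9 <rel32>	JMP32 to op->optinsn.insn
 *
 *	op->optinsn.insn:
 *		detour template		save regs, call optimized_callback()
 *		copied instruction(s)	the bytes the JMP32 displaced
 *		e9 <rel32>		JMP32 back to kp->addr + op->optinsn.size
 */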

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must call with locking kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

	/* Write the INT3 first, so any CPU reaching the site traps. */
	text_poke(addr, new, INT3_INSN_SIZE);
	text_poke_sync();
	/*
	 * With the INT3 globally visible, the 4 trailing bytes are dead
	 * and can safely be restored.
	 */
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	text_poke_sync();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must call with locking kprobe_mutex.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}
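
/*
 * Called from the INT3 handler: if the kprobe that trapped is optimized,
 * resume in its instruction slot past the template, so the copied
 * instruction(s) run natively instead of being single-stepped.
 */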
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}

	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);