ftrace.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Code for replacing ftrace calls with jumps.
  4. *
  5. * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
  6. *
  7. * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
  8. *
  9. * Added function graph tracer code, taken from x86 that was written
  10. * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
  11. *
  12. */
  13. #define pr_fmt(fmt) "ftrace-powerpc: " fmt
  14. #include <linux/spinlock.h>
  15. #include <linux/hardirq.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/module.h>
  18. #include <linux/ftrace.h>
  19. #include <linux/percpu.h>
  20. #include <linux/init.h>
  21. #include <linux/list.h>
  22. #include <asm/cacheflush.h>
  23. #include <asm/code-patching.h>
  24. #include <asm/ftrace.h>
  25. #include <asm/syscall.h>
  26. #include <asm/inst.h>
  27. /*
  28. * We generally only have a single long_branch tramp and at most 2 or 3 plt
  29. * tramps generated. But, we don't use the plt tramps currently. We also allot
  30. * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
  31. * tramps in total. Set aside 8 just to be sure.
  32. */
  33. #define NUM_FTRACE_TRAMPS 8
  34. static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
  35. static ppc_inst_t
  36. ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
  37. {
  38. ppc_inst_t op;
  39. addr = ppc_function_entry((void *)addr);
  40. /* if (link) set op to 'bl' else 'b' */
  41. create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
  42. return op;
  43. }
  44. static inline int
  45. ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
  46. {
  47. ppc_inst_t replaced;
  48. /*
  49. * Note:
  50. * We are paranoid about modifying text, as if a bug was to happen, it
  51. * could cause us to read or write to someplace that could cause harm.
  52. * Carefully read and modify the code with probe_kernel_*(), and make
  53. * sure what we read is what we expected it to be before modifying it.
  54. */
  55. /* read the text we want to modify */
  56. if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
  57. return -EFAULT;
  58. /* Make sure it is what we expect it to be */
  59. if (!ppc_inst_equal(replaced, old)) {
  60. pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
  61. ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
  62. return -EINVAL;
  63. }
  64. /* replace the text with the new text */
  65. return patch_instruction((u32 *)ip, new);
  66. }
  67. /*
  68. * Helper functions that are the same for both PPC64 and PPC32.
  69. */
  70. static int test_24bit_addr(unsigned long ip, unsigned long addr)
  71. {
  72. addr = ppc_function_entry((void *)addr);
  73. return is_offset_in_branch_range(addr - ip);
  74. }
  75. static int is_bl_op(ppc_inst_t op)
  76. {
  77. return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
  78. }
  79. static int is_b_op(ppc_inst_t op)
  80. {
  81. return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
  82. }
  83. static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
  84. {
  85. int offset;
  86. offset = PPC_LI(ppc_inst_val(op));
  87. /* make it signed */
  88. if (offset & 0x02000000)
  89. offset |= 0xfe000000;
  90. return ip + (long)offset;
  91. }
  92. #ifdef CONFIG_MODULES
/*
 * Turn a module's ftrace call site back into a nop (or a 'b +8').
 *
 * The site at rec->ip must currently be a 'bl' into a module trampoline
 * whose final target matches @addr. The surrounding instructions are
 * validated for the active ABI before any patching takes place.
 *
 * Returns 0 on success, -EFAULT if text cannot be read or the trampoline
 * cannot be resolved, -EINVAL if the code is not what we expect, -EPERM
 * if patching fails.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Resolve the module trampoline to its final destination */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Validate the instruction preceding the 'bl _mcount' */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
			return -EFAULT;
		}

		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
		if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
		    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
			pr_err("Unexpected instruction %08lx around bl _mcount\n",
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		/*
		 * Check what is in the next instruction. We can see ld r2,40(r1), but
		 * on first pass after boot we will see mflr r0.
		 */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
			pr_err("Fetching op failed.\n");
			return -EFAULT;
		}

		if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
			pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	}

	/*
	 * When using -mprofile-kernel or PPC32 there is no load to jump over.
	 *
	 * Otherwise our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
		pop = ppc_inst(PPC_RAW_NOP());
	else
		pop = ppc_inst(PPC_RAW_BRANCH(8));	/* b +8 */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
  176. #else
/* Without CONFIG_MODULES there are no out-of-range call sites to nop out */
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
  181. #endif /* CONFIG_MODULES */
  182. static unsigned long find_ftrace_tramp(unsigned long ip)
  183. {
  184. int i;
  185. /*
  186. * We have the compiler generated long_branch tramps at the end
  187. * and we prefer those
  188. */
  189. for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
  190. if (!ftrace_tramps[i])
  191. continue;
  192. else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
  193. return ftrace_tramps[i];
  194. return 0;
  195. }
  196. static int add_ftrace_tramp(unsigned long tramp)
  197. {
  198. int i;
  199. for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
  200. if (!ftrace_tramps[i]) {
  201. ftrace_tramps[i] = tramp;
  202. return 0;
  203. }
  204. return -1;
  205. }
  206. /*
  207. * If this is a compiler generated long_branch trampoline (essentially, a
  208. * trampoline that has a branch to _mcount()), we re-write the branch to
  209. * instead go to ftrace_[regs_]caller() and note down the location of this
  210. * trampoline.
  211. */
/*
 * Validate that @tramp is a compiler-generated long_branch trampoline
 * (a single 'b' straight to _mcount), redirect it to ftrace_[regs_]caller
 * and record it in ftrace_tramps[]. Returns 0 on success (including when
 * the trampoline is already known), -1 on any failure.
 */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (ftrace_tramps[i] == tramp)
			return 0;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	/* Only a branch directly to _mcount qualifies as a compiler tramp */
	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		ptr = ppc_global_function_entry((void *)ftrace_caller);

	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	/* Remember this trampoline so other call sites can reuse it */
	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}
/*
 * Nop out an ftrace call site in the core kernel. The 'bl' at rec->ip
 * targets a compiler-generated trampoline; claim it for ftrace (or ensure
 * another reachable trampoline already exists) before patching the site.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
			       (void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
/*
 * ftrace_make_nop - turn the call site of @rec into a nop.
 *
 * @mod is non-NULL when called at module load time; on that first call
 * the module pointer is cached in rec->arch.mod for later invocations.
 * Dispatches to the in-range, core-kernel or module variant.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_nop_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		/* Sanity check: the cached module must match the one passed in */
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
}
  325. #ifdef CONFIG_MODULES
  326. /*
  327. * Examine the existing instructions for __ftrace_make_call.
  328. * They should effectively be a NOP, and follow formal constraints,
  329. * depending on the ABI. Return false if they don't.
  330. */
  331. static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
  332. {
  333. if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
  334. return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
  335. else
  336. return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
  337. ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
  338. }
  339. static int
  340. __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  341. {
  342. ppc_inst_t op[2];
  343. void *ip = (void *)rec->ip;
  344. unsigned long entry, ptr, tramp;
  345. struct module *mod = rec->arch.mod;
  346. /* read where this goes */
  347. if (copy_inst_from_kernel_nofault(op, ip))
  348. return -EFAULT;
  349. if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
  350. copy_inst_from_kernel_nofault(op + 1, ip + 4))
  351. return -EFAULT;
  352. if (!expected_nop_sequence(ip, op[0], op[1])) {
  353. pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
  354. ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
  355. return -EINVAL;
  356. }
  357. /* If we never set up ftrace trampoline(s), then bail */
  358. if (!mod->arch.tramp ||
  359. (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
  360. pr_err("No ftrace trampoline\n");
  361. return -EINVAL;
  362. }
  363. if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
  364. tramp = mod->arch.tramp_regs;
  365. else
  366. tramp = mod->arch.tramp;
  367. if (module_trampoline_target(mod, tramp, &ptr)) {
  368. pr_err("Failed to get trampoline target\n");
  369. return -EFAULT;
  370. }
  371. pr_devel("trampoline target %lx", ptr);
  372. entry = ppc_global_function_entry((void *)addr);
  373. /* This should match what was called */
  374. if (ptr != entry) {
  375. pr_err("addr %lx does not match expected %lx\n", ptr, entry);
  376. return -EINVAL;
  377. }
  378. if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
  379. pr_err("REL24 out of range!\n");
  380. return -EINVAL;
  381. }
  382. return 0;
  383. }
  384. #else
/* Without CONFIG_MODULES there are no out-of-range call sites to enable */
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
  389. #endif /* CONFIG_MODULES */
/*
 * Enable tracing at a core-kernel call site: replace the nop at rec->ip
 * with a 'bl' to a reachable ftrace trampoline. @addr must resolve to
 * ftrace_caller (or ftrace_regs_caller when WITH_REGS is enabled).
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);

	if (ptr != entry) {
		pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
		return -EINVAL;
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %08lx\n",
		       ip, ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Find a trampoline within 24-bit branch range of this site */
	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * ftrace_make_call - turn the nop at @rec's call site into a call to @addr.
 * Dispatches to the in-range, core-kernel or module variant.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_call_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
}
  456. #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
  457. #ifdef CONFIG_MODULES
/*
 * Retarget a module's ftrace call site from @old_addr to @addr without
 * passing through a nop state. Verifies the current 'bl' really reaches
 * @old_addr (directly or via a trampoline), then patches either a direct
 * branch (when @addr is in range) or a branch to the module trampoline.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		     unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	/* Out of range: go via the regs or plain module trampoline */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);

	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
  528. #else
/* Without CONFIG_MODULES there are no out-of-range call sites to retarget */
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	return 0;
}
  533. #endif
/*
 * ftrace_modify_call - retarget @rec's call site from @old_addr to @addr.
 * Dispatches to the in-range, core-kernel or module variant.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
}
  568. #endif
  569. int ftrace_update_ftrace_func(ftrace_func_t func)
  570. {
  571. unsigned long ip = (unsigned long)(&ftrace_call);
  572. ppc_inst_t old, new;
  573. int ret;
  574. old = ppc_inst_read((u32 *)&ftrace_call);
  575. new = ftrace_call_replace(ip, (unsigned long)func, 1);
  576. ret = ftrace_modify_code(ip, old, new);
  577. /* Also update the regs callback function */
  578. if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
  579. ip = (unsigned long)(&ftrace_regs_call);
  580. old = ppc_inst_read((u32 *)&ftrace_regs_call);
  581. new = ftrace_call_replace(ip, (unsigned long)func, 1);
  582. ret = ftrace_modify_code(ip, old, new);
  583. }
  584. return ret;
  585. }
  586. /*
  587. * Use the default ftrace_modify_all_code, but without
  588. * stop_machine().
  589. */
/* See the comment above: runs the generic update without stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
  594. #ifdef CONFIG_PPC64
  595. #define PACATOC offsetof(struct paca_struct, kernel_toc)
  596. extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
  597. void ftrace_free_init_tramp(void)
  598. {
  599. int i;
  600. for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
  601. if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
  602. ftrace_tramps[i] = 0;
  603. return;
  604. }
  605. }
/*
 * Install the two kernel ftrace trampolines (one after .text, one after
 * .init.text). Each stub loads the kernel TOC pointer from the paca,
 * builds the address of ftrace_[regs_]caller from a TOC-relative offset
 * and branches to it via CTR.
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		PPC_RAW_LD(_R12, _R13, PACATOC),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
	};
	unsigned long addr;
	long reladdr;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		addr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		addr = ppc_global_function_entry((void *)ftrace_caller);

	reladdr = addr - kernel_toc_addr();

	/* The addis/addi pair can only encode a 32-bit signed offset */
	if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
		       (void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		/* Patch the high-adjusted and low halves of the offset */
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
  637. #endif
  638. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  639. extern void ftrace_graph_call(void);
  640. extern void ftrace_graph_stub(void);
  641. static int ftrace_modify_ftrace_graph_caller(bool enable)
  642. {
  643. unsigned long ip = (unsigned long)(&ftrace_graph_call);
  644. unsigned long addr = (unsigned long)(&ftrace_graph_caller);
  645. unsigned long stub = (unsigned long)(&ftrace_graph_stub);
  646. ppc_inst_t old, new;
  647. if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
  648. return 0;
  649. old = ftrace_call_replace(ip, enable ? stub : addr, 0);
  650. new = ftrace_call_replace(ip, enable ? addr : stub, 0);
  651. return ftrace_modify_code(ip, old, new);
  652. }
/* Enable the branch into the function graph tracer */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}
/* Disable the branch into the function graph tracer */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}
  661. /*
  662. * Hook the return address and push it in the stack of return addrs
  663. * in current thread info. Return the address we want to divert to.
  664. */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* Guard against recursive entry of the graph tracer */
	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* On success, divert the traced function's return to the hooker */
	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	/* Unchanged @parent means the return is left undiverted */
	return parent;
}
  684. #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/* With ftrace args support, hook the traced function's LR directly */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* gpr[1] is the traced function's stack pointer */
	fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
  690. #else
/* Entry point used by the assembly graph caller; see __prepare_ftrace_return() */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp);
}
  696. #endif
  697. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  698. #ifdef CONFIG_PPC64_ELF_ABI_V1
  699. char *arch_ftrace_match_adjust(char *str, const char *search)
  700. {
  701. if (str[0] == '.' && search[0] != '.')
  702. return str + 1;
  703. else
  704. return str;
  705. }
  706. #endif /* CONFIG_PPC64_ELF_ABI_V1 */