/* ftrace.c — RISC-V dynamic ftrace support (text patching of mcount call sites) */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2013 Linaro Limited
  4. * Author: AKASHI Takahiro <[email protected]>
  5. * Copyright (C) 2017 Andes Technology Corporation
  6. */
  7. #include <linux/ftrace.h>
  8. #include <linux/uaccess.h>
  9. #include <linux/memory.h>
  10. #include <asm/cacheflush.h>
  11. #include <asm/patch.h>
  12. #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Called by the ftrace core before a batch of text modifications.
 * Takes text_mutex for the duration of the update and sets
 * riscv_patch_in_stop_machine so the patching code can skip its
 * text_mutex ownership check (see comment below).
 */
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now. This doesn't play nice with text_mutex, we use this flag
	 * to elide the check.
	 */
	riscv_patch_in_stop_machine = true;
}
/*
 * Called by the ftrace core after a batch of text modifications:
 * clear the stop_machine patching flag set in
 * ftrace_arch_code_modify_prepare() and release text_mutex.
 */
void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}
  29. static int ftrace_check_current_call(unsigned long hook_pos,
  30. unsigned int *expected)
  31. {
  32. unsigned int replaced[2];
  33. unsigned int nops[2] = {NOP4, NOP4};
  34. /* we expect nops at the hook position */
  35. if (!expected)
  36. expected = nops;
  37. /*
  38. * Read the text we want to modify;
  39. * return must be -EFAULT on read error
  40. */
  41. if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
  42. MCOUNT_INSN_SIZE))
  43. return -EFAULT;
  44. /*
  45. * Make sure it is what we expect it to be;
  46. * return must be -EINVAL on failed comparison
  47. */
  48. if (memcmp(expected, replaced, sizeof(replaced))) {
  49. pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
  50. (void *)hook_pos, expected[0], expected[1], replaced[0],
  51. replaced[1]);
  52. return -EINVAL;
  53. }
  54. return 0;
  55. }
  56. static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
  57. bool enable, bool ra)
  58. {
  59. unsigned int call[2];
  60. unsigned int nops[2] = {NOP4, NOP4};
  61. if (ra)
  62. make_call_ra(hook_pos, target, call);
  63. else
  64. make_call_t0(hook_pos, target, call);
  65. /* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
  66. if (patch_text_nosync
  67. ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
  68. return -EPERM;
  69. return 0;
  70. }
  71. int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  72. {
  73. unsigned int call[2];
  74. make_call_t0(rec->ip, addr, call);
  75. if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
  76. return -EPERM;
  77. return 0;
  78. }
  79. int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
  80. unsigned long addr)
  81. {
  82. unsigned int nops[2] = {NOP4, NOP4};
  83. if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
  84. return -EPERM;
  85. return 0;
  86. }
  87. /*
  88. * This is called early on, and isn't wrapped by
  89. * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
  90. * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
  91. * just directly poke the text, but it's simpler to just take the lock
  92. * ourselves.
  93. */
  94. int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
  95. {
  96. int out;
  97. mutex_lock(&text_mutex);
  98. out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
  99. mutex_unlock(&text_mutex);
  100. return out;
  101. }
  102. int ftrace_update_ftrace_func(ftrace_func_t func)
  103. {
  104. int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
  105. (unsigned long)func, true, true);
  106. if (!ret) {
  107. ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
  108. (unsigned long)func, true, true);
  109. }
  110. return ret;
  111. }
  112. #endif
  113. #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
  114. int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
  115. unsigned long addr)
  116. {
  117. unsigned int call[2];
  118. unsigned long caller = rec->ip;
  119. int ret;
  120. make_call_t0(caller, old_addr, call);
  121. ret = ftrace_check_current_call(caller, call);
  122. if (ret)
  123. return ret;
  124. return __ftrace_modify_call(caller, addr, true, false);
  125. }
  126. #endif
  127. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  128. /*
  129. * Most of this function is copied from arm64.
  130. */
  131. void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
  132. unsigned long frame_pointer)
  133. {
  134. unsigned long return_hooker = (unsigned long)&return_to_handler;
  135. unsigned long old;
  136. if (unlikely(atomic_read(&current->tracing_graph_pause)))
  137. return;
  138. /*
  139. * We don't suffer access faults, so no extra fault-recovery assembly
  140. * is needed here.
  141. */
  142. old = *parent;
  143. if (!function_graph_enter(old, self_addr, frame_pointer, parent))
  144. *parent = return_hooker;
  145. }
  146. #ifdef CONFIG_DYNAMIC_FTRACE
  147. extern void ftrace_graph_call(void);
  148. extern void ftrace_graph_regs_call(void);
  149. int ftrace_enable_ftrace_graph_caller(void)
  150. {
  151. int ret;
  152. ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
  153. (unsigned long)&prepare_ftrace_return, true, true);
  154. if (ret)
  155. return ret;
  156. return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
  157. (unsigned long)&prepare_ftrace_return, true, true);
  158. }
  159. int ftrace_disable_ftrace_graph_caller(void)
  160. {
  161. int ret;
  162. ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
  163. (unsigned long)&prepare_ftrace_return, false, true);
  164. if (ret)
  165. return ret;
  166. return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
  167. (unsigned long)&prepare_ftrace_return, false, true);
  168. }
  169. #endif /* CONFIG_DYNAMIC_FTRACE */
  170. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */