  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
  3. #include <linux/ftrace.h>
  4. #include <linux/uaccess.h>
  5. #include <linux/stop_machine.h>
  6. #include <asm/cacheflush.h>
  7. #ifdef CONFIG_DYNAMIC_FTRACE
  8. #define NOP 0x4000
  9. #define NOP32_HI 0xc400
  10. #define NOP32_LO 0x4820
  11. #define PUSH_LR 0x14d0
  12. #define MOVIH_LINK 0xea3a
  13. #define ORI_LINK 0xef5a
  14. #define JSR_LINK 0xe8fa
  15. #define BSR_LINK 0xe000
  16. /*
  17. * Gcc-csky with -pg will insert stub in function prologue:
  18. * push lr
  19. * jbsr _mcount
  20. * nop32
  21. * nop32
  22. *
* If the (callee - current_pc) is less than 64MB, we'll use bsr:
  24. * push lr
  25. * bsr _mcount
  26. * nop32
  27. * nop32
  28. * else we'll use (movih + ori + jsr):
  29. * push lr
  30. * movih r26, ...
  31. * ori r26, ...
  32. * jsr r26
  33. *
  34. * (r26 is our reserved link-reg)
  35. *
  36. */
/*
 * Emit the 7-halfword (14-byte) call stub into @call that transfers
 * control from @pc to @callee, picking between the short bsr form and
 * the long movih/ori/jsr form depending on the branch distance.
 *
 * @callee: absolute address of the trace target (_mcount or trampoline)
 * @pc:     address of the call site being patched
 * @call:   output buffer of 7 uint16_t instruction halfwords
 * @nolr:   when true, the first slot becomes a 16-bit nop instead of
 *          "push lr" (used for the in-kernel ftrace_*_call sites that
 *          must not re-save lr)
 */
static inline void make_jbsr(unsigned long callee, unsigned long pc,
			     uint16_t *call, bool nolr)
{
	long offset;

	call[0] = nolr ? NOP : PUSH_LR;

	offset = (long) callee - (long) pc;

	/* bsr reaches only +-64MB (67108864 bytes) from the call site */
	if (unlikely(offset < -67108864 || offset > 67108864)) {
		/* Long form: load callee into r26 (reserved link reg), jsr */
		call[1] = MOVIH_LINK;
		call[2] = callee >> 16;		/* high half of address */
		call[3] = ORI_LINK;
		call[4] = callee & 0xffff;	/* low half of address */
		call[5] = JSR_LINK;
		call[6] = 0;
	} else {
		/* bsr encodes the offset scaled down to halfword units */
		offset = offset >> 1;
		call[1] = BSR_LINK |
		 ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
		call[2] = (uint16_t)((unsigned long) offset & 0xffff);
		/* Pad the remaining slots with 32-bit nops */
		call[3] = call[5] = NOP32_HI;
		call[4] = call[6] = NOP32_LO;
	}
}
/*
 * The disabled-state stub: one 16-bit nop followed by three 32-bit nops,
 * matching the 14-byte layout gcc -pg emits at every function prologue.
 */
static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO,
				NOP32_HI, NOP32_LO};

/*
 * Verify that the 14 bytes at the patch site are still the expected nop
 * sequence before we enable tracing there.
 *
 * @hook: rec->ip of the call site; the patch region actually starts two
 *        bytes earlier (at the push-lr/nop slot), hence "hook - 2".
 *
 * Returns 0 on match, -EFAULT if the site is unreadable, -EINVAL if the
 * bytes differ (site was corrupted or patched by someone else).
 */
static int ftrace_check_current_nop(unsigned long hook)
{
	uint16_t olds[7];
	unsigned long hook_pos = hook - 2;

	if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos,
			sizeof(nops)))
		return -EFAULT;

	if (memcmp((void *)nops, (void *)olds, sizeof(nops))) {
		pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n",
			(void *)hook_pos,
			olds[0], olds[1], olds[2], olds[3], olds[4], olds[5],
			olds[6]);
		return -EINVAL;
	}

	return 0;
}
  77. static int ftrace_modify_code(unsigned long hook, unsigned long target,
  78. bool enable, bool nolr)
  79. {
  80. uint16_t call[7];
  81. unsigned long hook_pos = hook - 2;
  82. int ret = 0;
  83. make_jbsr(target, hook, call, nolr);
  84. ret = copy_to_kernel_nofault((void *)hook_pos, enable ? call : nops,
  85. sizeof(nops));
  86. if (ret)
  87. return -EPERM;
  88. flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);
  89. return 0;
  90. }
  91. int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  92. {
  93. int ret = ftrace_check_current_nop(rec->ip);
  94. if (ret)
  95. return ret;
  96. return ftrace_modify_code(rec->ip, addr, true, false);
  97. }
  98. int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
  99. unsigned long addr)
  100. {
  101. return ftrace_modify_code(rec->ip, addr, false, false);
  102. }
  103. int ftrace_update_ftrace_func(ftrace_func_t func)
  104. {
  105. int ret = ftrace_modify_code((unsigned long)&ftrace_call,
  106. (unsigned long)func, true, true);
  107. if (!ret)
  108. ret = ftrace_modify_code((unsigned long)&ftrace_regs_call,
  109. (unsigned long)func, true, true);
  110. return ret;
  111. }
  112. #endif /* CONFIG_DYNAMIC_FTRACE */
  113. #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
  114. int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
  115. unsigned long addr)
  116. {
  117. return ftrace_modify_code(rec->ip, addr, true, true);
  118. }
  119. #endif
  120. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the function-graph tracer into a function's return path: record
 * the real return address via function_graph_enter() and replace it
 * with return_to_handler so the exit can be traced.
 *
 * @parent:        location of the saved return address to overwrite
 * @self_addr:     address of the instrumented function
 * @frame_pointer: the traced function's frame (r8); slot 0 holds the
 *                 caller's saved frame data, slot +4 may hold a second
 *                 copy of lr (see comment below)
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	/* Graph tracing paused for this task - leave the site untouched */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr,
			*(unsigned long *)frame_pointer, parent)) {
		/*
		 * For csky-gcc function has sub-call:
		 * subi	sp,	sp, 8
		 * stw	r8,	(sp, 0)
		 * mov	r8,	sp
		 * st.w r15,	(sp, 0x4)
		 * push	r15
		 * jl	_mcount
		 * We only need set *parent for resume
		 *
		 * For csky-gcc function has no sub-call:
		 * subi	sp,	sp, 4
		 * stw	r8,	(sp, 0)
		 * mov	r8,	sp
		 * push	r15
		 * jl	_mcount
		 * We need set *parent and *(frame_pointer + 4) for resume,
		 * because lr is resumed twice.
		 */
		*parent = return_hooker;
		/* Patch the duplicated lr copy only if it matches the old lr */
		frame_pointer += 4;
		if (*(unsigned long *)frame_pointer == old)
			*(unsigned long *)frame_pointer = return_hooker;
	}
}
  156. #ifdef CONFIG_DYNAMIC_FTRACE
  157. int ftrace_enable_ftrace_graph_caller(void)
  158. {
  159. return ftrace_modify_code((unsigned long)&ftrace_graph_call,
  160. (unsigned long)&ftrace_graph_caller, true, true);
  161. }
  162. int ftrace_disable_ftrace_graph_caller(void)
  163. {
  164. return ftrace_modify_code((unsigned long)&ftrace_graph_call,
  165. (unsigned long)&ftrace_graph_caller, false, true);
  166. }
  167. #endif /* CONFIG_DYNAMIC_FTRACE */
  168. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  169. #ifdef CONFIG_DYNAMIC_FTRACE
  170. #ifndef CONFIG_CPU_HAS_ICACHE_INS
/*
 * Argument bundle passed through stop_machine() to __ftrace_modify_code.
 */
struct ftrace_modify_param {
	int command;		/* FTRACE_* command for ftrace_modify_all_code */
	atomic_t cpu_count;	/* rendezvous counter across online CPUs */
};
/*
 * Runs on every CPU under stop_machine(): exactly one CPU (the first to
 * bump cpu_count) performs the text modification; all others spin until
 * it finishes, then invalidate their local icache so they observe the
 * new instructions. Needed because this configuration has no
 * broadcast icache-invalidate instruction (!CONFIG_CPU_HAS_ICACHE_INS).
 */
static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == 1) {
		/* Winner: patch the kernel text ... */
		ftrace_modify_all_code(param->command);
		/* ... then bump the counter past num_online_cpus to release the spinners */
		atomic_inc(&param->cpu_count);
	} else {
		/* Losers: wait for the patching CPU's second increment */
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
		local_icache_inv_all(NULL);
	}

	return 0;
}
  188. void arch_ftrace_update_code(int command)
  189. {
  190. struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
  191. stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
  192. }
  193. #endif
  194. #endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount is defined in abi's mcount.S; exported so modules built
 * with -pg can resolve their prologue calls to it.
 */
EXPORT_SYMBOL(_mcount);