// SPDX-License-Identifier: GPL-2.0
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <[email protected]>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 *
 * future possible enhancements:
 * - add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/jump_label.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/patch.h>

#define __hot __section(".text.hot")

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable);

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* activate parisc_return_to_handler() as return point */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
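
/*
 * Usage sketch (editor's note, not part of the original file): with
 * CONFIG_FUNCTION_GRAPH_TRACER enabled, the return-address hook above
 * can be exercised from userspace via tracefs, e.g.:
 *
 *   echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 */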

static ftrace_func_t ftrace_func;

void notrace __hot ftrace_function_trampoline(unsigned long parent,
					unsigned long self_addr,
					unsigned long org_sp_gr3,
					struct ftrace_regs *fregs)
{
	extern struct ftrace_ops *function_trace_op;

	ftrace_func(self_addr, parent, function_trace_op, fregs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (static_branch_unlikely(&ftrace_graph_enable)) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
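
/*
 * How control gets here (a sketch, inferred from the trampolines
 * patched in by ftrace_make_call() below): the compiler pads each
 * traced function entry with NOP words; ftrace_make_call() turns that
 * padding into a stub that saves %r1 and branches to the address the
 * ftrace core passed in, i.e. its assembly entry code, which in turn
 * calls this handler with the caller's %rp and the original stack
 * pointer (org_sp_gr3).
 */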

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int ftrace_enable_ftrace_graph_caller(void)
{
	static_key_enable(&ftrace_graph_enable.key);
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	static_key_disable(&ftrace_graph_enable.key);
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr + (FTRACE_PATCHABLE_FUNCTION_SIZE - 1) * 4;
}
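
/*
 * Worked example (illustrative numbers, not taken from asm/ftrace.h):
 * ftrace records the address of the first of the
 * FTRACE_PATCHABLE_FUNCTION_SIZE patchable NOP words emitted for each
 * traced function; if that constant were 6, the adjustment above would
 * yield addr + 5 * 4 = addr + 20, moving rec->ip to the last slot,
 * which is the word that ftrace_make_call() later turns into a branch.
 */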

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
		     FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
		     FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	ip = (void *)(rec->ip + 4 - size);

	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}
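
/*
 * Layout note (a sketch, inferred from the patching order here and in
 * ftrace_make_nop() below): the trampoline occupies the size bytes
 * ending at rec->ip + 4, so its final word, the backwards b,l, lands
 * at rec->ip itself, the only word of the patch area that sits in the
 * executed instruction stream. The preceding words are dead padding
 * while the call site is disabled, which is why it is safe to verify
 * them as NOPs and rewrite them in one __patch_text_multiple() call.
 */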

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn) - 4);
	return 0;
}
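
/*
 * Ordering note: the live word at rec->ip is restored to a NOP first,
 * in its own __patch_text() call, so the branch into the stub is dead
 * before the stub itself is overwritten; the second call then clears
 * the remaining sizeof(insn) - 4 bytes ahead of rec->ip.
 */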
#endif

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
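
/*
 * Note on the iaoq updates above: PA-RISC keeps the current and next
 * instruction addresses in the instruction address offset queue
 * (iaoq[0]/iaoq[1]). The handler rewinds the queue to the probed
 * address so the pre_handler observes the probe site, then advances
 * it past the probed word before resuming.
 */

/*
 * No out-of-line single-step slot is needed when the probe rides on
 * ftrace, so arch_prepare_kprobe_ftrace() just clears ainsn.insn.
 */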
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif