/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <[email protected]>
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
 * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
 * ftrace_make_call() have patched those NOPs to:
 *
 *	MOV	X9, LR
 *	BL	<entry>
 *
 * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
 *
 * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
 * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
 * clobber.
 *
 * We save the callsite's context into a pt_regs before invoking any ftrace
 * callbacks. So that we can get a sensible backtrace, we create a stack record
 * for the callsite and the ftrace entry assembly. This is not sufficient for
 * reliable stacktrace: until we create the callsite stack record, its caller
 * is missing from the LR and existing chain of frame records.
 */
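/*
 * A rough sketch of the stack after ftrace_regs_entry runs, as implied by the
 * macro below (offsets are the asm-offsets S_* constants, not literal
 * numbers):
 *
 *	sp + PT_REGS_SIZE + 16	-> the callsite's original SP (saved as
 *				   regs->sp)
 *	sp + PT_REGS_SIZE	-> frame record for the callsite { x29, x9 }
 *	sp + S_STACKFRAME	-> frame record for this assembly { x29, x30 }
 *	sp ... sp + S_PC	-> the saved pt_regs itself
 */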
	.macro	ftrace_regs_entry, allregs=0
	/* Make room for pt_regs, plus a callee frame */
	sub	sp, sp, #(PT_REGS_SIZE + 16)

	/* Save function arguments (and x9 for simplicity) */
	stp	x0, x1, [sp, #S_X0]
	stp	x2, x3, [sp, #S_X2]
	stp	x4, x5, [sp, #S_X4]
	stp	x6, x7, [sp, #S_X6]
	stp	x8, x9, [sp, #S_X8]

	/* Optionally save the callee-saved registers, always save the FP */
	.if \allregs == 1
	stp	x10, x11, [sp, #S_X10]
	stp	x12, x13, [sp, #S_X12]
	stp	x14, x15, [sp, #S_X14]
	stp	x16, x17, [sp, #S_X16]
	stp	x18, x19, [sp, #S_X18]
	stp	x20, x21, [sp, #S_X20]
	stp	x22, x23, [sp, #S_X22]
	stp	x24, x25, [sp, #S_X24]
	stp	x26, x27, [sp, #S_X26]
	stp	x28, x29, [sp, #S_X28]
	.else
	str	x29, [sp, #S_FP]
	.endif

	/* Save the callsite's SP and LR */
	add	x10, sp, #(PT_REGS_SIZE + 16)
	stp	x9, x10, [sp, #S_LR]

	/* Save the PC after the ftrace callsite */
	str	x30, [sp, #S_PC]

	/* Create a frame record for the callsite above pt_regs */
	stp	x29, x9, [sp, #PT_REGS_SIZE]
	add	x29, sp, #PT_REGS_SIZE

	/* Create our frame record within pt_regs. */
	stp	x29, x30, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME
	.endm

SYM_CODE_START(ftrace_regs_caller)
	bti	c
	ftrace_regs_entry	1
	b	ftrace_common
SYM_CODE_END(ftrace_regs_caller)

SYM_CODE_START(ftrace_caller)
	bti	c
	ftrace_regs_entry	0
	b	ftrace_common
SYM_CODE_END(ftrace_caller)

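/*
 * ftrace_common sets up the arguments for the tracer callback patched in at
 * ftrace_call below. A rough sketch of the call, assuming the ftrace_func_t
 * shape from include/linux/ftrace.h (the exact type of the last parameter
 * varies across kernel versions):
 *
 *	tracer(ip,		// address of the callsite's BL instruction
 *	       parent_ip,	// the callsite's LR
 *	       op,		// function_trace_op
 *	       regs);		// the register state saved above (sp)
 */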
SYM_CODE_START(ftrace_common)
	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	mov	x1, x9				// parent_ip (callsite's LR)
	ldr_l	x2, function_trace_op		// op
	mov	x3, sp				// regs

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
	bl	ftrace_stub

/*
 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
 * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
 * to restore x0-x8, x29, and x30.
 */
	/* Restore function arguments */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x8, [sp, #S_X8]

	/* Restore the callsite's FP, LR, PC */
	ldr	x29, [sp, #S_FP]
	ldr	x30, [sp, #S_LR]
	ldr	x9, [sp, #S_PC]

	/* Restore the callsite's SP */
	add	sp, sp, #PT_REGS_SIZE + 16

	ret	x9
SYM_CODE_END(ftrace_common)

#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

/*
 * Gcc with -pg will put the following code at the beginning of each function:
 *	mov	x0, x30
 *	bl	_mcount
 *	[function's body ...]
 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
 * ftrace is enabled.
 *
 * Please note that x0 as an argument will not be used here because we can
 * get the instrumented function's lr (x30) at any time by walking up the
 * call stack, as long as the kernel is compiled without -fomit-frame-pointer
 * (i.e. with CONFIG_FRAME_POINTER, which is forced on arm64).
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented => +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */

	.macro mcount_enter
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
	.endm

	.macro mcount_exit
	ldp	x29, x30, [sp], #16
	ret
	.endm

	.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
	.endm

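/*
 * The accessor macros below walk the frame records shown in the diagram
 * above. A brief summary of what each one reads, derived from the macro
 * bodies themselves:
 *
 *	mcount_get_parent_fp:	[x29] -> instrumented function's fp, then
 *				[fp] -> parent's fp
 *	mcount_get_pc:		[x29, #8] = _mcount()'s lr, adjusted back by
 *				one instruction to the callsite's pc
 *	mcount_get_lr:		[x29] -> instrumented function's fp, then
 *				[fp, #8] -> its saved lr (= parent's pc)
 *	mcount_get_lr_addr:	the address of that saved lr slot
 */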
	/* for instrumented function's parent */
	.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg]
	.endm

	/* for instrumented function */
	.macro mcount_get_pc0 reg
	mcount_adjust_addr	\reg, x30
	.endm

	.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]
	mcount_adjust_addr	\reg, \reg
	.endm

	.macro mcount_get_lr reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg, #8]
	.endm

	.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]
	add	\reg, \reg, #8
	.endm

#ifndef CONFIG_DYNAMIC_FTRACE
/*
 * void _mcount(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function makes calls, if enabled, to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
SYM_FUNC_START(_mcount)
	mcount_enter

	ldr_l	x2, ftrace_trace_function
	adr	x0, ftrace_stub
	cmp	x0, x2			// if (ftrace_trace_function
	b.eq	skip_ftrace_call	//     != ftrace_stub) {
	mcount_get_pc	x0		//       function's pc
	mcount_get_lr	x1		//       function's lr (= parent's pc)
	blr	x2			//   (*ftrace_trace_function)(pc, lr);
skip_ftrace_call:			// }

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr_l	x2, ftrace_graph_return
	cmp	x0, x2			// if ((ftrace_graph_return
	b.ne	ftrace_graph_caller	//      != ftrace_stub)

	ldr_l	x2, ftrace_graph_entry	//   || (ftrace_graph_entry
	adr_l	x0, ftrace_graph_entry_stub //  != ftrace_graph_entry_stub))
	cmp	x0, x2
	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
	mcount_exit
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)

#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount() is needed to build the kernel with the -pg option, but all the
 * branch instructions to _mcount() are replaced with NOPs initially at kernel
 * start-up. Later on, each NOP is patched into a branch to ftrace_caller()
 * when tracing is enabled, or back into a NOP when it is disabled, on a
 * per-function basis.
 */
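/*
 * To illustrate, a patched callsite therefore moves between these states
 * (a sketch of the patching described above):
 *
 *	bl	_mcount		// as emitted by the compiler with -pg
 *	nop			// after boot-time init, or tracing disabled
 *	bl	ftrace_caller	// while tracing this function is enabled
 */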
SYM_FUNC_START(_mcount)
	ret
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
SYM_FUNC_START(ftrace_caller)
	mcount_enter

	mcount_get_pc0	x0		// function's pc
	mcount_get_lr	x1		// function's lr

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)	// tracer(pc, lr);
	nop				// This will be replaced with "bl xxx"
					// where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// with "b ftrace_graph_caller"
#endif

	mcount_exit
SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when the function_graph tracer is
 * selected.
 * Together with prepare_ftrace_return(), this function fakes the link
 * register's value on the call stack in order to intercept the instrumented
 * function's return path and run return_to_handler() later on its exit.
 */
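/*
 * For reference, the C helper called below is expected to look roughly like
 * this (see arch/arm64/kernel/ftrace.c for the authoritative prototype):
 *
 *	void prepare_ftrace_return(unsigned long self_addr,
 *				   unsigned long *parent,
 *				   unsigned long frame_pointer);
 *
 * It overwrites *parent (the saved lr slot) with the address of
 * return_to_handler() so the traced function returns there instead.
 */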
SYM_FUNC_START(ftrace_graph_caller)
	mcount_get_pc		x0	// function's pc
	mcount_get_lr_addr	x1	// pointer to function's saved lr
	mcount_get_parent_fp	x2	// parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(pc, &lr, fp)

	mcount_exit
SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

SYM_TYPED_FUNC_START(ftrace_stub)
	ret
SYM_FUNC_END(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_TYPED_FUNC_START(ftrace_stub_graph)
	ret
SYM_FUNC_END(ftrace_stub_graph)

/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 */
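/*
 * For reference, the core-ftrace helper called below is expected to have
 * roughly this shape (see kernel/trace/fgraph.c for the authoritative
 * prototype):
 *
 *	unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
 *
 * It pops the return-stack entry pushed by prepare_ftrace_return() and hands
 * back the original return address.
 */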
SYM_CODE_START(return_to_handler)
	/* save return value regs */
	sub	sp, sp, #64
	stp	x0, x1, [sp]
	stp	x2, x3, [sp, #16]
	stp	x4, x5, [sp, #32]
	stp	x6, x7, [sp, #48]

	mov	x0, x29			// parent's fp
	bl	ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
	mov	x30, x0			// restore the original return address

	/* restore return value regs */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #16]
	ldp	x4, x5, [sp, #32]
	ldp	x6, x7, [sp, #48]
	add	sp, sp, #64

	ret
SYM_CODE_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */