/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Split from ftrace_64.S
 */

#include <linux/magic.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
#include <asm/thread_info.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
/*
 *
 * ftrace_caller()/ftrace_regs_caller() is the function that replaces _mcount()
 * when ftrace is active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter r1 points to A's stack frame, B has not yet
 * had a chance to allocate one.
 *
 * Additionally r2 may point either to the TOC for A, or B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, ie. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
  33. .macro ftrace_regs_entry allregs
  34. /* Create a minimal stack frame for representing B */
  35. PPC_STLU r1, -STACK_FRAME_MIN_SIZE(r1)
  36. /* Create our stack frame + pt_regs */
  37. PPC_STLU r1,-SWITCH_FRAME_SIZE(r1)
  38. /* Save all gprs to pt_regs */
  39. SAVE_GPR(0, r1)
  40. SAVE_GPRS(3, 10, r1)
  41. #ifdef CONFIG_PPC64
  42. /* Save the original return address in A's stack frame */
  43. std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
  44. /* Ok to continue? */
  45. lbz r3, PACA_FTRACE_ENABLED(r13)
  46. cmpdi r3, 0
  47. beq ftrace_no_trace
  48. #endif
  49. .if \allregs == 1
  50. SAVE_GPR(2, r1)
  51. SAVE_GPRS(11, 31, r1)
  52. .else
  53. #ifdef CONFIG_LIVEPATCH_64
  54. SAVE_GPR(14, r1)
  55. #endif
  56. .endif
  57. /* Save previous stack pointer (r1) */
  58. addi r8, r1, SWITCH_FRAME_SIZE
  59. PPC_STL r8, GPR1(r1)
  60. .if \allregs == 1
  61. /* Load special regs for save below */
  62. mfmsr r8
  63. mfctr r9
  64. mfxer r10
  65. mfcr r11
  66. .else
  67. /* Clear MSR to flag as ftrace_caller versus frace_regs_caller */
  68. li r8, 0
  69. .endif
  70. /* Get the _mcount() call site out of LR */
  71. mflr r7
  72. /* Save it as pt_regs->nip */
  73. PPC_STL r7, _NIP(r1)
  74. /* Also save it in B's stackframe header for proper unwind */
  75. PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
  76. /* Save the read LR in pt_regs->link */
  77. PPC_STL r0, _LINK(r1)
  78. #ifdef CONFIG_PPC64
  79. /* Save callee's TOC in the ABI compliant location */
  80. std r2, STK_GOT(r1)
  81. LOAD_PACA_TOC() /* get kernel TOC in r2 */
  82. LOAD_REG_ADDR(r3, function_trace_op)
  83. ld r5,0(r3)
  84. #else
  85. lis r3,function_trace_op@ha
  86. lwz r5,function_trace_op@l(r3)
  87. #endif
  88. #ifdef CONFIG_LIVEPATCH_64
  89. mr r14, r7 /* remember old NIP */
  90. #endif
  91. /* Calculate ip from nip-4 into r3 for call below */
  92. subi r3, r7, MCOUNT_INSN_SIZE
  93. /* Put the original return address in r4 as parent_ip */
  94. mr r4, r0
  95. /* Save special regs */
  96. PPC_STL r8, _MSR(r1)
  97. .if \allregs == 1
  98. PPC_STL r9, _CTR(r1)
  99. PPC_STL r10, _XER(r1)
  100. PPC_STL r11, _CCR(r1)
  101. .endif
  102. /* Load &pt_regs in r6 for call below */
  103. addi r6, r1, STACK_FRAME_OVERHEAD
  104. .endm
  105. .macro ftrace_regs_exit allregs
  106. /* Load ctr with the possibly modified NIP */
  107. PPC_LL r3, _NIP(r1)
  108. mtctr r3
  109. #ifdef CONFIG_LIVEPATCH_64
  110. cmpd r14, r3 /* has NIP been altered? */
  111. #endif
  112. /* Restore gprs */
  113. .if \allregs == 1
  114. REST_GPRS(2, 31, r1)
  115. .else
  116. REST_GPRS(3, 10, r1)
  117. #ifdef CONFIG_LIVEPATCH_64
  118. REST_GPR(14, r1)
  119. #endif
  120. .endif
  121. /* Restore possibly modified LR */
  122. PPC_LL r0, _LINK(r1)
  123. mtlr r0
  124. #ifdef CONFIG_PPC64
  125. /* Restore callee's TOC */
  126. ld r2, STK_GOT(r1)
  127. #endif
  128. /* Pop our stack frame */
  129. addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
  130. #ifdef CONFIG_LIVEPATCH_64
  131. /* Based on the cmpd above, if the NIP was altered handle livepatch */
  132. bne- livepatch_handler
  133. #endif
  134. bctr /* jump after _mcount site */
  135. .endm
  136. _GLOBAL(ftrace_regs_caller)
  137. ftrace_regs_entry 1
  138. /* ftrace_call(r3, r4, r5, r6) */
  139. .globl ftrace_regs_call
  140. ftrace_regs_call:
  141. bl ftrace_stub
  142. nop
  143. ftrace_regs_exit 1
  144. _GLOBAL(ftrace_caller)
  145. ftrace_regs_entry 0
  146. /* ftrace_call(r3, r4, r5, r6) */
  147. .globl ftrace_call
  148. ftrace_call:
  149. bl ftrace_stub
  150. nop
  151. ftrace_regs_exit 0
  152. _GLOBAL(ftrace_stub)
  153. blr
  154. #ifdef CONFIG_PPC64
  155. ftrace_no_trace:
  156. mflr r3
  157. mtctr r3
  158. REST_GPR(3, r1)
  159. addi r1, r1, SWITCH_FRAME_SIZE
  160. mtlr r0
  161. bctr
  162. #endif
  163. #ifdef CONFIG_LIVEPATCH_64
  164. /*
  165. * This function runs in the mcount context, between two functions. As
  166. * such it can only clobber registers which are volatile and used in
  167. * function linkage.
  168. *
  169. * We get here when a function A, calls another function B, but B has
  170. * been live patched with a new function C.
  171. *
  172. * On entry:
  173. * - we have no stack frame and can not allocate one
  174. * - LR points back to the original caller (in A)
  175. * - CTR holds the new NIP in C
  176. * - r0, r11 & r12 are free
  177. */
  178. livepatch_handler:
  179. ld r12, PACA_THREAD_INFO(r13)
  180. /* Allocate 3 x 8 bytes */
  181. ld r11, TI_livepatch_sp(r12)
  182. addi r11, r11, 24
  183. std r11, TI_livepatch_sp(r12)
  184. /* Save toc & real LR on livepatch stack */
  185. std r2, -24(r11)
  186. mflr r12
  187. std r12, -16(r11)
  188. /* Store stack end marker */
  189. lis r12, STACK_END_MAGIC@h
  190. ori r12, r12, STACK_END_MAGIC@l
  191. std r12, -8(r11)
  192. /* Put ctr in r12 for global entry and branch there */
  193. mfctr r12
  194. bctrl
  195. /*
  196. * Now we are returning from the patched function to the original
  197. * caller A. We are free to use r11, r12 and we can use r2 until we
  198. * restore it.
  199. */
  200. ld r12, PACA_THREAD_INFO(r13)
  201. ld r11, TI_livepatch_sp(r12)
  202. /* Check stack marker hasn't been trashed */
  203. lis r2, STACK_END_MAGIC@h
  204. ori r2, r2, STACK_END_MAGIC@l
  205. ld r12, -8(r11)
  206. 1: tdne r12, r2
  207. EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
  208. /* Restore LR & toc from livepatch stack */
  209. ld r12, -16(r11)
  210. mtlr r12
  211. ld r2, -24(r11)
  212. /* Pop livepatch stack frame */
  213. ld r12, PACA_THREAD_INFO(r13)
  214. subi r11, r11, 24
  215. std r11, TI_livepatch_sp(r12)
  216. /* Return to original caller of live patched function */
  217. blr
  218. #endif /* CONFIG_LIVEPATCH */