stacktrace.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void unwind_init_from_regs(struct unwind_state *state,
                                                  struct pt_regs *regs)
{
        unwind_init_common(state, current);

        state->fp = regs->regs[29];
        state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
        unwind_init_common(state, current);

        state->fp = (unsigned long)__builtin_frame_address(1);
        state->pc = (unsigned long)__builtin_return_address(0);
}
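
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * noinline helper into which unwind_init_from_caller() is inlined. Because
 * dump_here() is noinline, the unwind would start at whoever called
 * dump_here(); the name dump_here() is made up for the example, and the
 * stack-array setup is elided. arch_stack_walk() below is the real in-tree
 * caller and follows the same pattern.
 *
 *      static noinline void dump_here(stack_trace_consume_fn fn, void *cookie)
 *      {
 *              struct unwind_state state = { ... };
 *
 *              unwind_init_from_caller(&state);
 *              unwind(&state, fn, cookie);
 *      }
 */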

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void unwind_init_from_task(struct unwind_state *state,
                                                  struct task_struct *task)
{
        unwind_init_common(state, task);

        state->fp = thread_saved_fp(task);
        state->pc = thread_saved_pc(task);
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
        struct task_struct *tsk = state->task;
        unsigned long fp = state->fp;
        int err;

        /* Final frame; nothing to unwind */
        if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
                return -ENOENT;

        err = unwind_next_frame_record(state);
        if (err)
                return err;

        state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk->ret_stack &&
            (state->pc == (unsigned long)return_to_handler)) {
                unsigned long orig_pc;
                /*
                 * This is a case where the function graph tracer has
                 * modified a return address (LR) in a stack frame
                 * to hook a function return.
                 * So replace it with the original value.
                 */
                orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
                                                (void *)state->fp);
                if (WARN_ON_ONCE(state->pc == orig_pc))
                        return -EINVAL;
                state->pc = orig_pc;
        }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
        if (is_kretprobe_trampoline(state->pc))
                state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

        return 0;
}
NOKPROBE_SYMBOL(unwind_next);
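
/*
 * Walk the stack from the initialized state, passing each unwound PC to
 * consume_entry(). Stop when consume_entry() returns false or when no
 * further frames can be unwound.
 */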
static void notrace unwind(struct unwind_state *state,
                           stack_trace_consume_fn consume_entry, void *cookie)
{
        while (1) {
                int ret;

                if (!consume_entry(cookie, state->pc))
                        break;
                ret = unwind_next(state);
                if (ret < 0)
                        break;
        }
}
NOKPROBE_SYMBOL(unwind);
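
/*
 * stack_trace_consume_fn callback used by dump_backtrace(): print one
 * backtrace entry at the requested log level and keep walking.
 */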
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
        char *loglvl = arg;
        printk("%s %pSb\n", loglvl, (void *)where);
        return true;
}
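
/*
 * Print a backtrace for @tsk (or for the current task when @tsk is NULL),
 * starting from @regs when provided. Only kernel-mode regs are traced; the
 * task's stack is pinned with try_get_task_stack() for the duration of the
 * walk.
 */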
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                    const char *loglvl)
{
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs && user_mode(regs))
                return;

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        printk("%sCall trace:\n", loglvl);
        arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

        put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(dump_backtrace);

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        dump_backtrace(NULL, tsk, loglvl);
        barrier();
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)                                     \
        ({                                                      \
                ((task == current) && !preemptible())           \
                        ? stackinfo_get_##name()                \
                        : stackinfo_get_unknown();              \
        })

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)                                    \
        ({                                                      \
                ((task == current) && in_nmi())                 \
                        ? stackinfo_get_sdei_##name()           \
                        : stackinfo_get_unknown();              \
        })
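
/*
 * The EFI runtime services stack is only accessible when unwinding the
 * current task while it is executing an EFI runtime service
 * (current_in_efi()).
 */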
#define STACKINFO_EFI                                           \
        ({                                                      \
                ((task == current) && current_in_efi())         \
                        ? stackinfo_get_efi()                   \
                        : stackinfo_get_unknown();              \
        })
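
/*
 * Arch entry point for the generic stacktrace API: build the set of stacks
 * that may be unwound for @task, pick the appropriate starting point (regs,
 * the immediate caller, or the blocked task's saved context), and walk the
 * frame records, handing each PC to @consume_entry.
 */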
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                                      void *cookie, struct task_struct *task,
                                      struct pt_regs *regs)
{
        struct stack_info stacks[] = {
                stackinfo_get_task(task),
                STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
                STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
                STACKINFO_SDEI(normal),
                STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
                STACKINFO_EFI,
#endif
        };
        struct unwind_state state = {
                .stacks = stacks,
                .nr_stacks = ARRAY_SIZE(stacks),
        };

        if (regs) {
                if (task != current)
                        return;
                unwind_init_from_regs(&state, regs);
        } else if (task == current) {
                unwind_init_from_caller(&state);
        } else {
                unwind_init_from_task(&state, task);
        }

        unwind(&state, consume_entry, cookie);
}
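
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * consumer of arch_stack_walk(), collecting up to a fixed number of return
 * addresses into a buffer. The names store_entry() and struct trace_buf are
 * made up for the example; in-tree users such as dump_backtrace() above work
 * the same way.
 *
 *      struct trace_buf {
 *              unsigned long entries[16];
 *              unsigned int nr;
 *      };
 *
 *      static bool store_entry(void *cookie, unsigned long pc)
 *      {
 *              struct trace_buf *buf = cookie;
 *
 *              if (buf->nr >= ARRAY_SIZE(buf->entries))
 *                      return false;
 *              buf->entries[buf->nr++] = pc;
 *              return true;
 *      }
 *
 *      arch_stack_walk(store_entry, &buf, current, NULL);
 */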