/* arch/x86/kernel/dumpstack_64.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 1991, 1992 Linus Torvalds
  4. * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  5. */
  6. #include <linux/sched/debug.h>
  7. #include <linux/kallsyms.h>
  8. #include <linux/kprobes.h>
  9. #include <linux/uaccess.h>
  10. #include <linux/hardirq.h>
  11. #include <linux/kdebug.h>
  12. #include <linux/export.h>
  13. #include <linux/ptrace.h>
  14. #include <linux/kexec.h>
  15. #include <linux/sysfs.h>
  16. #include <linux/bug.h>
  17. #include <linux/nmi.h>
  18. #include <asm/cpu_entry_area.h>
  19. #include <asm/stacktrace.h>
/* Human-readable names for the IST exception stacks, indexed by ESTACK_* */
static const char * const exception_stack_names[] = {
		[ ESTACK_DF	]	= "#DF",
		[ ESTACK_NMI	]	= "NMI",
		[ ESTACK_DB	]	= "#DB",
		[ ESTACK_MCE	]	= "#MC",
		[ ESTACK_VC	]	= "#VC",
		[ ESTACK_VC2	]	= "#VC2",
};
  28. const char *stack_type_name(enum stack_type type)
  29. {
  30. BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
  31. if (type == STACK_TYPE_TASK)
  32. return "TASK";
  33. if (type == STACK_TYPE_IRQ)
  34. return "IRQ";
  35. if (type == STACK_TYPE_SOFTIRQ)
  36. return "SOFTIRQ";
  37. if (type == STACK_TYPE_ENTRY) {
  38. /*
  39. * On 64-bit, we have a generic entry stack that we
  40. * use for all the kernel entry points, including
  41. * SYSENTER.
  42. */
  43. return "ENTRY_TRAMPOLINE";
  44. }
  45. if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
  46. return exception_stack_names[type - STACK_TYPE_EXCEPTION];
  47. return NULL;
  48. }
/**
 * struct estack_pages - Page descriptor for exception stacks
 * @offs:	Offset from the start of the exception stack area
 * @size:	Size of the exception stack
 * @type:	Type to store in the stack_info struct
 */
struct estack_pages {
	u32	offs;
	u16	size;
	u16	type;
};
/*
 * Expands to a designated-initializer range covering every page spanned
 * by the given exception stack, so that all pages of one stack share the
 * same descriptor (offset, size and stack_info type).
 */
#define EPAGERANGE(st)							\
	[PFN_DOWN(CEA_ESTACK_OFFS(st)) ...				\
	 PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = {	\
		.offs	= CEA_ESTACK_OFFS(st),				\
		.size	= CEA_ESTACK_SIZE(st),				\
		.type	= STACK_TYPE_EXCEPTION + ESTACK_ ##st, }
/*
 * Array of exception stack page descriptors. If the stack is larger than
 * PAGE_SIZE, all pages covering a particular stack will have the same
 * info. The guard pages including the not mapped DB2 stack are zeroed
 * out.
 */
static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
	EPAGERANGE(DF),
	EPAGERANGE(NMI),
	EPAGERANGE(DB),
	EPAGERANGE(MCE),
	EPAGERANGE(VC),
	EPAGERANGE(VC2),
};
  81. static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
  82. {
  83. unsigned long begin, end, stk = (unsigned long)stack;
  84. const struct estack_pages *ep;
  85. struct pt_regs *regs;
  86. unsigned int k;
  87. BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
  88. begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
  89. /*
  90. * Handle the case where stack trace is collected _before_
  91. * cea_exception_stacks had been initialized.
  92. */
  93. if (!begin)
  94. return false;
  95. end = begin + sizeof(struct cea_exception_stacks);
  96. /* Bail if @stack is outside the exception stack area. */
  97. if (stk < begin || stk >= end)
  98. return false;
  99. /* Calc page offset from start of exception stacks */
  100. k = (stk - begin) >> PAGE_SHIFT;
  101. /* Lookup the page descriptor */
  102. ep = &estack_pages[k];
  103. /* Guard page? */
  104. if (!ep->size)
  105. return false;
  106. begin += (unsigned long)ep->offs;
  107. end = begin + (unsigned long)ep->size;
  108. regs = (struct pt_regs *)end - 1;
  109. info->type = ep->type;
  110. info->begin = (unsigned long *)begin;
  111. info->end = (unsigned long *)end;
  112. info->next_sp = (unsigned long *)regs->sp;
  113. return true;
  114. }
  115. static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
  116. {
  117. unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
  118. unsigned long *begin;
  119. /*
  120. * @end points directly to the top most stack entry to avoid a -8
  121. * adjustment in the stack switch hotpath. Adjust it back before
  122. * calculating @begin.
  123. */
  124. end++;
  125. begin = end - (IRQ_STACK_SIZE / sizeof(long));
  126. /*
  127. * Due to the switching logic RSP can never be == @end because the
  128. * final operation is 'popq %rsp' which means after that RSP points
  129. * to the original stack and not to @end.
  130. */
  131. if (stack < begin || stack >= end)
  132. return false;
  133. info->type = STACK_TYPE_IRQ;
  134. info->begin = begin;
  135. info->end = end;
  136. /*
  137. * The next stack pointer is stored at the top of the irq stack
  138. * before switching to the irq stack. Actual stack entries are all
  139. * below that.
  140. */
  141. info->next_sp = (unsigned long *)*(end - 1);
  142. return true;
  143. }
  144. bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
  145. struct stack_info *info)
  146. {
  147. if (in_task_stack(stack, task, info))
  148. return true;
  149. if (task != current)
  150. return false;
  151. if (in_exception_stack(stack, info))
  152. return true;
  153. if (in_irq_stack(stack, info))
  154. return true;
  155. if (in_entry_stack(stack, info))
  156. return true;
  157. return false;
  158. }
  159. int get_stack_info(unsigned long *stack, struct task_struct *task,
  160. struct stack_info *info, unsigned long *visit_mask)
  161. {
  162. task = task ? : current;
  163. if (!stack)
  164. goto unknown;
  165. if (!get_stack_info_noinstr(stack, task, info))
  166. goto unknown;
  167. /*
  168. * Make sure we don't iterate through any given stack more than once.
  169. * If it comes up a second time then there's something wrong going on:
  170. * just break out and report an unknown stack type.
  171. */
  172. if (visit_mask) {
  173. if (*visit_mask & (1UL << info->type)) {
  174. if (task == current)
  175. printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
  176. goto unknown;
  177. }
  178. *visit_mask |= 1UL << info->type;
  179. }
  180. return 0;
  181. unknown:
  182. info->type = STACK_TYPE_UNKNOWN;
  183. return -EINVAL;
  184. }