/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/stacktrace/nvhe.h>
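
/*
 * Helpers describing the HYP stacks. The *_kern_va() variants return the
 * host kernel VA view of the stacks; the others return the HYP VA view
 * reported via the shared kvm_nvhe_stacktrace_info structure.
 */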
static struct stack_info stackinfo_get_overflow(void)
{
        struct kvm_nvhe_stacktrace_info *stacktrace_info
                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
        unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
        unsigned long high = low + OVERFLOW_STACK_SIZE;

        return (struct stack_info) {
                .low = low,
                .high = high,
        };
}

static struct stack_info stackinfo_get_overflow_kern_va(void)
{
        unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;

        return (struct stack_info) {
                .low = low,
                .high = high,
        };
}

static struct stack_info stackinfo_get_hyp(void)
{
        struct kvm_nvhe_stacktrace_info *stacktrace_info
                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
        unsigned long low = (unsigned long)stacktrace_info->stack_base;
        unsigned long high = low + NVHE_STACK_SIZE;

        return (struct stack_info) {
                .low = low,
                .high = high,
        };
}

static struct stack_info stackinfo_get_hyp_kern_va(void)
{
        unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
        unsigned long high = low + NVHE_STACK_SIZE;

        return (struct stack_info) {
                .low = low,
                .high = high,
        };
}
  61. /*
  62. * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs
  63. *
  64. * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
  65. * allow for guard pages below the stack. Consequently, the fixed offset address
  66. * translation macros won't work here.
  67. *
  68. * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
  69. * stack base.
  70. *
  71. * Returns true on success and updates @addr to its corresponding kernel VA;
  72. * otherwise returns false.
  73. */
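/*
 * Worked example (illustrative addresses only): if the HYP VA stack base is
 * 0xffffffc008010000 and its kernel VA mapping begins at 0xffff800012340000,
 * then HYP address 0xffffffc008010f80 translates to 0xffff800012340f80,
 * i.e. addr - stack_hyp.low + stack_kern.low.
 */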
static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
{
        struct stack_info stack_hyp, stack_kern;

        stack_hyp = stackinfo_get_hyp();
        stack_kern = stackinfo_get_hyp_kern_va();
        if (stackinfo_on_stack(&stack_hyp, *addr, size))
                goto found;

        stack_hyp = stackinfo_get_overflow();
        stack_kern = stackinfo_get_overflow_kern_va();
        if (stackinfo_on_stack(&stack_hyp, *addr, size))
                goto found;

        return false;

found:
        *addr = *addr - stack_hyp.low + stack_kern.low;
        return true;
}

/*
 * Convert a KVM nVHE HYP frame record address to a kernel VA.
 *
 * A frame record is 16 bytes: the saved frame pointer followed by the saved
 * link register.
 */
static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
{
        return kvm_nvhe_stack_kern_va(addr, 16);
}

static int unwind_next(struct unwind_state *state)
{
        /*
         * The FP is in the hypervisor VA space. Convert it to the kernel VA
         * space so it can be unwound by the regular unwind functions.
         */
        if (!kvm_nvhe_stack_kern_record_va(&state->fp))
                return -EINVAL;

        return unwind_next_frame_record(state);
}
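
/*
 * Walk the HYP frame records, calling @consume_entry for each PC until it
 * returns false or the next frame cannot be unwound.
 */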
static void unwind(struct unwind_state *state,
                   stack_trace_consume_fn consume_entry, void *cookie)
{
        while (1) {
                int ret;

                if (!consume_entry(cookie, state->pc))
                        break;
                ret = unwind_next(state);
                if (ret < 0)
                        break;
        }
}

/*
 * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
 *
 * @arg   : the hypervisor offset, used for address translation
 * @where : the program counter corresponding to the stack frame
 */
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
        unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
        unsigned long hyp_offset = (unsigned long)arg;

        /* Mask tags and convert to kern addr */
        where = (where & va_mask) + hyp_offset;
        kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));

        return true;
}

static void kvm_nvhe_dump_backtrace_start(void)
{
        kvm_err("nVHE call trace:\n");
}

static void kvm_nvhe_dump_backtrace_end(void)
{
        kvm_err("---[ end nVHE call trace ]---\n");
}
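
/*
 * Together these produce output of the following shape (the addresses and
 * symbol names below are illustrative only):
 *
 *   nVHE call trace:
 *    [<ffff800008f9b6c0>] __kvm_nvhe_hyp_panic+0xb0/0xf4
 *    [<ffff800008f9b700>] __kvm_nvhe_handle_trap+0xc4/0x128
 *   ---[ end nVHE call trace ]---
 */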

/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * The host can directly access HYP stack pages in non-protected
 * mode, so the unwinding is done directly from EL1. This removes
 * the need for shared buffers between host and hypervisor for
 * the stacktrace.
 */
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
        struct kvm_nvhe_stacktrace_info *stacktrace_info;
        struct stack_info stacks[] = {
                stackinfo_get_overflow_kern_va(),
                stackinfo_get_hyp_kern_va(),
        };
        struct unwind_state state = {
                .stacks = stacks,
                .nr_stacks = ARRAY_SIZE(stacks),
        };

        stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

        kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

        kvm_nvhe_dump_backtrace_start();
        unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
        kvm_nvhe_dump_backtrace_end();
}

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
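/*
 * Per-CPU buffer in the nVHE hyp image, shared with the host. The EL2
 * unwinder fills it with the PCs of the hypervisor backtrace; a NULL entry
 * terminates the trace when the buffer is not full.
 */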
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
                         pkvm_stacktrace);

/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dumping of the pKVM HYP backtrace is done by reading the
 * stack addresses from the shared stacktrace buffer, since the
 * host cannot directly access hypervisor memory in protected
 * mode.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
        unsigned long *stacktrace
                = (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
        int i;

        kvm_nvhe_dump_backtrace_start();
        /* The saved stacktrace is terminated by a null entry */
        for (i = 0;
             i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
             i++)
                kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
        kvm_nvhe_dump_backtrace_end();
}
#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
        kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
        if (is_protected_kvm_enabled())
                pkvm_dump_backtrace(hyp_offset);
        else
                hyp_dump_backtrace(hyp_offset);
}