/* stacktrace.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * KVM nVHE hypervisor stack tracing support.
  4. *
  5. * Copyright (C) 2022 Google LLC
  6. */
  7. #include <asm/kvm_asm.h>
  8. #include <asm/kvm_hyp.h>
  9. #include <asm/memory.h>
  10. #include <asm/percpu.h>
/*
 * Per-CPU stack used when the regular hyp stack overflows; __aligned(16)
 * because AArch64 requires the stack pointer to be 16-byte aligned.
 */
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

/* Per-CPU unwind info (fp/pc/stack bases) read by the host in EL1. */
DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
  14. /*
  15. * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
  16. *
  17. * @fp : frame pointer at which to start the unwinding.
  18. * @pc : program counter at which to start the unwinding.
  19. *
  20. * Save the information needed by the host to unwind the non-protected
  21. * nVHE hypervisor stack in EL1.
  22. */
  23. static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
  24. {
  25. struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
  26. struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
  27. stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
  28. stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
  29. stacktrace_info->fp = fp;
  30. stacktrace_info->pc = pc;
  31. }
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
#include <asm/stacktrace/nvhe.h>

/* Per-CPU buffer of unwound PCs, shared with the host so EL1 can dump it. */
DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
  35. static struct stack_info stackinfo_get_overflow(void)
  36. {
  37. unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
  38. unsigned long high = low + OVERFLOW_STACK_SIZE;
  39. return (struct stack_info) {
  40. .low = low,
  41. .high = high,
  42. };
  43. }
  44. static struct stack_info stackinfo_get_hyp(void)
  45. {
  46. struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
  47. unsigned long high = params->stack_hyp_va;
  48. unsigned long low = high - NVHE_STACK_SIZE;
  49. return (struct stack_info) {
  50. .low = low,
  51. .high = high,
  52. };
  53. }
  54. static int unwind_next(struct unwind_state *state)
  55. {
  56. return unwind_next_frame_record(state);
  57. }
  58. static void notrace unwind(struct unwind_state *state,
  59. stack_trace_consume_fn consume_entry,
  60. void *cookie)
  61. {
  62. while (1) {
  63. int ret;
  64. if (!consume_entry(cookie, state->pc))
  65. break;
  66. ret = unwind_next(state);
  67. if (ret < 0)
  68. break;
  69. }
  70. }
  71. /*
  72. * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
  73. *
  74. * @arg : index of the entry in the stacktrace buffer
  75. * @where : the program counter corresponding to the stack frame
  76. *
  77. * Save the return address of a stack frame to the shared stacktrace buffer.
  78. * The host can access this shared buffer from EL1 to dump the backtrace.
  79. */
  80. static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
  81. {
  82. unsigned long *stacktrace = this_cpu_ptr(pkvm_stacktrace);
  83. int *idx = (int *)arg;
  84. /*
  85. * Need 2 free slots: 1 for current entry and 1 for the
  86. * delimiter.
  87. */
  88. if (*idx > ARRAY_SIZE(pkvm_stacktrace) - 2)
  89. return false;
  90. stacktrace[*idx] = where;
  91. stacktrace[++*idx] = 0UL;
  92. return true;
  93. }
  94. /*
  95. * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
  96. *
  97. * @fp : frame pointer at which to start the unwinding.
  98. * @pc : program counter at which to start the unwinding.
  99. *
  100. * Save the unwinded stack addresses to the shared stacktrace buffer.
  101. * The host can access this shared buffer from EL1 to dump the backtrace.
  102. */
  103. static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
  104. {
  105. struct stack_info stacks[] = {
  106. stackinfo_get_overflow(),
  107. stackinfo_get_hyp(),
  108. };
  109. struct unwind_state state = {
  110. .stacks = stacks,
  111. .nr_stacks = ARRAY_SIZE(stacks),
  112. };
  113. int idx = 0;
  114. kvm_nvhe_unwind_init(&state, fp, pc);
  115. unwind(&state, pkvm_save_backtrace_entry, &idx);
  116. }
#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
/* Protected-mode stacktraces compiled out: saving is a no-op. */
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
  122. /*
  123. * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
  124. *
  125. * @fp : frame pointer at which to start the unwinding.
  126. * @pc : program counter at which to start the unwinding.
  127. *
  128. * Saves the information needed by the host to dump the nVHE hypervisor
  129. * backtrace.
  130. */
  131. void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
  132. {
  133. if (is_protected_kvm_enabled())
  134. pkvm_save_backtrace(fp, pc);
  135. else
  136. hyp_prepare_backtrace(fp, pc);
  137. }