/*
 * KVM nVHE hypervisor stack tracing support.
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/stacktrace/nvhe.h>
- static struct stack_info stackinfo_get_overflow(void)
- {
- struct kvm_nvhe_stacktrace_info *stacktrace_info
- = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
- unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
- unsigned long high = low + OVERFLOW_STACK_SIZE;
- return (struct stack_info) {
- .low = low,
- .high = high,
- };
- }
- static struct stack_info stackinfo_get_overflow_kern_va(void)
- {
- unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
- unsigned long high = low + OVERFLOW_STACK_SIZE;
- return (struct stack_info) {
- .low = low,
- .high = high,
- };
- }
- static struct stack_info stackinfo_get_hyp(void)
- {
- struct kvm_nvhe_stacktrace_info *stacktrace_info
- = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
- unsigned long low = (unsigned long)stacktrace_info->stack_base;
- unsigned long high = low + NVHE_STACK_SIZE;
- return (struct stack_info) {
- .low = low,
- .high = high,
- };
- }
- static struct stack_info stackinfo_get_hyp_kern_va(void)
- {
- unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
- unsigned long high = low + NVHE_STACK_SIZE;
- return (struct stack_info) {
- .low = low,
- .high = high,
- };
- }
- static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
- {
- struct stack_info stack_hyp, stack_kern;
- stack_hyp = stackinfo_get_hyp();
- stack_kern = stackinfo_get_hyp_kern_va();
- if (stackinfo_on_stack(&stack_hyp, *addr, size))
- goto found;
- stack_hyp = stackinfo_get_overflow();
- stack_kern = stackinfo_get_overflow_kern_va();
- if (stackinfo_on_stack(&stack_hyp, *addr, size))
- goto found;
- return false;
- found:
- *addr = *addr - stack_hyp.low + stack_kern.low;
- return true;
- }
/*
 * Convert the hyp VA of a frame record to a kernel VA.
 *
 * The size of 16 covers the record pointed at by @addr; presumably the
 * two 8-byte entries (FP, LR) of an AArch64 frame record — TODO confirm
 * against the frame-record layout used by unwind_next_frame_record().
 */
static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
{
	return kvm_nvhe_stack_kern_va(addr, 16);
}
- static int unwind_next(struct unwind_state *state)
- {
-
- if (!kvm_nvhe_stack_kern_record_va(&state->fp))
- return -EINVAL;
- return unwind_next_frame_record(state);
- }
- static void unwind(struct unwind_state *state,
- stack_trace_consume_fn consume_entry, void *cookie)
- {
- while (1) {
- int ret;
- if (!consume_entry(cookie, state->pc))
- break;
- ret = unwind_next(state);
- if (ret < 0)
- break;
- }
- }
- static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
- {
- unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
- unsigned long hyp_offset = (unsigned long)arg;
-
- where = (where & va_mask) + hyp_offset;
- kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
- return true;
- }
/* Print the header line that opens an nVHE backtrace dump. */
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}
/* Print the footer line that closes an nVHE backtrace dump. */
static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}
/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset: offset applied by kvm_nvhe_dump_backtrace_entry() to
 *		rebase hyp PCs into kernel addresses for printing.
 *
 * The unwinder walks the kernel-VA views of the hyp stacks, starting
 * from the FP/PC that the hypervisor recorded in kvm_stacktrace_info.
 */
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	/* Kernel-VA views of both stacks the hyp FP chain may traverse. */
	struct stack_info stacks[] = {
		stackinfo_get_overflow_kern_va(),
		stackinfo_get_hyp_kern_va(),
	};
	struct unwind_state state = {
		.stacks = stacks,
		.nr_stacks = ARRAY_SIZE(stacks),
	};

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

	kvm_nvhe_dump_backtrace_start();
	unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
	kvm_nvhe_dump_backtrace_end();
}
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
/* Per-CPU buffer holding the stacktrace entries saved by the hypervisor. */
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
			 pkvm_stacktrace);

/*
 * pkvm_dump_backtrace - Dump the protected nVHE backtrace.
 *
 * @hyp_offset: offset applied by kvm_nvhe_dump_backtrace_entry() to
 *		rebase hyp PCs into kernel addresses for printing.
 *
 * In protected mode the kernel cannot unwind the hyp stacks itself, so
 * it replays the entries the hypervisor saved into pkvm_stacktrace.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	unsigned long *stacktrace
		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	int i;

	kvm_nvhe_dump_backtrace_start();
	/* The trace ends at the buffer limit or at the first zero entry. */
	for (i = 0;
	     i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
	     i++)
		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
	kvm_nvhe_dump_backtrace_end();
}
#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
/* No saved trace without CONFIG_PROTECTED_NVHE_STACKTRACE; just say so. */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: offset used to rebase hyp PCs into kernel addresses.
 *
 * Dispatches to the protected or non-protected dumper depending on the
 * mode KVM is running in.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	if (!is_protected_kvm_enabled())
		hyp_dump_backtrace(hyp_offset);
	else
		pkvm_dump_backtrace(hyp_offset);
}