// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.

#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/scs.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as its scratch register. For a
 * critical event interrupting a normal event, we can't reliably tell if we
 * were on the SDEI stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
#endif

DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
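
/*
 * A sketch of how these per-CPU variables are used (based on the assembly
 * entry path rather than anything in this file): __sdei_asm_handler in
 * entry.S is expected to switch to the critical or normal stack and shadow
 * call stack above according to the priority of the incoming event, and the
 * sdei_active_*_event pointers appear to track the event currently being
 * handled on each CPU.
 */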

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}

static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = per_cpu(*ptr, cpu);
	if (s) {
		per_cpu(*ptr, cpu) = NULL;
		scs_free(s);
	}
}

static void free_sdei_scs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = scs_alloc(cpu_to_node(cpu));
	if (!s)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = s;

	return 0;
}

static int init_sdei_scs(void)
{
	int cpu;
	int err = 0;

	if (!scs_is_enabled())
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_scs();

	return err;
}

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_nvhe()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}

	if (init_sdei_stacks())
		goto out_err;

	if (init_sdei_scs())
		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
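
	/*
	 * The exit mode chosen above records which SMCCC conduit the firmware
	 * uses; the assembly handler is expected to issue
	 * SDEI_EVENT_COMPLETE/COMPLETE_AND_RESUME via HVC when a hypervisor is
	 * marshalling the calls, and via SMC otherwise.
	 */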

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;
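
	/*
	 * A note on the kpti case above: when the kernel is unmapped at EL0,
	 * an event can be delivered while the trampoline page tables are
	 * live, where only the entry trampoline text is mapped. The address
	 * handed to firmware therefore has to be the TRAMP_VALIAS alias of
	 * __sdei_asm_entry_trampoline, which is expected to switch to the
	 * kernel page tables before branching to __sdei_asm_handler.
	 */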

out_err_free_stacks:
	free_sdei_stacks();
out_err:
	return 0;
}
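
/*
 * For context, a minimal sketch of how the entry point above ends up being
 * used: the generic SDEI driver (drivers/firmware/arm_sdei.c) passes it to
 * firmware when events are registered, and do_sdei_event() below eventually
 * invokes the client's callback. The callback signature and the
 * sdei_event_register()/sdei_event_enable() helpers are assumed to follow
 * include/linux/arm_sdei.h; the function and event names are illustrative
 * only and are not part of this file.
 */
#if 0	/* illustrative sketch, not built */
static int example_sdei_cb(u32 event_num, struct pt_regs *regs, void *arg)
{
	/* Runs in an NMI-like context: keep it short, no sleeping. */
	pr_info("SDEI event %u fired\n", event_num);
	return 0;
}

static int example_bind_event(u32 event_num)
{
	int err;

	err = sdei_event_register(event_num, example_sdei_cb, NULL);
	if (err)
		return err;

	return sdei_event_enable(event_num);
}
#endif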

/*
 * do_sdei_event() returns one of:
 *  SDEI_EV_HANDLED -  success, return to the interrupted context.
 *  SDEI_EV_FAILED  -  failure, return this error code to firmware.
 *  virtual-address -  success, return to this address.
 */
unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
				      struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing register values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}
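
	/*
	 * Why these registers are missing: per the SDEI specification, the
	 * firmware dispatcher enters the client handler with the event
	 * number, the registered argument and the interrupted PC/PSTATE in
	 * x0-x3, so the interrupted context's own x0-x3 have to be read back
	 * with SDEI_EVENT_CONTEXT. With kpti, the entry trampoline is
	 * understood to clobber x4 as well, hence the extra register above.
	 */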

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;
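
	/*
	 * The handler was entered by firmware rather than via an exception,
	 * so ELR_EL1 should still hold the value sampled on entry. If it has
	 * changed, a synchronous exception was taken while the handler ran,
	 * clobbering state the interrupted context may still rely on.
	 */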
	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
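	/*
	 * The offsets below are the IRQ entries of the AArch64 vector table,
	 * relative to VBAR_EL1:
	 *   0x280 - IRQ, current EL, using SP_ELx (we interrupted the kernel)
	 *   0x480 - IRQ from a lower EL using AArch64
	 *   0x680 - IRQ from a lower EL using AArch32
	 */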
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;

	return vbar + 0x480;
}