exception.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <[email protected]>
 */

#include <hyp/adjust_pc.h>

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>

#if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__)
#error Hypervisor code only!
#endif
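
/*
 * Sysreg accessors for the exception code: read or write a guest system
 * register directly on the CPU when the guest's sysreg state is currently
 * loaded on the hardware, and fall back to the in-memory vcpu context
 * otherwise.
 */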
static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}
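
/*
 * SPSR writers: with VHE the guest's sysreg state is resident on the CPU
 * while this code runs, so the hardware register is written directly;
 * without VHE the saved vcpu context is updated instead and restored on
 * the next guest entry.
 */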
static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe())
		write_sysreg_el1(val, SYS_SPSR);
	else
		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
}

static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe())
		write_sysreg(val, spsr_abt);
	else
		vcpu->arch.ctxt.spsr_abt = val;
}

static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe())
		write_sysreg(val, spsr_und);
	else
		vcpu->arch.ctxt.spsr_und = val;
}
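
/*
 * Pick the offset into the AArch64 vector table for an exception taken to
 * @target_mode: same EL using SP_ELx, same EL using SP_EL0, lower EL in
 * AArch64 or lower EL in AArch32, then add the per-type offset
 * (sync/irq/fiq/serror).
 */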
unsigned long get_except64_offset(unsigned long psr, unsigned long target_mode,
				  enum exception_type type)
{
	u64 mode = psr & (PSR_MODE_MASK | PSR_MODE32_BIT);
	u64 exc_offset;

	if (mode == target_mode)
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
	else if (!(mode & PSR_MODE32_BIT))
		exc_offset = LOWER_EL_AArch64_VECTOR;
	else
		exc_offset = LOWER_EL_AArch32_VECTOR;

	return exc_offset + type;
}

/*
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
unsigned long get_except64_cpsr(unsigned long old, bool has_mte,
				unsigned long sctlr, unsigned long target_mode)
{
	u64 new = 0;

	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

	if (has_mte)
		new |= PSR_TCO_BIT;

	new |= (old & PSR_DIT_BIT);

	// PSTATE.UAO is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D5-2579.

	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page D5-2578.
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	// PSTATE.SS is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D2-2452.

	// PSTATE.IL is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.

	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
	// See ARM DDI 0487E.a, page D13-3258
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	// PSTATE.BTYPE is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

	new |= target_mode;

	return new;
}

/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 */
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      enum exception_type type)
{
	u64 offset = get_except64_offset(*vcpu_cpsr(vcpu), target_mode, type);
	unsigned long sctlr, vbar, old, new;

	switch (target_mode) {
	case PSR_MODE_EL1h:
		vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL1);
		sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		__vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
		break;
	default:
		/* Don't do that */
		BUG();
	}

	*vcpu_pc(vcpu) = vbar + offset;

	old = *vcpu_cpsr(vcpu);
	new = get_except64_cpsr(old, kvm_has_mte(kern_hyp_va(vcpu->kvm)), sctlr,
				target_mode);
	*vcpu_cpsr(vcpu) = new;
	__vcpu_write_spsr(vcpu, old);
}

/*
 * When an exception is taken, most CPSR fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
 * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
 * obsoleted by the ARMv7 virtualization extensions and is RES0.
 *
 * For the SPSR layout seen from AArch32, see:
 * - ARM DDI 0406C.d, page B1-1148
 * - ARM DDI 0487E.a, page G8-6264
 *
 * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
 * - ARM DDI 0487E.a, page C5-426
 *
 * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
 * MSB to LSB.
 */
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
	u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_AA32_N_BIT);
	new |= (old & PSR_AA32_Z_BIT);
	new |= (old & PSR_AA32_C_BIT);
	new |= (old & PSR_AA32_V_BIT);
	new |= (old & PSR_AA32_Q_BIT);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See ARM DDI 0487E.a, section G1.12.3
	// See ARM DDI 0406C.d, section B1.8.3

	new |= (old & PSR_AA32_DIT_BIT);

	// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
	// See ARM DDI 0487E.a, page G8-6244
	if (sctlr & BIT(31))
		new |= PSR_AA32_SSBS_BIT;

	// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
	// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page G8-6246
	new |= (old & PSR_AA32_PAN_BIT);
	if (!(sctlr & BIT(23)))
		new |= PSR_AA32_PAN_BIT;

	// SS does not exist in AArch32, so ignore

	// CPSR.IL is set to zero upon any exception
	// See ARM DDI 0487E.a, page G1-5527

	new |= (old & PSR_AA32_GE_MASK);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See prior comment above

	// CPSR.E is set to SCTLR.EE upon any exception
	// See ARM DDI 0487E.a, page G8-6245
	// See ARM DDI 0406C.d, page B4-1701
	if (sctlr & BIT(25))
		new |= PSR_AA32_E_BIT;

	// CPSR.A is unchanged upon an exception to Undefined, Supervisor
	// CPSR.A is set upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_A_BIT);
	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
		new |= PSR_AA32_A_BIT;

	// CPSR.I is set upon any exception
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= PSR_AA32_I_BIT;

	// CPSR.F is set upon an exception to FIQ
	// CPSR.F is unchanged upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_F_BIT);
	if (mode == PSR_AA32_MODE_FIQ)
		new |= PSR_AA32_F_BIT;

	// CPSR.T is set to SCTLR.TE upon any exception
	// See ARM DDI 0487E.a, page G8-5514
	// See ARM DDI 0406C.d, page B1-1181
	if (sctlr & BIT(30))
		new |= PSR_AA32_T_BIT;

	new |= mode;

	return new;
}

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};
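
/*
 * Emulate taking an AArch32 exception to @mode: compute the new CPSR, bank
 * the old one into the mode's SPSR, set the banked LR to the return address
 * (adjusted for the exception type and Thumb state via return_offsets), and
 * branch to the vector, using the high vectors at 0xffff0000 when SCTLR.V is
 * set and VBAR otherwise.
 */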
static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long spsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (spsr & PSR_AA32_T_BIT);
	u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	u32 return_address;

	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
	return_address = *vcpu_pc(vcpu);
	return_address += return_offsets[vect_offset >> 2][is_thumb];

	/* KVM only enters the ABT and UND modes, so only deal with those */
	switch (mode) {
	case PSR_AA32_MODE_ABT:
		__vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));
		vcpu_gp_regs(vcpu)->compat_lr_abt = return_address;
		break;

	case PSR_AA32_MODE_UND:
		__vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));
		vcpu_gp_regs(vcpu)->compat_lr_und = return_address;
		break;
	}

	/* Branch to exception vector */
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += __vcpu_read_sys_reg(vcpu, VBAR_EL1);

	*vcpu_pc(vcpu) = vect_offset;
}
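
/*
 * Inject the exception selected by the vcpu's EXCEPT_* flag: an Undefined,
 * Prefetch Abort or Data Abort for a 32bit EL1, or a synchronous exception
 * taken to EL1h for a 64bit EL1.
 */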
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu)) {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA32_UND):
			enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
			break;
		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
			break;
		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
			break;
		default:
			/* Err... */
			break;
		}
	} else {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
			enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
			break;
		default:
			/*
			 * Only EL1_SYNC makes sense so far, EL2_{SYNC,IRQ}
			 * will be implemented at some point. Everything
			 * else gets silently ignored.
			 */
			break;
		}
	}
}

/*
 * Adjust the guest PC (and potentially exception state) depending on
 * flags provided by the emulation code.
 */
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
{
	if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
		kvm_inject_exception(vcpu);
		vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
		vcpu_clear_flag(vcpu, EXCEPT_MASK);
	} else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
		kvm_skip_instr(vcpu);
		vcpu_clear_flag(vcpu, INCREMENT_PC);
	}
}