book3s_64_entry.S

/* SPDX-License-Identifier: GPL-2.0-only */
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/code-patching-asm.h>
#include <asm/exception-64s.h>
#include <asm/export.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/ultravisor-api.h>

/*
 * These are branched to from interrupt handlers in exception-64s.S which set
 * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
 */

/*
 * This is an hcall, so the register convention is as described in
 * Documentation/powerpc/papr_hcalls.rst.
 *
 * This may also be a syscall from PR-KVM userspace that is to be
 * reflected to the PR guest kernel, so registers may be set up for
 * a system call rather than an hcall. We don't currently clobber
 * anything here, but the 0xc00 handler has already clobbered CTR
 * and CR0, so PR-KVM cannot support a guest kernel that preserves
 * those registers across its system calls.
 *
 * The state of registers is as kvmppc_interrupt, except CFAR is not
 * saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
 */
.global	kvmppc_hcall
.balign	IFETCH_ALIGN_BYTES
kvmppc_hcall:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,KVM_GUEST_MODE_HV_P9
	beq	kvmppc_p9_exit_hcall
#endif
	ld	r10,PACA_EXGEN+EX_R13(r13)
	SET_SCRATCH0(r10)
	li	r10,0xc00
	/* Now we look like kvmppc_interrupt */
	li	r11,PACA_EXGEN
	b	.Lgot_save_area

/*
 * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
 * call convention:
 *
 * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
 * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
 * guest R13 also saved in SCRATCH0
 * R13		= PACA
 * R11		= (H)SRR0
 * R12		= (H)SRR1
 * R9		= guest CR
 * PPR is set to medium
 *
 * With the addition for KVM:
 * R10		= trap vector
 */
.global	kvmppc_interrupt
.balign	IFETCH_ALIGN_BYTES
kvmppc_interrupt:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	std	r10,HSTATE_SCRATCH0(r13)
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,KVM_GUEST_MODE_HV_P9
	beq	kvmppc_p9_exit_interrupt
	ld	r10,HSTATE_SCRATCH0(r13)
#endif
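	/*
	 * Select the PACA save area the handler used: traps above 0x200
	 * use EXGEN, machine check (0x200) uses EXMC, and system reset
	 * (0x100) uses EXNMI.
	 */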
	li	r11,PACA_EXGEN
	cmpdi	r10,0x200
	bgt+	.Lgot_save_area
	li	r11,PACA_EXMC
	beq	.Lgot_save_area
	li	r11,PACA_EXNMI
.Lgot_save_area:
	add	r11,r11,r13
BEGIN_FTR_SECTION
	ld	r12,EX_CFAR(r11)
	std	r12,HSTATE_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r12,EX_CTR(r11)
	mtctr	r12
BEGIN_FTR_SECTION
	ld	r12,EX_PPR(r11)
	std	r12,HSTATE_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r12,EX_R12(r11)
	std	r12,HSTATE_SCRATCH0(r13)
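	/* Pack guest CR (upper 32 bits) and trap vector (lower) into r12 */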
	sldi	r12,r9,32
	or	r12,r12,r10
	ld	r9,EX_R9(r11)
	ld	r10,EX_R10(r11)
	ld	r11,EX_R11(r11)

/*
 * Hcalls and other interrupts come here after normalising register
 * contents and save locations:
 *
 * R12		= (guest CR << 32) | interrupt vector
 * R13		= PACA
 * guest R12 saved in shadow HSTATE_SCRATCH0
 * guest R13 saved in SPRN_SCRATCH0
 */
	std	r9,HSTATE_SCRATCH2(r13)
	lbz	r9,HSTATE_IN_GUEST(r13)
	cmpwi	r9,KVM_GUEST_MODE_SKIP
	beq-	.Lmaybe_skip
.Lno_skip:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9,KVM_GUEST_MODE_GUEST
	beq	kvmppc_interrupt_pr
#endif
	b	kvmppc_interrupt_hv
#else
	b	kvmppc_interrupt_pr
#endif

/*
 * "Skip" interrupts are part of a trick KVM uses with hash guests to load
 * the faulting instruction in guest memory from the hypervisor without
 * walking page tables.
 *
 * When the guest takes a fault that requires the hypervisor to load the
 * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
 * and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
 * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
 * but loads and stores will access the guest context. This is used to load
 * the faulting instruction using the faulting guest effective address.
 *
 * However the guest context may not be able to translate, or it may cause a
 * machine check or other issue, which results in a fault in the host
 * (even with KVM-HV).
 *
 * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
 * are (or are likely) caused by that load, the instruction is skipped by
 * just returning with the PC advanced +4, where it is noticed the load did
 * not execute and it goes to the slow path which walks the page tables to
 * read guest memory.
 */
.Lmaybe_skip:
	cmpwi	r12,BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	1f
	cmpwi	r12,BOOK3S_INTERRUPT_DATA_STORAGE
	beq	1f
	cmpwi	r12,BOOK3S_INTERRUPT_DATA_SEGMENT
	beq	1f
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* HSRR interrupts get 2 added to interrupt number */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
	beq	2f
#endif
	b	.Lno_skip
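
	/*
	 * Skip the faulting load: advance (H)SRR0 past it, restore the
	 * registers clobbered on entry, and return. The interrupted code
	 * notices the load did not execute and takes the slow path.
	 */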
1:	mfspr	r9,SPRN_SRR0
	addi	r9,r9,4
	mtspr	SPRN_SRR0,r9
	ld	r12,HSTATE_SCRATCH0(r13)
	ld	r9,HSTATE_SCRATCH2(r13)
	GET_SCRATCH0(r13)
	RFI_TO_KERNEL
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2:	mfspr	r9,SPRN_HSRR0
	addi	r9,r9,4
	mtspr	SPRN_HSRR0,r9
	ld	r12,HSTATE_SCRATCH0(r13)
	ld	r9,HSTATE_SCRATCH2(r13)
	GET_SCRATCH0(r13)
	HRFI_TO_KERNEL
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

/* Stack frame offsets for kvmppc_p9_enter_guest */
#define SFS			(144 + STACK_FRAME_MIN_SIZE)
#define STACK_SLOT_NVGPRS	(SFS - 144)	/* 18 gprs */
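
/*
 * STACK_SLOT_NVGPRS holds the 18 non-volatile GPRs r14-r31
 * (18 * 8 = 144 bytes); CR is stored to the ABI CR save slot
 * at SFS+8(r1).
 */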

/*
 * void kvmppc_p9_enter_guest(struct vcpu *vcpu);
 *
 * Enter the guest on an ISAv3.0 or later system.
 */
.balign	IFETCH_ALIGN_BYTES
_GLOBAL(kvmppc_p9_enter_guest)
EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
	mflr	r0
	std	r0,PPC_LR_STKOFF(r1)
	stdu	r1,-SFS(r1)
	std	r1,HSTATE_HOST_R1(r13)
	mfcr	r4
	stw	r4,SFS+8(r1)
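
	/* Save host non-volatile GPRs r14-r31 into the stack frame */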
	reg = 14
	.rept	18
	std	reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	ld	r4,VCPU_LR(r3)
	mtlr	r4
	ld	r4,VCPU_CTR(r3)
	mtctr	r4
	ld	r4,VCPU_XER(r3)
	mtspr	SPRN_XER,r4
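
	/*
	 * Stage guest CR in r1: host r1 has been saved away, and CR0 is
	 * still needed for the secure-guest test below, so CR is only
	 * set with mtcr at the last moment before entering the guest.
	 */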
	ld	r1,VCPU_CR(r3)

BEGIN_FTR_SECTION
	ld	r4,VCPU_CFAR(r3)
	mtspr	SPRN_CFAR,r4
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4,VCPU_PPR(r3)
	mtspr	SPRN_PPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
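
	/*
	 * Load guest r4-r31 (r4 is clobbered by the secure-guest check
	 * below and reloaded there).
	 */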
	reg = 4
	.rept	28
	ld	reg,__VCPU_GPR(reg)(r3)
	reg = reg + 1
	.endr
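
	/* A secure (ultravisor) guest must be re-entered via UV_RETURN */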
	ld	r4,VCPU_KVM(r3)
	lbz	r4,KVM_SECURE_GUEST(r4)
	cmpdi	r4,0
	ld	r4,VCPU_GPR(R4)(r3)
	bne	.Lret_to_ultra

	mtcr	r1
	ld	r0,VCPU_GPR(R0)(r3)
	ld	r1,VCPU_GPR(R1)(r3)
	ld	r2,VCPU_GPR(R2)(r3)
	ld	r3,VCPU_GPR(R3)(r3)
	HRFI_TO_GUEST
	b	.

/*
 * Use UV_RETURN ultracall to return control back to the Ultravisor
 * after processing a hypercall or interrupt that was forwarded
 * (a.k.a. reflected) to the Hypervisor.
 *
 * All registers have already been reloaded except the ucall requires:
 * R0 = hcall result
 * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
 * R3 = UV_RETURN
 */
.Lret_to_ultra:
	mtcr	r1
	ld	r1,VCPU_GPR(R1)(r3)
	ld	r0,VCPU_GPR(R3)(r3)
	mfspr	r2,SPRN_SRR1
	LOAD_REG_IMMEDIATE(r3, UV_RETURN)
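	/* sc 2 is the ultracall: a system call with LEV=2 traps to the UV */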
	sc	2

/*
 * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
 * above if the interrupt was taken for a guest that was entered via
 * kvmppc_p9_enter_guest().
 *
 * The exit code recovers the host stack and vcpu pointer, saves all guest
 * GPRs and CR, LR, XER as well as guest MSR and NIA into the VCPU, then
 * re-establishes the host stack and registers to return from the
 * kvmppc_p9_enter_guest() function, which saves CTR and other guest
 * registers (SPRs and FP, VEC, etc).
 */
.balign	IFETCH_ALIGN_BYTES
kvmppc_p9_exit_hcall:
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	li	r10,0xc00
	std	r10,HSTATE_SCRATCH0(r13)
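	/* Fall through to the interrupt exit with 0xc00 recorded as the trap */
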
.balign	IFETCH_ALIGN_BYTES
kvmppc_p9_exit_interrupt:
	/*
	 * If HSTATE_IN_GUEST was set to KVM_GUEST_MODE_HV_P9 but MSR[HV]
	 * in r12 ([H]SRR1) shows the interrupt was taken while still in
	 * the hypervisor, it hit around guest entry, so we can't return
	 * from the entry stack.
	 */
	rldicl.	r10,r12,64-MSR_HV_LG,63	/* extract MSR[HV] */
	bne-	kvmppc_p9_bad_interrupt
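
	/* Stash guest r1 and r3, then switch to the host stack and vcpu */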
	std	r1,HSTATE_SCRATCH1(r13)
	std	r3,HSTATE_SCRATCH2(r13)
	ld	r1,HSTATE_HOST_R1(r13)
	ld	r3,HSTATE_KVM_VCPU(r13)

	std	r9,VCPU_CR(r3)

1:
	std	r11,VCPU_PC(r3)
	std	r12,VCPU_MSR(r3)

	reg = 14
	.rept	18
	std	reg,__VCPU_GPR(reg)(r3)
	reg = reg + 1
	.endr

	/* r1, r3, r9-r13 are saved to vcpu by C code */
	std	r0,VCPU_GPR(R0)(r3)
	std	r2,VCPU_GPR(R2)(r3)
	reg = 4
	.rept	5
	std	reg,__VCPU_GPR(reg)(r3)
	reg = reg + 1
	.endr
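
	/* Restore the kernel TOC pointer (r2) now that guest r2 is saved */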
	LOAD_PACA_TOC()

	mflr	r4
	std	r4,VCPU_LR(r3)
	mfspr	r4,SPRN_XER
	std	r4,VCPU_XER(r3)

	reg = 14
	.rept	18
	ld	reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	lwz	r4,SFS+8(r1)
	mtcr	r4

	/*
	 * Flush the link stack here, before executing the first blr on the
	 * way out of the guest.
	 *
	 * The link stack won't match coming out of the guest anyway so the
	 * only cost is the flush itself. The call clobbers r0.
	 */
1:	nop
	patch_site 1b patch__call_kvm_flush_link_stack_p9

	addi	r1,r1,SFS
	ld	r0,PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Took an interrupt somewhere right before HRFID to guest, so registers are
 * in a bad way. Restore just enough state to run host virtual-mode code and
 * reach the Linux interrupt handler (SRESET or MCE) to print something
 * useful.
 *
 * We could be really clever and save all host registers in known locations
 * before setting HSTATE_IN_GUEST, then restore them all here, and set the
 * return address to a fixup that sets them up again. But that's a lot of
 * effort for a small bit of code. Lots of other things to do first.
 */
kvmppc_p9_bad_interrupt:
BEGIN_MMU_FTR_SECTION
	/*
	 * Hash host doesn't try to recover MMU (requires host SLB reload)
	 */
	b	.
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)

	/*
	 * Clean up guest registers to give host a chance to run.
	 */
	li	r10,0
	mtspr	SPRN_AMR,r10
	mtspr	SPRN_IAMR,r10
	mtspr	SPRN_CIABR,r10
	mtspr	SPRN_DAWRX0,r10
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWRX1,r10
END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)

	/*
	 * Switch to host MMU mode (don't have the real host PID but we aren't
	 * going back to userspace).
	 */
	hwsync
	isync

	mtspr	SPRN_PID,r10

	ld	r10,HSTATE_KVM_VCPU(r13)
	ld	r10,VCPU_KVM(r10)
	lwz	r10,KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID,r10

	ld	r10,HSTATE_KVM_VCPU(r13)
	ld	r10,VCPU_KVM(r10)
	ld	r10,KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR,r10

	isync

	/*
	 * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
	 * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
	 */
	li	r10,KVM_GUEST_MODE_NONE
	stb	r10,HSTATE_IN_GUEST(r13)
	li	r10,MSR_RI
	andc	r12,r12,r10

	/*
	 * Go back to the interrupt handler. MCE and SRESET have their
	 * specific PACA save areas and set up their own stacks, so they
	 * can be branched to directly. The other handlers all use EXGEN
	 * and would use the guest r1 if it looks like a kernel stack, so
	 * anything other than MCE and SRESET just spins here.
	 */
	ld	r10,HSTATE_SCRATCH0(r13)
	cmpwi	r10,BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	.Lcall_machine_check_common

	cmpwi	r10,BOOK3S_INTERRUPT_SYSTEM_RESET
	beq	.Lcall_system_reset_common

	b	.

.Lcall_machine_check_common:
	b	machine_check_common

.Lcall_system_reset_common:
	b	system_reset_common
#endif