/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>
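
/*
 * These templates emulate privileged MSR/SR accesses for a guest kernel
 * running on a KVM hypervisor that exposes the shared "magic" page.  The
 * magic page sits in the last page of the guest effective address space
 * (KVM_MAGIC_PAGE == -4096), so its fields are reached with negative
 * offsets from address 0, i.e. the (KVM_MAGIC_PAGE + ...)(0) operands below.
 */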

#define KVM_MAGIC_PAGE		(-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
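
/*
 * The MSR and critical fields in the shared page are 64 bits wide even on
 * 32-bit kernels; LL64/STL64 therefore touch only the low word there
 * (offs + 4, assuming the big-endian layout these 32-bit configs use).
 */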

#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
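
/*
 * Everything between kvm_template_start and kvm_template_end is template
 * code and is never executed in place.  The guest-side patching code (see
 * arch/powerpc/kernel/kvm.c) copies a template into kvm_tmp, fills in the
 * instructions marked by the *_reg/*_orig_ins labels, fixes up the *_branch
 * return branch, and replaces the original privileged instruction with a
 * branch to the copy.  The *_offs/*_len words after each template give the
 * patcher the word offsets it needs.  Roughly (illustrative sketch only;
 * helper names are not from this file):
 *
 *	u32 *p = alloc_from(kvm_tmp, kvm_emulate_mtmsrd_len * 4);
 *	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
 *	p[kvm_emulate_mtmsrd_reg_offs] |= rt;	// plug in the source register
 *	patch_branch(&p[kvm_emulate_mtmsrd_branch_offs], next_inst);
 *	patch_branch(inst, p);			// divert the original site
 */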

.global kvm_template_start
kvm_template_start:
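
/*
 * mtmsrd rX, 1 only transfers MSR[EE] and MSR[RI] from the register, so the
 * whole update can be done against the shared MSR copy; the hypervisor only
 * has to be nagged when interrupts get re-enabled while one is pending.
 */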

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4

#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
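
/*
 * Full mtmsr: if only the "safe" bits (EE, RI) differ from the shared MSR
 * copy, the new value is written to the magic page and execution stays in
 * the guest; any change to a critical bit falls back to the real mtmsr.
 */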

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

#ifdef CONFIG_BOOKE
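
/*
 * wrtee rX / wrteei 1: only MSR[EE] changes, so the shared MSR copy is
 * updated in place; the real wrtee runs only when interrupts are being
 * enabled while the magic page already flags one as pending.
 */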

/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
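
/*
 * wrteei 0 only ever clears MSR[EE], which can never unmask a pending
 * interrupt, so this variant never has to leave the guest.
 */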

.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

#endif /* CONFIG_BOOKE */

#ifdef CONFIG_PPC_BOOK3S_32
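
/*
 * mtsrin rS, rB: while translation is off (MSR[IR] and MSR[DR] both clear),
 * the new segment register value is only recorded in the magic page's SR
 * array; the rlwinm below turns the SR index (top four bits of rB) into a
 * byte offset into that array.  With translation enabled, the original
 * mtsrin is executed instead.
 */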

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* rX >> 26 */
	rlwinm	r30, r0, 6, 26, 29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

#endif /* CONFIG_PPC_BOOK3S_32 */
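
/*
 * Scratch area that the runtime patching code carves the per-site template
 * copies out of.
 */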

	.balign 4
.global kvm_tmp
kvm_tmp:
	.space	(64 * 1024)

.global kvm_tmp_end
kvm_tmp_end:

.global kvm_template_end
kvm_template_end: