book3s_hv_tm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
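
/*
 * Synthesize TEXASR and TFIAR as a failing transaction would set them:
 * the 8-bit failure_cause lands in TEXASR's failure-code byte, and
 * TFIAR records the address of the failing instruction.
 */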
static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
        u64 texasr, tfiar;
        u64 msr = vcpu->arch.shregs.msr;

        tfiar = vcpu->arch.regs.nip & ~0x3ull;
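        /* cause in the top byte; flag Abort, Failure Summary, TFIAR Exact */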
        texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
        if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
                texasr |= TEXASR_SUSP;
        if (msr & MSR_PR) {
                texasr |= TEXASR_PR;
                tfiar |= 1;
        }
        vcpu->arch.tfiar = tfiar;
        /* Preserve ROT and TL fields of existing TEXASR */
        vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated. The
 * instruction image is in vcpu->arch.emul_inst. If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct. The CPU is in virtual mode in
 * host context.
 */
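/*
 * Returns RESUME_GUEST to re-enter the guest, or -1 to have the host
 * interrupt handler deliver the interrupt recorded in vcpu->arch.trap.
 */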
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
        u32 instr = vcpu->arch.emul_inst;
        u64 msr = vcpu->arch.shregs.msr;
        u64 newmsr, bescr;
        int ra, rs;

        /*
         * The TM softpatch interrupt sets NIP to the instruction following
         * the faulting instruction, which is not executed. Rewind nip to the
         * faulting instruction so it looks like a normal synchronous
         * interrupt, then update nip in the places where the instruction is
         * emulated.
         */
        vcpu->arch.regs.nip -= 4;

        /*
         * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
         * in these instructions, so masking bit 31 out doesn't change them.
         * For treclaim., tsr., and trechkpt., bit 31 = 0 makes them invalid
         * forms per the ISA; however, the P9 UM (section 4.6.10, Book II
         * Invalid Forms) specifically states that ignoring bit 31 is an
         * acceptable way to handle these invalid forms. Moreover, for
         * emulation purposes both forms (with and without bit 31 set) can
         * generate a softpatch interrupt. Hence both forms are handled below
         * for these instructions so they behave the same way.
         */
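        /*
         * PO_XOP_OPCODE_MASK keeps only the primary and extended opcode
         * fields, so the tsr., treclaim. and trechkpt. cases below match
         * whether or not bit 31 is set in the instruction image.
         */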
        switch (instr & PO_XOP_OPCODE_MASK) {
        case PPC_INST_RFID:
                /* XXX do we need to check for PR=0 here? */
                newmsr = vcpu->arch.shregs.srr1;
                /* should only get here for Sx -> T1 transition */
                WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
                               MSR_TM_TRANSACTIONAL(newmsr) &&
                               (newmsr & MSR_TM)));
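                /*
                 * sanitize_msr() constrains the guest-supplied MSR value,
                 * stripping bits the guest must not control (notably MSR_HV).
                 */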
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
                vcpu->arch.cfar = vcpu->arch.regs.nip;
                vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
                return RESUME_GUEST;

        case PPC_INST_RFEBB:
                if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
                        /* generate an illegal instruction interrupt */
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                /* check EBB facility is available */
                if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
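                        /* record the cause in the top (interruption cause) byte of HFSCR */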
                        vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
                        vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
                        vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
                        return -1; /* rerun host interrupt handler */
                }
                if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
                        /* generate a facility unavailable interrupt */
                        vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
                        vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                bescr = vcpu->arch.bescr;
                /* expect to see a S->T transition requested */
                WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
                               ((bescr >> 30) & 3) == 2));
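                /* rfebb's single operand (instruction bit 11) supplies the new BESCR[GE] */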
                bescr &= ~BESCR_GE;
                if (instr & (1 << 11))
                        bescr |= BESCR_GE;
                vcpu->arch.bescr = bescr;
                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                vcpu->arch.shregs.msr = msr;
                vcpu->arch.cfar = vcpu->arch.regs.nip;
                vcpu->arch.regs.nip = vcpu->arch.ebbrr;
                return RESUME_GUEST;

        case PPC_INST_MTMSRD:
                /* XXX do we need to check for PR=0 here? */
                rs = (instr >> 21) & 0x1f;
                newmsr = kvmppc_get_gpr(vcpu, rs);
                /* check this is a Sx -> T1 transition */
                WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
                               MSR_TM_TRANSACTIONAL(newmsr) &&
                               (newmsr & MSR_TM)));
                /* mtmsrd doesn't change LE */
                newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
                vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;

        /* ignore bit 31, see comment above */
        case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
                /* check for PR=1 and arch 2.06 bit set in PCR */
                if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
                        /* generate an illegal instruction interrupt */
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
                        vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
                        vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
                        vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
                        return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
                        vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
                        vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                        BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                /* Set CR0 to indicate previous transactional state */
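                /* i.e. CR0 := 0b0 || MSR[TS] || 0b0, matching what tsr. does in hardware */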
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                /* L=1 => tresume, L=0 => tsuspend */
                if (instr & (1 << 21)) {
                        if (MSR_TM_SUSPENDED(msr))
                                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                } else {
                        if (MSR_TM_TRANSACTIONAL(msr))
                                msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
                }
                vcpu->arch.shregs.msr = msr;
                vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;

        /* ignore bit 31, see comment above */
        case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
                        vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
                        vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
                        vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
                        return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
                        vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
                        vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                        BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                /* If no transaction active, generate TM bad thing */
                if (!MSR_TM_ACTIVE(msr)) {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
                        return RESUME_GUEST;
                }
                /* If failure was not previously recorded, recompute TEXASR */
                if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
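                        /* treclaim's RA operand, if non-zero, supplies the failure cause in its low byte */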
                        ra = (instr >> 16) & 0x1f;
                        if (ra)
                                ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
                        emulate_tx_failure(vcpu, ra);
                }
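                /* treclaim. makes the checkpointed values the live register values */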
                copy_from_checkpoint(vcpu);

                /* Set CR0 to indicate previous transactional state */
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
                vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;

        /* ignore bit 31, see comment above */
        case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
                /* XXX do we need to check for PR=0 here? */
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
                        vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
                        vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
                        vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
                        return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
                        vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
                        vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                        BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                /* If transaction active or TEXASR[FS] = 0, bad thing */
                if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
                        return RESUME_GUEST;
                }
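                /* trechkpt. installs the current live values as the checkpointed state */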
                copy_to_checkpoint(vcpu);

                /* Set CR0 to indicate previous transactional state */
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr = msr | MSR_TS_S;
                vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
        }

        /* What should we do here? We didn't recognize the instruction */
        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
        pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);

        return RESUME_GUEST;
}