vcpu_exit.c 5.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2019 Western Digital Corporation or its affiliates.
  4. *
  5. * Authors:
  6. * Anup Patel <[email protected]>
  7. */
  8. #include <linux/kvm_host.h>
  9. #include <asm/csr.h>
  10. #include <asm/insn-def.h>
  11. static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
  12. struct kvm_cpu_trap *trap)
  13. {
  14. struct kvm_memory_slot *memslot;
  15. unsigned long hva, fault_addr;
  16. bool writable;
  17. gfn_t gfn;
  18. int ret;
  19. fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
  20. gfn = fault_addr >> PAGE_SHIFT;
  21. memslot = gfn_to_memslot(vcpu->kvm, gfn);
  22. hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
  23. if (kvm_is_error_hva(hva) ||
  24. (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
  25. switch (trap->scause) {
  26. case EXC_LOAD_GUEST_PAGE_FAULT:
  27. return kvm_riscv_vcpu_mmio_load(vcpu, run,
  28. fault_addr,
  29. trap->htinst);
  30. case EXC_STORE_GUEST_PAGE_FAULT:
  31. return kvm_riscv_vcpu_mmio_store(vcpu, run,
  32. fault_addr,
  33. trap->htinst);
  34. default:
  35. return -EOPNOTSUPP;
  36. };
  37. }
  38. ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
  39. (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
  40. if (ret < 0)
  41. return ret;
  42. return 1;
  43. }
/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag representing whether we are reading instruction
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 *
 * Uses the hypervisor virtual-machine load instructions (HLV/HLVX) to read
 * guest memory through the guest's address translation. STVEC is temporarily
 * redirected to __kvm_riscv_unpriv_trap so that a fault during the access is
 * captured into @trap instead of being handled as a normal host trap.
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	/*
	 * The trap pointer is pinned in a0 (and copied to a1 inside the asm)
	 * so the __kvm_riscv_unpriv_trap handler can find and fill in @trap.
	 */
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	unsigned long flags, val, tmp, old_stvec, old_hstatus;

	/* No interrupts while HSTATUS/STVEC carry temporary values. */
	local_irq_save(flags);

	/* Install the guest's HSTATUS so HLV/HLVX use guest translation. */
	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 *
		 * Instructions are fetched as one or two halfwords: if the
		 * low two bits of the first halfword are 0b11 the insn is
		 * 32-bit, so the upper halfword is read and merged in;
		 * otherwise it is a 16-bit compressed insn and we are done.
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			HLVX_HU(%[val], %[addr])
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			HLVX_HU(%[tmp], %[addr])
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
		: [val] "=&r" (val), [tmp] "=&r" (tmp),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
		  [addr] "+&r" (guest_addr) : : "memory");

		/*
		 * A load fault taken while fetching an instruction via HLVX
		 * is reported to the caller as an instruction page fault.
		 */
		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 *
		 * Read one machine word (64-bit on RV64, 32-bit on RV32).
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			HLV_D(%[val], %[addr])
#else
			HLV_W(%[val], %[addr])
#endif
			".option pop"
		: [val] "=&r" (val),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
		: [addr] "r" (guest_addr) : "memory");
	}

	/* Restore host trap vector and HSTATUS before enabling IRQs. */
	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);
	local_irq_restore(flags);

	return val;
}
/**
 * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
 *
 * @vcpu: The VCPU pointer
 * @trap: Trap details
 *
 * Emulates VS-mode trap entry: updates the virtual supervisor CSRs the way
 * hardware would on a trap into the guest, then points the VCPU's program
 * counter at the guest's exception vector (VSTVEC).
 */
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap)
{
	unsigned long vsstatus = csr_read(CSR_VSSTATUS);

	/* Change Guest SSTATUS.SPP bit: record the privilege mode the
	 * guest was in when the trap occurred. */
	vsstatus &= ~SR_SPP;
	if (vcpu->arch.guest_context.sstatus & SR_SPP)
		vsstatus |= SR_SPP;

	/* Change Guest SSTATUS.SPIE bit: preserve the guest's previous
	 * interrupt-enable state. */
	vsstatus &= ~SR_SPIE;
	if (vsstatus & SR_SIE)
		vsstatus |= SR_SPIE;

	/* Clear Guest SSTATUS.SIE bit: interrupts are disabled on entry
	 * to the guest's trap handler. */
	vsstatus &= ~SR_SIE;

	/* Update Guest SSTATUS */
	csr_write(CSR_VSSTATUS, vsstatus);

	/* Update Guest SCAUSE, STVAL, and SEPC */
	csr_write(CSR_VSCAUSE, trap->scause);
	csr_write(CSR_VSTVAL, trap->stval);
	csr_write(CSR_VSEPC, trap->sepc);

	/* Set Guest PC to Guest exception vector */
	vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
}
  143. /*
  144. * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  145. * proper exit to userspace.
  146. */
  147. int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
  148. struct kvm_cpu_trap *trap)
  149. {
  150. int ret;
  151. /* If we got host interrupt then do nothing */
  152. if (trap->scause & CAUSE_IRQ_FLAG)
  153. return 1;
  154. /* Handle guest traps */
  155. ret = -EFAULT;
  156. run->exit_reason = KVM_EXIT_UNKNOWN;
  157. switch (trap->scause) {
  158. case EXC_VIRTUAL_INST_FAULT:
  159. if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
  160. ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
  161. break;
  162. case EXC_INST_GUEST_PAGE_FAULT:
  163. case EXC_LOAD_GUEST_PAGE_FAULT:
  164. case EXC_STORE_GUEST_PAGE_FAULT:
  165. if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
  166. ret = gstage_page_fault(vcpu, run, trap);
  167. break;
  168. case EXC_SUPERVISOR_SYSCALL:
  169. if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
  170. ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
  171. break;
  172. default:
  173. break;
  174. }
  175. /* Print details in-case of error */
  176. if (ret < 0) {
  177. kvm_err("VCPU exit error %d\n", ret);
  178. kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
  179. vcpu->arch.guest_context.sepc,
  180. vcpu->arch.guest_context.sstatus,
  181. vcpu->arch.guest_context.hstatus);
  182. kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
  183. trap->scause, trap->stval, trap->htval, trap->htinst);
  184. }
  185. return ret;
  186. }