vcpu_sbi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
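
/*
 * Map the Linux errno returned by an SBI extension handler to the SBI
 * error code that is reported back to the guest in a0.
 */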
static int kvm_linux_err_map_sbi(int err)
{
        switch (err) {
        case 0:
                return SBI_SUCCESS;
        case -EPERM:
                return SBI_ERR_DENIED;
        case -EINVAL:
                return SBI_ERR_INVALID_PARAM;
        case -EFAULT:
                return SBI_ERR_INVALID_ADDRESS;
        case -EOPNOTSUPP:
                return SBI_ERR_NOT_SUPPORTED;
        case -EALREADY:
                return SBI_ERR_ALREADY_AVAILABLE;
        default:
                return SBI_ERR_FAILURE;
        }
}

#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif
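
/*
 * Table of SBI extensions handled in-kernel; an incoming ecall is matched
 * against each entry's [extid_start, extid_end] range.
 */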
static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
        &vcpu_sbi_ext_v01,
        &vcpu_sbi_ext_base,
        &vcpu_sbi_ext_time,
        &vcpu_sbi_ext_ipi,
        &vcpu_sbi_ext_rfence,
        &vcpu_sbi_ext_srst,
        &vcpu_sbi_ext_hsm,
        &vcpu_sbi_ext_experimental,
        &vcpu_sbi_ext_vendor,
};
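
/*
 * Forward an SBI call to userspace via a KVM_EXIT_RISCV_SBI exit, passing
 * the extension ID, function ID, and argument registers in the run struct.
 */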
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = cp->a0;
        run->riscv_sbi.ret[1] = cp->a1;
}
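
/*
 * Power off all VCPUs and report a system-level event (e.g. shutdown or
 * reset) to userspace via KVM_EXIT_SYSTEM_EVENT.
 */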
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run,
                                     u32 type, u64 reason)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
                tmp->arch.power_off = true;
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->system_event.ndata = 1;
        run->system_event.data[0] = reason;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
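
/*
 * Complete an SBI call that was forwarded to userspace: copy the return
 * values supplied by userspace into a0/a1 and skip the ecall instruction.
 */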
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}
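
/* Find the in-kernel handler whose extension ID range covers @extid. */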
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
{
        int i = 0;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i]->extid_start <= extid &&
                    sbi_ext[i]->extid_end >= extid)
                        return sbi_ext[i];
        }

        return NULL;
}
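
/*
 * Top-level SBI ecall handler: dispatch to the matching extension handler,
 * then propagate traps, error codes, or userspace exits, advancing sepc
 * past the ecall when the call completes in-kernel.
 */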
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        bool userspace_exit = false;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = { 0 };
        unsigned long out_val = 0;
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit);
        } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /* Handle special error cases, i.e. trap, exit, or userspace forward */
        if (utrap.scause) {
                /* No need to increment sepc or exit ioctl loop */
                ret = 1;
                utrap.sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (userspace_exit) {
                next_sepc = false;
                ret = 0;
        } else {
                /*
                 * The SBI extension handler always returns a Linux error code.
                 * Convert it to the SBI-specific error code that can be
                 * propagated to the SBI caller.
                 */
                ret = kvm_linux_err_map_sbi(ret);
                cp->a0 = ret;
                ret = 1;
        }

ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        if (!ext_is_v01)
                cp->a1 = out_val;

        return ret;
}