vcpu_fp.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <[email protected]>
 *	Anup Patel <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/hwcap.h>

#ifdef CONFIG_FPU
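/*
 * Reset the guest FP state in sstatus: mark it Initial when the guest
 * ISA includes the F or D extension, otherwise leave the FP unit Off.
 */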
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;
	if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
	    riscv_isa_extension_available(vcpu->arch.isa, d))
		cntx->sstatus |= SR_FS_INITIAL;
	else
		cntx->sstatus |= SR_FS_OFF;
}
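
/* Mark the guest FP state Clean after it has been saved or restored. */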
static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_FS;
	cntx->sstatus |= SR_FS_CLEAN;
}
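
/*
 * Save the guest FP registers, but only if the guest has dirtied them
 * since the last save; the state is marked Clean afterwards.
 */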
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  const unsigned long *isa)
{
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}
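
/*
 * Restore the guest FP registers unless the guest FP unit is Off,
 * then mark the state Clean.
 */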
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     const unsigned long *isa)
{
	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_restore(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_restore(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}
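
/* Save the host FP registers, using the host ISA (D preferred over F). */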
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}
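
/* Restore the host FP registers, again based on the host ISA. */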
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
#endif
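
/*
 * Copy one guest FP register (an F/D register or fcsr) out to user space
 * for the KVM_GET_ONE_REG ioctl, validating the requested register size
 * against the register type.
 */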
int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			 reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
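
/*
 * Update one guest FP register (an F/D register or fcsr) from user space
 * for the KVM_SET_ONE_REG ioctl, with the same validation as the getter.
 */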
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			 reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
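
/*
 * Usage sketch: a minimal user-space caller that reaches
 * kvm_riscv_vcpu_get_reg_fp() above through the KVM_GET_ONE_REG ioctl.
 * It assumes a vCPU file descriptor ("vcpu_fd", a placeholder name) was
 * already obtained via the usual KVM_CREATE_VM/KVM_CREATE_VCPU ioctls and
 * that the RISC-V register-ID macros from <linux/kvm.h>/<asm/kvm.h> are
 * usable from user space; error handling is omitted.
 *
 *	uint32_t fcsr;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
 *			KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
 *		.addr = (unsigned long)&fcsr,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg) == 0)
 *		printf("guest fcsr = 0x%x\n", fcsr);
 */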