// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <[email protected]>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
  15. long kvm_arch_dev_ioctl(struct file *filp,
  16. unsigned int ioctl, unsigned long arg)
  17. {
  18. return -EINVAL;
  19. }
  20. int kvm_arch_check_processor_compat(void *opaque)
  21. {
  22. return 0;
  23. }
  24. int kvm_arch_hardware_setup(void *opaque)
  25. {
  26. return 0;
  27. }
  28. int kvm_arch_hardware_enable(void)
  29. {
  30. unsigned long hideleg, hedeleg;
  31. hedeleg = 0;
  32. hedeleg |= (1UL << EXC_INST_MISALIGNED);
  33. hedeleg |= (1UL << EXC_BREAKPOINT);
  34. hedeleg |= (1UL << EXC_SYSCALL);
  35. hedeleg |= (1UL << EXC_INST_PAGE_FAULT);
  36. hedeleg |= (1UL << EXC_LOAD_PAGE_FAULT);
  37. hedeleg |= (1UL << EXC_STORE_PAGE_FAULT);
  38. csr_write(CSR_HEDELEG, hedeleg);
  39. hideleg = 0;
  40. hideleg |= (1UL << IRQ_VS_SOFT);
  41. hideleg |= (1UL << IRQ_VS_TIMER);
  42. hideleg |= (1UL << IRQ_VS_EXT);
  43. csr_write(CSR_HIDELEG, hideleg);
  44. csr_write(CSR_HCOUNTEREN, -1UL);
  45. csr_write(CSR_HVIP, 0);
  46. return 0;
  47. }
  48. void kvm_arch_hardware_disable(void)
  49. {
  50. /*
  51. * After clearing the hideleg CSR, the host kernel will receive
  52. * spurious interrupts if hvip CSR has pending interrupts and the
  53. * corresponding enable bits in vsie CSR are asserted. To avoid it,
  54. * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
  55. */
  56. csr_write(CSR_VSIE, 0);
  57. csr_write(CSR_HVIP, 0);
  58. csr_write(CSR_HEDELEG, 0);
  59. csr_write(CSR_HIDELEG, 0);
  60. }
  61. int kvm_arch_init(void *opaque)
  62. {
  63. const char *str;
  64. if (!riscv_isa_extension_available(NULL, h)) {
  65. kvm_info("hypervisor extension not available\n");
  66. return -ENODEV;
  67. }
  68. if (sbi_spec_is_0_1()) {
  69. kvm_info("require SBI v0.2 or higher\n");
  70. return -ENODEV;
  71. }
  72. if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
  73. kvm_info("require SBI RFENCE extension\n");
  74. return -ENODEV;
  75. }
  76. kvm_riscv_gstage_mode_detect();
  77. kvm_riscv_gstage_vmid_detect();
  78. kvm_info("hypervisor extension available\n");
  79. switch (kvm_riscv_gstage_mode()) {
  80. case HGATP_MODE_SV32X4:
  81. str = "Sv32x4";
  82. break;
  83. case HGATP_MODE_SV39X4:
  84. str = "Sv39x4";
  85. break;
  86. case HGATP_MODE_SV48X4:
  87. str = "Sv48x4";
  88. break;
  89. case HGATP_MODE_SV57X4:
  90. str = "Sv57x4";
  91. break;
  92. default:
  93. return -ENODEV;
  94. }
  95. kvm_info("using %s G-stage page table format\n", str);
  96. kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
  97. return 0;
  98. }
  99. void kvm_arch_exit(void)
  100. {
  101. }
  102. static int __init riscv_kvm_init(void)
  103. {
  104. return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
  105. }
  106. module_init(riscv_kvm_init);