vcpu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
        STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
        STATS_DESC_COUNTER(VCPU, mmio_exit_user),
        STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
        STATS_DESC_COUNTER(VCPU, csr_exit_user),
        STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
        STATS_DESC_COUNTER(VCPU, signal_exits),
        STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};
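
/*
 * Bits 0..25 of the ISA CONFIG register correspond to the single-letter
 * (base) extensions 'a'..'z'; multi-letter extensions are handled through
 * the separate ISA_EXT register space further below.
 */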
#define KVM_RISCV_BASE_ISA_MASK        GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)        [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
        [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
        [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
        [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
        [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
        [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
        [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
        [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
        KVM_ISA_EXT_ARR(SSTC),
        KVM_ISA_EXT_ARR(SVINVAL),
        KVM_ISA_EXT_ARR(SVPBMT),
        KVM_ISA_EXT_ARR(ZIHINTPAUSE),
        KVM_ISA_EXT_ARR(ZICBOM),
};

static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
        unsigned long i;

        for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
                if (kvm_isa_ext_arr[i] == base_ext)
                        return i;
        }

        return KVM_RISCV_ISA_EXT_MAX;
}
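
/*
 * Whether the given ISA extension may be enabled for a Guest VCPU.
 * The H-extension can never be exposed to a Guest; everything else
 * defaults to allowed.
 */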
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
        switch (ext) {
        case KVM_RISCV_ISA_EXT_H:
                return false;
        default:
                break;
        }

        return true;
}

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
        switch (ext) {
        case KVM_RISCV_ISA_EXT_A:
        case KVM_RISCV_ISA_EXT_C:
        case KVM_RISCV_ISA_EXT_I:
        case KVM_RISCV_ISA_EXT_M:
        case KVM_RISCV_ISA_EXT_SSTC:
        case KVM_RISCV_ISA_EXT_SVINVAL:
        case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
                return false;
        default:
                break;
        }

        return true;
}
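
/*
 * Restore the VCPU to its reset state: CSRs, GPRs, FP state, timer,
 * pending interrupts and the HFENCE queue.
 */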
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
        bool loaded;

        /*
         * Preemption must be disabled here because this races with
         * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
         * which also call vcpu_load()/vcpu_put().
         */
        get_cpu();
        loaded = (vcpu->cpu != -1);
        if (loaded)
                kvm_arch_vcpu_put(vcpu);

        vcpu->arch.last_exit_cpu = -1;

        memcpy(csr, reset_csr, sizeof(*csr));

        memcpy(cntx, reset_cntx, sizeof(*cntx));

        kvm_riscv_vcpu_fp_reset(vcpu);

        kvm_riscv_vcpu_timer_reset(vcpu);

        WRITE_ONCE(vcpu->arch.irqs_pending, 0);
        WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

        vcpu->arch.hfence_head = 0;
        vcpu->arch.hfence_tail = 0;
        memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

        /* Reset the guest CSRs for the hotplug use case */
        if (loaded)
                kvm_arch_vcpu_load(vcpu, smp_processor_id());
        put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *cntx;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        unsigned long host_isa, i;

        /* Mark this VCPU never ran */
        vcpu->arch.ran_atleast_once = false;
        vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
        bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

        /* Setup ISA features available to VCPU */
        for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
                host_isa = kvm_isa_ext_arr[i];
                if (__riscv_isa_extension_available(NULL, host_isa) &&
                    kvm_riscv_vcpu_isa_enable_allowed(i))
                        set_bit(host_isa, vcpu->arch.isa);
        }

        /* Setup VCPU hfence queue */
        spin_lock_init(&vcpu->arch.hfence_lock);

        /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
        cntx = &vcpu->arch.guest_reset_context;
        cntx->sstatus = SR_SPP | SR_SPIE;
        cntx->hstatus = 0;
        cntx->hstatus |= HSTATUS_VTW;
        cntx->hstatus |= HSTATUS_SPVP;
        cntx->hstatus |= HSTATUS_SPV;

        /* By default, make CY, TM, and IR counters accessible in VU mode */
        reset_csr->scounteren = 0x7;

        /* Setup VCPU timer */
        kvm_riscv_vcpu_timer_init(vcpu);

        /* Reset VCPU */
        kvm_riscv_reset_vcpu(vcpu);

        return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        /*
         * The VCPU with id 0 is the designated boot CPU.
         * Keep all VCPUs with a non-zero id in the powered-off state so
         * that they can be brought up using the SBI HSM extension.
         */
        if (vcpu->vcpu_idx != 0)
                kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        /* Cleanup VCPU timer */
        kvm_riscv_vcpu_timer_deinit(vcpu);

        /* Free unused pages pre-allocated for G-stage page table mappings */
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvm_riscv_vcpu_timer_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
                !vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
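
/*
 * ONE_REG accessors: each reg->id encodes the architecture, register size
 * and RISC-V register type; the handlers below mask those fields off to
 * recover the register number within the corresponding register space.
 */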
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
                        return -EINVAL;
                reg_val = riscv_cbom_block_size;
                break;
        default:
                return -EINVAL;
        }

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long i, isa_ext, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                /*
                 * This ONE_REG interface is only defined for
                 * single letter extensions.
                 */
                if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
                        return -EINVAL;

                if (!vcpu->arch.ran_atleast_once) {
                        /* Ignore the enable/disable request for certain extensions */
                        for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
                                isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
                                if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
                                        reg_val &= ~BIT(i);
                                        continue;
                                }
                                if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
                                        if (reg_val & BIT(i))
                                                reg_val &= ~BIT(i);
                                if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
                                        if (!(reg_val & BIT(i)))
                                                reg_val |= BIT(i);
                        }
                        reg_val &= riscv_isa_extension_base(NULL);
                        /* Do not modify anything beyond single letter extensions */
                        reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
                                  (reg_val & KVM_RISCV_BASE_ISA_MASK);
                        vcpu->arch.isa[0] = reg_val;
                        kvm_riscv_vcpu_fp_reset(vcpu);
                } else {
                        return -EOPNOTSUPP;
                }
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
                return -EOPNOTSUPP;
        default:
                return -EINVAL;
        }

        return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                reg_val = cntx->sepc;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                reg_val = ((unsigned long *)cntx)[reg_num];
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
                reg_val = (cntx->sstatus & SR_SPP) ?
                          KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
        else
                return -EINVAL;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                cntx->sepc = reg_val;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                ((unsigned long *)cntx)[reg_num] = reg_val;
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
                if (reg_val == KVM_RISCV_MODE_S)
                        cntx->sstatus |= SR_SPP;
                else
                        cntx->sstatus &= ~SR_SPP;
        } else
                return -EINVAL;

        return 0;
}
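
/*
 * The sip pseudo-CSR is not stored directly: reads first fold any pending
 * interrupt updates into the shadow HVIP and then derive sip from it,
 * while writes are shifted back into HVIP form before being stored.
 */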
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                kvm_riscv_vcpu_flush_interrupts(vcpu);
                reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
        } else
                reg_val = ((unsigned long *)csr)[reg_num];

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                reg_val &= VSIP_VALID_MASK;
                reg_val <<= VSIP_TO_HVIP_SHIFT;
        }

        ((unsigned long *)csr)[reg_num] = reg_val;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
                WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

        return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
                                          const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_ISA_EXT);
        unsigned long reg_val = 0;
        unsigned long host_isa_ext;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -EINVAL;

        host_isa_ext = kvm_isa_ext_arr[reg_num];
        if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
                reg_val = 1; /* Mark the given extension as available */

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
                                          const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_ISA_EXT);
        unsigned long reg_val;
        unsigned long host_isa_ext;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        host_isa_ext = kvm_isa_ext_arr[reg_num];
        if (!__riscv_isa_extension_available(NULL, host_isa_ext))
                return -EOPNOTSUPP;

        if (!vcpu->arch.ran_atleast_once) {
                /*
                 * All multi-letter extensions and a few single-letter
                 * extensions can be disabled.
                 */
                if (reg_val == 1 &&
                    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
                        set_bit(host_isa_ext, vcpu->arch.isa);
                else if (!reg_val &&
                         kvm_riscv_vcpu_isa_disable_allowed(reg_num))
                        clear_bit(host_isa_ext, vcpu->arch.isa);
                else
                        return -EINVAL;
                kvm_riscv_vcpu_fp_reset(vcpu);
        } else {
                return -EOPNOTSUPP;
        }

        return 0;
}
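
/*
 * Dispatch KVM_SET_ONE_REG / KVM_GET_ONE_REG requests to the handler for
 * the register space encoded in reg->id.
 */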
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
                return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
                return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
                return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
                return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
                return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);

        return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
                return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
                return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
                return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
                return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
                return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);

        return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;

                if (irq.irq == KVM_INTERRUPT_SET)
                        return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
                else
                        return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
        }

        return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;

                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
                else
                        r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
                break;
        }
        default:
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}
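
/*
 * Fold asynchronously queued interrupt updates (irqs_pending, masked by
 * irqs_pending_mask) into the shadow HVIP value.
 */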
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        unsigned long mask, val;

        if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
                mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
                val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

                csr->hvip &= ~mask;
                csr->hvip |= val;
        }
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long hvip;
        struct kvm_vcpu_arch *v = &vcpu->arch;
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        /* Read current HVIP and VSIE CSRs */
        csr->vsie = csr_read(CSR_VSIE);

        /* Sync up HVIP.VSSIP bit changes done by the Guest */
        hvip = csr_read(CSR_HVIP);
        if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
                if (hvip & (1UL << IRQ_VS_SOFT)) {
                        if (!test_and_set_bit(IRQ_VS_SOFT,
                                              &v->irqs_pending_mask))
                                set_bit(IRQ_VS_SOFT, &v->irqs_pending);
                } else {
                        if (!test_and_set_bit(IRQ_VS_SOFT,
                                              &v->irqs_pending_mask))
                                clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
                }
        }

        /* Sync up timer CSRs */
        kvm_riscv_vcpu_timer_sync(vcpu);
}
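
/* Only VS-level software, timer and external interrupts can be injected */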
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
        if (irq != IRQ_VS_SOFT &&
            irq != IRQ_VS_TIMER &&
            irq != IRQ_VS_EXT)
                return -EINVAL;

        set_bit(irq, &vcpu->arch.irqs_pending);
        smp_mb__before_atomic();
        set_bit(irq, &vcpu->arch.irqs_pending_mask);

        kvm_vcpu_kick(vcpu);

        return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
        if (irq != IRQ_VS_SOFT &&
            irq != IRQ_VS_TIMER &&
            irq != IRQ_VS_EXT)
                return -EINVAL;

        clear_bit(irq, &vcpu->arch.irqs_pending);
        smp_mb__before_atomic();
        set_bit(irq, &vcpu->arch.irqs_pending_mask);

        return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
        unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
                            << VSIP_TO_HVIP_SHIFT) & mask;

        return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = false;
        kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        if (vcpu->arch.power_off)
                mp_state->mp_state = KVM_MP_STATE_STOPPED;
        else
                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int ret = 0;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.power_off = false;
                break;
        case KVM_MP_STATE_STOPPED:
                kvm_riscv_vcpu_power_off(vcpu);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        /* TODO: To be implemented later. */
        return -EINVAL;
}
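
/*
 * Program the henvcfg CSR based on the ISA extensions available to this
 * VCPU (Svpbmt, Sstc, Zicbom).
 */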
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
        u64 henvcfg = 0;

        if (riscv_isa_extension_available(isa, SVPBMT))
                henvcfg |= ENVCFG_PBMTE;

        if (riscv_isa_extension_available(isa, SSTC))
                henvcfg |= ENVCFG_STCE;

        if (riscv_isa_extension_available(isa, ZICBOM))
                henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

        csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
        csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}
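
/*
 * Restore the Guest's VS-level CSRs, G-stage page table, timer and FP
 * state on the host CPU that this VCPU is being loaded onto.
 */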
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        csr_write(CSR_VSSTATUS, csr->vsstatus);
        csr_write(CSR_VSIE, csr->vsie);
        csr_write(CSR_VSTVEC, csr->vstvec);
        csr_write(CSR_VSSCRATCH, csr->vsscratch);
        csr_write(CSR_VSEPC, csr->vsepc);
        csr_write(CSR_VSCAUSE, csr->vscause);
        csr_write(CSR_VSTVAL, csr->vstval);
        csr_write(CSR_HVIP, csr->hvip);
        csr_write(CSR_VSATP, csr->vsatp);

        kvm_riscv_vcpu_update_config(vcpu->arch.isa);

        kvm_riscv_gstage_update_hgatp(vcpu);

        kvm_riscv_vcpu_timer_restore(vcpu);

        kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
        kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
                                        vcpu->arch.isa);

        vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        vcpu->cpu = -1;

        kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
                                     vcpu->arch.isa);
        kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

        kvm_riscv_vcpu_timer_save(vcpu);

        csr->vsstatus = csr_read(CSR_VSSTATUS);
        csr->vsie = csr_read(CSR_VSIE);
        csr->vstvec = csr_read(CSR_VSTVEC);
        csr->vsscratch = csr_read(CSR_VSSCRATCH);
        csr->vsepc = csr_read(CSR_VSEPC);
        csr->vscause = csr_read(CSR_VSCAUSE);
        csr->vstval = csr_read(CSR_VSTVAL);
        csr->hvip = csr_read(CSR_HVIP);
        csr->vsatp = csr_read(CSR_VSATP);
}
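
/*
 * Handle pending VCPU requests: sleep (power-off/pause), reset, hgatp
 * update, and the various fence/TLB-flush requests.
 */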
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
        struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
                        kvm_vcpu_srcu_read_unlock(vcpu);
                        rcuwait_wait_event(wait,
                                (!vcpu->arch.power_off) && (!vcpu->arch.pause),
                                TASK_INTERRUPTIBLE);
                        kvm_vcpu_srcu_read_lock(vcpu);

                        if (vcpu->arch.power_off || vcpu->arch.pause) {
                                /*
                                 * Awakened to handle a signal; request
                                 * sleep again later.
                                 */
                                kvm_make_request(KVM_REQ_SLEEP, vcpu);
                        }
                }

                if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
                        kvm_riscv_reset_vcpu(vcpu);

                if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
                        kvm_riscv_gstage_update_hgatp(vcpu);

                if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
                        kvm_riscv_fence_i_process(vcpu);

                /*
                 * The generic KVM_REQ_TLB_FLUSH is the same as
                 * KVM_REQ_HFENCE_GVMA_VMID_ALL
                 */
                if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
                        kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

                if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
                        kvm_riscv_hfence_vvma_all_process(vcpu);

                if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
                        kvm_riscv_hfence_process(vcpu);
        }
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        csr_write(CSR_HVIP, csr->hvip);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
        guest_state_enter_irqoff();
        __kvm_riscv_switch_to(&vcpu->arch);
        vcpu->arch.last_exit_cpu = vcpu->cpu;
        guest_state_exit_irqoff();
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int ret;
        struct kvm_cpu_trap trap;
        struct kvm_run *run = vcpu->run;

        /* Mark this VCPU ran at least once */
        vcpu->arch.ran_atleast_once = true;

        kvm_vcpu_srcu_read_lock(vcpu);

        switch (run->exit_reason) {
        case KVM_EXIT_MMIO:
                /* Process MMIO value returned from user-space */
                ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
                break;
        case KVM_EXIT_RISCV_SBI:
                /* Process SBI value returned from user-space */
                ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
                break;
        case KVM_EXIT_RISCV_CSR:
                /* Process CSR value returned from user-space */
                ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
                break;
        default:
                ret = 0;
                break;
        }
        if (ret) {
                kvm_vcpu_srcu_read_unlock(vcpu);
                return ret;
        }

        if (run->immediate_exit) {
                kvm_vcpu_srcu_read_unlock(vcpu);
                return -EINTR;
        }

        vcpu_load(vcpu);

        kvm_sigset_activate(vcpu);

        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
        while (ret > 0) {
                /* Check conditions before entering the guest */
                ret = xfer_to_guest_mode_handle_work(vcpu);
                if (!ret)
                        ret = 1;

                kvm_riscv_gstage_vmid_update(vcpu);

                kvm_riscv_check_vcpu_requests(vcpu);

                local_irq_disable();

                /*
                 * Ensure we set mode to IN_GUEST_MODE after we disable
                 * interrupts and before the final VCPU requests check.
                 * See the comment in kvm_vcpu_exiting_guest_mode() and
                 * Documentation/virt/kvm/vcpu-requests.rst
                 */
                vcpu->mode = IN_GUEST_MODE;

                kvm_vcpu_srcu_read_unlock(vcpu);
                smp_mb__after_srcu_read_unlock();

                /*
                 * VCPU interrupts may have been updated asynchronously,
                 * so propagate them to the hardware state.
                 */
                kvm_riscv_vcpu_flush_interrupts(vcpu);

                /* Update HVIP CSR for current CPU */
                kvm_riscv_update_hvip(vcpu);

                if (ret <= 0 ||
                    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
                    kvm_request_pending(vcpu) ||
                    xfer_to_guest_mode_work_pending()) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        local_irq_enable();
                        kvm_vcpu_srcu_read_lock(vcpu);
                        continue;
                }

                /*
                 * Clean up stale TLB entries
                 *
                 * Note: This should be done after the G-stage VMID has been
                 * updated using kvm_riscv_gstage_vmid_ver_changed()
                 */
                kvm_riscv_local_tlb_sanitize(vcpu);

                guest_timing_enter_irqoff();

                kvm_riscv_vcpu_enter_exit(vcpu);

                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;

                /*
                 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
                 * get an interrupt between __kvm_riscv_switch_to() and
                 * local_irq_enable() which can potentially change CSRs.
                 */
                trap.sepc = vcpu->arch.guest_context.sepc;
                trap.scause = csr_read(CSR_SCAUSE);
                trap.stval = csr_read(CSR_STVAL);
                trap.htval = csr_read(CSR_HTVAL);
                trap.htinst = csr_read(CSR_HTINST);

                /* Sync up interrupt state with HW */
                kvm_riscv_vcpu_sync_interrupts(vcpu);

                preempt_disable();

                /*
                 * We must ensure that any pending interrupts are taken before
                 * we exit guest timing so that timer ticks are accounted as
                 * guest time. Transiently unmask interrupts so that any
                 * pending interrupts are taken.
                 *
                 * There's no barrier which ensures that pending interrupts are
                 * recognised, so we just hope that the CPU takes any pending
                 * interrupts between the enable and disable.
                 */
                local_irq_enable();
                local_irq_disable();

                guest_timing_exit_irqoff();

                local_irq_enable();

                preempt_enable();

                kvm_vcpu_srcu_read_lock(vcpu);

                ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
        }

        kvm_sigset_deactivate(vcpu);

        vcpu_put(vcpu);

        kvm_vcpu_srcu_read_unlock(vcpu);

        return ret;
}