hypercalls.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2019 Arm Ltd.
  3. #include <linux/arm-smccc.h>
  4. #include <linux/kvm_host.h>
  5. #include <asm/kvm_emulate.h>
  6. #include <asm/kvm_pkvm.h>
  7. #include <kvm/arm_hypercalls.h>
  8. #include <kvm/arm_psci.h>
/* All standard-service feature bits userspace may expose to the guest. */
#define KVM_ARM_SMCCC_STD_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
/* All standard-hypervisor-service feature bits (PV time etc.). */
#define KVM_ARM_SMCCC_STD_HYP_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
/*
 * Vendor-hypervisor feature bits: the generic set, plus the
 * MEMINFO/MEM_RELINQUISH hypercalls when protected KVM is enabled.
 */
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES ({			\
	unsigned long f;					\
	f = GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0); \
	if (is_protected_kvm_enabled()) {			\
		f |= BIT(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO);	\
		f |= BIT(ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH);	\
	}							\
	f;							\
})
  22. static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
  23. {
  24. struct system_time_snapshot systime_snapshot;
  25. u64 cycles = ~0UL;
  26. u32 feature;
  27. /*
  28. * system time and counter value must captured at the same
  29. * time to keep consistency and precision.
  30. */
  31. ktime_get_snapshot(&systime_snapshot);
  32. /*
  33. * This is only valid if the current clocksource is the
  34. * architected counter, as this is the only one the guest
  35. * can see.
  36. */
  37. if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
  38. return;
  39. /*
  40. * The guest selects one of the two reference counters
  41. * (virtual or physical) with the first argument of the SMCCC
  42. * call. In case the identifier is not supported, error out.
  43. */
  44. feature = smccc_get_arg1(vcpu);
  45. switch (feature) {
  46. case KVM_PTP_VIRT_COUNTER:
  47. cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2);
  48. break;
  49. case KVM_PTP_PHYS_COUNTER:
  50. cycles = systime_snapshot.cycles;
  51. break;
  52. default:
  53. return;
  54. }
  55. /*
  56. * This relies on the top bit of val[0] never being set for
  57. * valid values of system time, because that is *really* far
  58. * in the future (about 292 years from 1970, and at that stage
  59. * nobody will give a damn about it).
  60. */
  61. val[0] = upper_32_bits(systime_snapshot.real);
  62. val[1] = lower_32_bits(systime_snapshot.real);
  63. val[2] = upper_32_bits(cycles);
  64. val[3] = lower_32_bits(cycles);
  65. }
  66. static bool kvm_hvc_call_default_allowed(u32 func_id)
  67. {
  68. switch (func_id) {
  69. /*
  70. * List of function-ids that are not gated with the bitmapped
  71. * feature firmware registers, and are to be allowed for
  72. * servicing the call by default.
  73. */
  74. case ARM_SMCCC_VERSION_FUNC_ID:
  75. case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
  76. case ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID:
  77. case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
  78. case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID:
  79. return true;
  80. default:
  81. /* PSCI 0.2 and up is in the 0:0x1f range */
  82. if (ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
  83. ARM_SMCCC_FUNC_NUM(func_id) <= 0x1f)
  84. return true;
  85. /*
  86. * KVM's PSCI 0.1 doesn't comply with SMCCC, and has
  87. * its own function-id base and range
  88. */
  89. if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
  90. return true;
  91. return false;
  92. }
  93. }
  94. static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id)
  95. {
  96. struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
  97. switch (func_id) {
  98. case ARM_SMCCC_TRNG_VERSION:
  99. case ARM_SMCCC_TRNG_FEATURES:
  100. case ARM_SMCCC_TRNG_GET_UUID:
  101. case ARM_SMCCC_TRNG_RND32:
  102. case ARM_SMCCC_TRNG_RND64:
  103. return test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
  104. &smccc_feat->std_bmap);
  105. case ARM_SMCCC_HV_PV_TIME_FEATURES:
  106. case ARM_SMCCC_HV_PV_TIME_ST:
  107. return test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
  108. &smccc_feat->std_hyp_bmap);
  109. case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
  110. case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
  111. return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
  112. &smccc_feat->vendor_hyp_bmap);
  113. case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
  114. return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
  115. &smccc_feat->vendor_hyp_bmap);
  116. case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
  117. return test_bit(ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH,
  118. &smccc_feat->vendor_hyp_bmap);
  119. default:
  120. return kvm_hvc_call_default_allowed(func_id);
  121. }
  122. }
/*
 * Top-level HVC dispatcher: decode the SMCCC function ID from the vCPU
 * registers, service the call, and write the guest-visible return values.
 * Returns 1 to resume the guest; TRNG and PSCI calls are delegated and
 * may return other values (e.g. to exit to userspace).
 */
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	u32 func_id = smccc_get_function(vcpu);
	/* Default reply unless a handler below overwrites val[0..3]. */
	u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
	u32 feature;
	gpa_t gpa;

	/* Calls disabled via the firmware registers get NOT_SUPPORTED. */
	if (!kvm_hvc_call_allowed(vcpu, func_id))
		goto out;

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val[0] = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		/* Arg1 names the feature/workaround being queried. */
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			/* Spectre-v2: advertise the mitigation state. */
			switch (arm64_get_spectre_v2_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			/* Spectre-v4 (SSB): advertise the mitigation state. */
			switch (arm64_get_spectre_v4_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				/*
				 * SSBS everywhere: Indicate no firmware
				 * support, as the SSBS support will be
				 * indicated to the guest and the default is
				 * safe.
				 *
				 * Otherwise, expose a permanent mitigation
				 * to the guest, and hide SSBS so that the
				 * guest stays protected.
				 */
				if (cpus_have_final_cap(ARM64_SSBS))
					break;
				fallthrough;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_3:
			/* Spectre-BHB: advertise the mitigation state. */
			switch (arm64_get_spectre_bhb_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_HV_PV_TIME_FEATURES:
			/* Discoverable only if PV time is enabled for the VM. */
			if (test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				     &smccc_feat->std_hyp_bmap))
				val[0] = SMCCC_RET_SUCCESS;
			break;
		}
		break;
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
		val[0] = kvm_hypercall_pv_features(vcpu);
		break;
	case ARM_SMCCC_HV_PV_TIME_ST:
		/* Return the GPA of this vCPU's stolen-time region. */
		gpa = kvm_init_stolen_time(vcpu);
		if (gpa != GPA_INVALID)
			val[0] = gpa;
		break;
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		/* KVM's vendor-hypervisor UID, split across the four regs. */
		val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
		val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
		val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
		val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
		val[0] = smccc_feat->vendor_hyp_bmap;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		kvm_ptp_get_time(vcpu, val);
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID:
	case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
		/* Only meaningful for protected (pKVM) guests. */
		if (!kvm_vm_is_protected(vcpu->kvm))
			break;
		/* Account one page shared with (or unshared from) the host. */
		atomic64_add(
			func_id == ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID ?
			PAGE_SIZE : -PAGE_SIZE,
			&vcpu->kvm->stat.protected_shared_mem);
		val[0] = SMCCC_RET_SUCCESS;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
		/* Arg1 is the address of the page being given back. */
		pkvm_host_reclaim_page(vcpu->kvm, smccc_get_arg1(vcpu));
		val[0] = SMCCC_RET_SUCCESS;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID:
		/* topup_hyp_memcache() returns 0 on success. */
		if (kvm_vm_is_protected(vcpu->kvm) && !topup_hyp_memcache(vcpu))
			val[0] = SMCCC_RET_SUCCESS;
		break;
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return kvm_trng_call(vcpu);
	default:
		return kvm_psci_call(vcpu);
	}

out:
	smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
	return 1;
}
/*
 * Firmware pseudo-register IDs exposed to userspace via
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG.
 */
static const u64 kvm_arm_fw_reg_ids[] = {
	KVM_REG_ARM_PSCI_VERSION,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
	KVM_REG_ARM_STD_BMAP,
	KVM_REG_ARM_STD_HYP_BMAP,
	KVM_REG_ARM_VENDOR_HYP_BMAP,
};
  253. void kvm_arm_init_hypercalls(struct kvm *kvm)
  254. {
  255. struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
  256. smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
  257. smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
  258. smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
  259. }
/* Number of firmware pseudo-registers reported to userspace. */
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_arm_fw_reg_ids);
}
  264. int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
  265. {
  266. int i;
  267. for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
  268. if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
  269. return -EFAULT;
  270. }
  271. return 0;
  272. }
/* Low nibble of a workaround register holds the mitigation level. */
#define KVM_REG_FEATURE_LEVEL_MASK	GENMASK(3, 0)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection. Returns -EINVAL for an unknown @regid.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (arm64_get_spectre_v2_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		/* Unreachable for valid enum values; be conservative. */
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (arm64_get_spectre_v4_state()) {
		case SPECTRE_MITIGATED:
			/*
			 * As for the hypercall discovery, we pretend we
			 * don't have any FW mitigation if SSBS is there at
			 * all times.
			 */
			if (cpus_have_final_cap(ARM64_SSBS))
				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			fallthrough;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		}
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		switch (arm64_get_spectre_bhb_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
		}
		/* Unreachable for valid enum values; be conservative. */
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
	}

	return -EINVAL;
}
  321. int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
  322. {
  323. struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
  324. void __user *uaddr = (void __user *)(long)reg->addr;
  325. u64 val;
  326. switch (reg->id) {
  327. case KVM_REG_ARM_PSCI_VERSION:
  328. val = kvm_psci_version(vcpu);
  329. break;
  330. case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
  331. case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
  332. case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
  333. val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
  334. break;
  335. case KVM_REG_ARM_STD_BMAP:
  336. val = READ_ONCE(smccc_feat->std_bmap);
  337. break;
  338. case KVM_REG_ARM_STD_HYP_BMAP:
  339. val = READ_ONCE(smccc_feat->std_hyp_bmap);
  340. break;
  341. case KVM_REG_ARM_VENDOR_HYP_BMAP:
  342. val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
  343. break;
  344. default:
  345. return -ENOENT;
  346. }
  347. if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
  348. return -EFAULT;
  349. return 0;
  350. }
  351. static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
  352. {
  353. int ret = 0;
  354. struct kvm *kvm = vcpu->kvm;
  355. struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
  356. unsigned long *fw_reg_bmap, fw_reg_features;
  357. switch (reg_id) {
  358. case KVM_REG_ARM_STD_BMAP:
  359. fw_reg_bmap = &smccc_feat->std_bmap;
  360. fw_reg_features = KVM_ARM_SMCCC_STD_FEATURES;
  361. break;
  362. case KVM_REG_ARM_STD_HYP_BMAP:
  363. fw_reg_bmap = &smccc_feat->std_hyp_bmap;
  364. fw_reg_features = KVM_ARM_SMCCC_STD_HYP_FEATURES;
  365. break;
  366. case KVM_REG_ARM_VENDOR_HYP_BMAP:
  367. fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
  368. fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
  369. break;
  370. default:
  371. return -ENOENT;
  372. }
  373. /* Check for unsupported bit */
  374. if (val & ~fw_reg_features)
  375. return -EINVAL;
  376. mutex_lock(&kvm->arch.config_lock);
  377. if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
  378. val != *fw_reg_bmap) {
  379. ret = -EBUSY;
  380. goto out;
  381. }
  382. WRITE_ONCE(*fw_reg_bmap, val);
  383. out:
  384. mutex_unlock(&kvm->arch.config_lock);
  385. return ret;
  386. }
/*
 * KVM_SET_ONE_REG handler for the firmware pseudo-registers. Validates
 * userspace's requested value against what the host can honour (a VM
 * migrated in must never claim better mitigation than the host has).
 * Returns 0 on success, -ENOENT for an unknown register, -EFAULT on a
 * failed copy, -EINVAL for an unacceptable value.
 */
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	/* All firmware registers are 64 bits wide. */
	if (KVM_REG_SIZE(reg->id) != sizeof(val))
		return -ENOENT;

	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		/* PSCI 0.2+ requires the vCPU feature flag to be set. */
		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
		case KVM_ARM_PSCI_1_1:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		/* Never accept a level better than what the host provides. */
		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
			return -EINVAL;

		/*
		 * Map all the possible incoming states to the only two we
		 * really want to deal with.
		 */
		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
			break;
		default:
			return -EINVAL;
		}

		/*
		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
		 * other way around.
		 */
		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		return 0;
	case KVM_REG_ARM_STD_BMAP:
	case KVM_REG_ARM_STD_HYP_BMAP:
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
	default:
		return -ENOENT;
	}

	return -EINVAL;
}