pkvm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

#include <asm/kvm_emulate.h>

#include <nvhe/arm-smccc.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_vpipt(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

unsigned int kvm_host_sve_max_vl;

/*
 * The currently loaded hyp vCPU for each physical CPU. Used only when
 * protected KVM is enabled, but for both protected and non-protected VMs.
 */
static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);

/*
 * Host fp state for all cpus. This could include the host simd state, as well
 * as the sve and sme states if supported. Written to when the guest accesses
 * its own FPSIMD state, and read when the guest state is live and we need to
 * switch back to the host.
 *
 * Only valid when (fp_state == FP_STATE_GUEST_OWNED) in the hyp vCPU structure.
 */
unsigned long __ro_after_init kvm_arm_hyp_host_fp_state[NR_CPUS];

static void *__get_host_fpsimd_bytes(void)
{
	/*
	 * The addresses in this array have been converted to hyp addresses
	 * in finalize_init_hyp_mode().
	 */
	return (void *)kvm_arm_hyp_host_fp_state[hyp_smp_processor_id()];
}
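
/*
 * Return where the host's FPSIMD state for the current physical CPU should be
 * saved. With protected KVM enabled, the per-CPU buffer referenced by
 * kvm_arm_hyp_host_fp_state is used instead of the pointer provided by the
 * host in its vcpu structure.
 */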
struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu)
{
	if (likely(!is_protected_kvm_enabled()))
		return vcpu->arch.host_fpsimd_state;

	WARN_ON(system_supports_sve());
	return __get_host_fpsimd_bytes();
}

struct kvm_host_sve_state *get_host_sve_state(struct kvm_vcpu *vcpu)
{
	WARN_ON(!system_supports_sve());
	WARN_ON(!is_protected_kvm_enabled());
	return __get_host_fpsimd_bytes();
}

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;
	u64 cptr_set = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
				PVM_ID_AA64PFR0_ALLOW));

	/* Trap RAS unless all current versions are supported */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
	    ID_AA64PFR0_EL1_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/* Trap AMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
		hcr_clear |= HCR_AMVOFFEN;
		cptr_set |= CPTR_EL2_TAM;
	}

	/* Trap SVE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
		cptr_set |= CPTR_EL2_TZ;

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;
	u64 cptr_set = 0;

	/* Trap/constrain PMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA;

	/* Trap OS Double Lock */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	/* Trap Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
		cptr_set |= CPTR_EL2_TTA;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
	u64 hcr_set = 0;

	/* Trap LOR */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS |
			     HCR_TID3 | HCR_TACR | HCR_TIDCP | HCR_TID1;

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

/*
 * Initialize trap register values for protected VMs.
 */
static void pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	hyp_vcpu->vcpu.arch.cptr_el2 = CPTR_EL2_DEFAULT;
	hyp_vcpu->vcpu.arch.mdcr_el2 = 0;

	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
		u64 hcr = READ_ONCE(hyp_vcpu->host_vcpu->arch.hcr_el2);

		hyp_vcpu->vcpu.arch.hcr_el2 = HCR_GUEST_FLAGS | hcr;
		return;
	}

	pvm_init_trap_regs(&hyp_vcpu->vcpu);
	pvm_init_traps_aa64pfr0(&hyp_vcpu->vcpu);
	pvm_init_traps_aa64pfr1(&hyp_vcpu->vcpu);
	pvm_init_traps_aa64dfr0(&hyp_vcpu->vcpu);
	pvm_init_traps_aa64mmfr0(&hyp_vcpu->vcpu);
	pvm_init_traps_aa64mmfr1(&hyp_vcpu->vcpu);
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table' and 'nr_table_entries' as well as reads and writes to
 * 'last_hyp_vcpu_lookup'.
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}
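
/*
 * Reclaim a single page of a dying guest back to the host. The VM must
 * already have been marked as dying with __pkvm_start_teardown_vm().
 */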
int __pkvm_reclaim_dying_guest_page(pkvm_handle_t handle, u64 pfn, u64 ipa)
{
	struct pkvm_hyp_vm *hyp_vm;
	int ret = -EINVAL;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || !hyp_vm->is_dying)
		goto unlock;

	ret = __pkvm_host_reclaim_page(hyp_vm, pfn, ipa);
	if (ret)
		goto unlock;

	drain_hyp_pool(hyp_vm, &hyp_vm->host_kvm->arch.pkvm.teardown_stage2_mc);
unlock:
	hyp_spin_unlock(&vm_table_lock);

	return ret;
}
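
/*
 * Look up the hyp vCPU for the given VM handle and vcpu index and mark it as
 * loaded on the current physical CPU. A reference is taken on the hyp VM so
 * that it cannot be torn down while the vCPU is loaded. Returns NULL if
 * another vCPU is already loaded on this CPU, if the vCPU is loaded elsewhere,
 * or if the VM is dying or the lookup fails.
 */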
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	/* Cannot load a new vcpu without putting the old one first. */
	if (__this_cpu_read(loaded_hyp_vcpu))
		return NULL;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->is_dying || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];

	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
		hyp_vcpu = NULL;
		goto unlock;
	}

	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (hyp_vcpu)
		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);

	return hyp_vcpu;
}

void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_vcpu->loaded_hyp_vcpu = NULL;
	__this_cpu_write(loaded_hyp_vcpu, NULL);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
{
	return __this_cpu_read(loaded_hyp_vcpu);
}
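
/*
 * Initialize the hyp vCPU feature bitmap from the host's requested features.
 * For protected VMs only the features allowed by protected mode are retained,
 * and the configuration flags inherited from the host are sanitised to match.
 */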
static void pkvm_vcpu_init_features_from_host(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
	DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);

	/* No restrictions for non-protected VMs. */
	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
		bitmap_copy(hyp_vcpu->vcpu.arch.features,
			    host_vcpu->arch.features,
			    KVM_VCPU_MAX_FEATURES);
		return;
	}

	bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);

	/*
	 * For protected vms, always allow:
	 * - CPU starting in poweroff state
	 * - PSCI v0.2
	 */
	set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features);
	set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);

	/*
	 * Check if remaining features are allowed:
	 * - Performance Monitoring
	 * - Scalable Vectors
	 * - Pointer Authentication
	 */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW))
		set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);

	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
		set_bit(KVM_ARM_VCPU_SVE, allowed_features);

	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_ALLOW) &&
	    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_ALLOW))
		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);

	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
	    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW))
		set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);

	bitmap_and(hyp_vcpu->vcpu.arch.features, host_vcpu->arch.features,
		   allowed_features, KVM_VCPU_MAX_FEATURES);

	/*
	 * Now sanitise the configuration flags that we have inherited
	 * from the host, as they may refer to features that protected
	 * mode doesn't support.
	 */
	if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE)) {
		vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_SVE);
		vcpu_clear_flag(&hyp_vcpu->vcpu, VCPU_SVE_FINALIZED);
	}

	if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
	    !vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))
		vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_PTRAUTH);
}

static int pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	int ret = 0;

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
		ret = kvm_vcpu_enable_ptrauth(vcpu);

	return ret;
}
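
/*
 * Initialize the vCPU's PSCI power and reset state. With pvmfw present, the
 * first vCPU that is not created powered-off becomes the firmware entry vCPU;
 * otherwise the reset PC/R0 are taken from the host vcpu's registers.
 */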
static int pkvm_vcpu_init_psci(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct vcpu_reset_state *reset_state = &hyp_vcpu->vcpu.arch.reset_state;
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	if (test_bit(KVM_ARM_VCPU_POWER_OFF, hyp_vcpu->vcpu.arch.features)) {
		reset_state->reset = false;
		hyp_vcpu->power_state = PSCI_0_2_AFFINITY_LEVEL_OFF;
	} else if (pkvm_hyp_vm_has_pvmfw(hyp_vm)) {
		if (hyp_vm->pvmfw_entry_vcpu)
			return -EINVAL;

		hyp_vm->pvmfw_entry_vcpu = hyp_vcpu;
		reset_state->reset = true;
		hyp_vcpu->power_state = PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
	} else {
		struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

		reset_state->pc = READ_ONCE(host_vcpu->arch.ctxt.regs.pc);
		reset_state->r0 = READ_ONCE(host_vcpu->arch.ctxt.regs.regs[0]);
		reset_state->reset = true;
		hyp_vcpu->power_state = PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
	}

	return 0;
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	void *sve_state;

	if (!test_bit(KVM_ARM_VCPU_SVE, hyp_vcpu->vcpu.arch.features))
		return;

	sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
	hyp_unpin_shared_mem(sve_state,
			     sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vcpus[i];

		unpin_host_vcpu(hyp_vcpu->host_vcpu);
		unpin_host_sve_state(hyp_vcpu);
	}
}

static size_t pkvm_get_last_ran_size(void)
{
	return array_size(hyp_nr_cpus, sizeof(int));
}

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     int *last_ran, unsigned int nr_vcpus)
{
	u64 pvmfw_load_addr = PVMFW_INVALID_LOAD_ADDR;

	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
	hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);

	if (hyp_vm->kvm.arch.pkvm.enabled)
		pvmfw_load_addr = READ_ONCE(host_kvm->arch.pkvm.pvmfw_load_addr);
	hyp_vm->kvm.arch.pkvm.pvmfw_load_addr = pvmfw_load_addr;

	hyp_vm->kvm.arch.mmu.last_vcpu_ran = (int __percpu *)last_ran;
	memset(last_ran, -1, pkvm_get_last_ran_size());
}

static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
	hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
	hyp_vcpu->vcpu.arch.debug_ptr = &host_vcpu->arch.vcpu_debug_state;

	pkvm_vcpu_init_features_from_host(hyp_vcpu);

	ret = pkvm_vcpu_init_ptrauth(hyp_vcpu);
	if (ret)
		goto done;

	ret = pkvm_vcpu_init_psci(hyp_vcpu);
	if (ret)
		goto done;

	if (test_bit(KVM_ARM_VCPU_SVE, hyp_vcpu->vcpu.arch.features)) {
		size_t sve_state_size;
		void *sve_state;

		hyp_vcpu->vcpu.arch.sve_state = READ_ONCE(host_vcpu->arch.sve_state);
		hyp_vcpu->vcpu.arch.sve_max_vl = READ_ONCE(host_vcpu->arch.sve_max_vl);

		sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
		sve_state_size = vcpu_sve_state_size(&hyp_vcpu->vcpu);

		if (!hyp_vcpu->vcpu.arch.sve_state || !sve_state_size ||
		    hyp_pin_shared_mem(sve_state, sve_state + sve_state_size)) {
			clear_bit(KVM_ARM_VCPU_SVE, hyp_vcpu->vcpu.arch.features);
			hyp_vcpu->vcpu.arch.sve_state = NULL;
			hyp_vcpu->vcpu.arch.sve_max_vl = 0;
			ret = -EINVAL;
			goto done;
		}
	}

	pkvm_vcpu_init_traps(hyp_vcpu);
	kvm_reset_pvm_sys_regs(&hyp_vcpu->vcpu);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);

	return ret;
}

static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}
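
/*
 * Map memory donated by the host into hyp. The donation transfers ownership
 * of the pages to hyp, making them inaccessible to the host until they are
 * returned.
 */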
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	kvm_flush_dcache_to_poc(va, size);
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 * last_ran_hva: The host va of the area being donated for hyp to use to track
 *		 the most recent physical cpu on which each vcpu has run.
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva, unsigned long last_ran_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	int *last_ran = NULL;
	size_t vm_size, pgd_size, last_ran_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	last_ran_size = pkvm_get_last_ran_size();
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	last_ran = map_donated_memory(last_ran_hva, last_ran_size);
	if (!last_ran)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, last_ran, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(last_ran, last_ran_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

	return ret;
}
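
/*
 * Scrub a range of donated memory and push its pages onto the given memcache
 * so that the host can reclaim them.
 */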
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	void *start;

	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}
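
/*
 * Mark a VM as dying. A dying VM can no longer have vCPUs loaded, and its
 * pages may be reclaimed by the host ahead of the final teardown. Fails with
 * -EBUSY while hyp still holds references on the VM (e.g. a loaded vCPU).
 */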
int __pkvm_start_teardown_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm;
	int ret = 0;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	} else if (WARN_ON(hyp_page_count(hyp_vm))) {
		ret = -EBUSY;
		goto unlock;
	} else if (hyp_vm->is_dying) {
		ret = -EINVAL;
		goto unlock;
	}

	hyp_vm->is_dying = true;

unlock:
	hyp_spin_unlock(&vm_table_lock);

	return ret;
}
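
/*
 * Finish tearing down a dying VM: invalidate its VMID, remove it from the VM
 * table, and hand all hyp-owned metadata pages back to the host via the
 * teardown memcaches.
 */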
int __pkvm_finalize_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc, *stage2_mc;
	size_t vm_size, last_ran_size;
	int __percpu *last_vcpu_ran;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	} else if (!hyp_vm->is_dying) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	mc = &host_kvm->arch.pkvm.teardown_mc;
	stage2_mc = &host_kvm->arch.pkvm.teardown_stage2_mc;

	destroy_hyp_vm_pgt(hyp_vm);
	drain_hyp_pool(hyp_vm, stage2_mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
		struct kvm_hyp_memcache *vcpu_mc;
		void *addr;

		vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;

		while (vcpu_mc->nr_pages) {
			addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
			push_hyp_memcache(stage2_mc, addr, hyp_virt_to_phys);
			unmap_donated_memory_noclear(addr, PAGE_SIZE);
		}

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	last_vcpu_ran = hyp_vm->kvm.arch.mmu.last_vcpu_ran;
	last_ran_size = pkvm_get_last_ran_size();
	teardown_donated_memory(mc, (__force void *)last_vcpu_ran,
				last_ran_size);

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}

int pkvm_load_pvmfw_pages(struct pkvm_hyp_vm *vm, u64 ipa, phys_addr_t phys,
			  u64 size)
{
	struct kvm_protected_vm *pkvm = &vm->kvm.arch.pkvm;
	u64 npages, offset = ipa - pkvm->pvmfw_load_addr;
	void *src = hyp_phys_to_virt(pvmfw_base) + offset;

	if (offset >= pvmfw_size)
		return -EINVAL;

	size = min(size, pvmfw_size - offset);
	if (!PAGE_ALIGNED(size) || !PAGE_ALIGNED(src))
		return -EINVAL;

	npages = size >> PAGE_SHIFT;
	while (npages--) {
		/*
		 * No need for cache maintenance here, as the pgtable code will
		 * take care of this when installing the pte in the guest's
		 * stage-2 page table.
		 */
		memcpy(hyp_fixmap_map(phys), src, PAGE_SIZE);
		hyp_fixmap_unmap();

		src += PAGE_SIZE;
		phys += PAGE_SIZE;
	}

	return 0;
}

void pkvm_poison_pvmfw_pages(void)
{
	u64 npages = pvmfw_size >> PAGE_SHIFT;
	phys_addr_t addr = pvmfw_base;

	while (npages--) {
		hyp_poison_page(addr);
		addr += PAGE_SIZE;
	}
}

/*
 * This function sets the registers on the vcpu to their architecturally defined
 * reset values.
 *
 * Note: Can only be called by the vcpu on itself, after it has been turned on.
 */
void pkvm_reset_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct vcpu_reset_state *reset_state = &hyp_vcpu->vcpu.arch.reset_state;
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	WARN_ON(!reset_state->reset);

	pkvm_vcpu_init_ptrauth(hyp_vcpu);
	kvm_reset_vcpu_core(&hyp_vcpu->vcpu);
	kvm_reset_pvm_sys_regs(&hyp_vcpu->vcpu);

	/* Must be done after resetting sys registers. */
	kvm_reset_vcpu_psci(&hyp_vcpu->vcpu, reset_state);
	if (hyp_vm->pvmfw_entry_vcpu == hyp_vcpu) {
		struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
		u64 entry = hyp_vm->kvm.arch.pkvm.pvmfw_load_addr;
		int i;

		/* X0 - X14 provided by the VMM (preserved) */
		for (i = 0; i <= 14; ++i) {
			u64 val = vcpu_get_reg(host_vcpu, i);

			vcpu_set_reg(&hyp_vcpu->vcpu, i, val);
		}

		/* X15: Boot protocol version */
		vcpu_set_reg(&hyp_vcpu->vcpu, 15, 0);

		/* PC: IPA of pvmfw base */
		*vcpu_pc(&hyp_vcpu->vcpu) = entry;

		hyp_vm->pvmfw_entry_vcpu = NULL;

		/* Auto enroll MMIO guard */
		set_bit(KVM_ARCH_FLAG_MMIO_GUARD, &hyp_vm->kvm.arch.flags);
	}

	reset_state->reset = false;

	hyp_vcpu->exit_code = 0;

	WARN_ON(hyp_vcpu->power_state != PSCI_0_2_AFFINITY_LEVEL_ON_PENDING);
	WRITE_ONCE(hyp_vcpu->vcpu.arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
	WRITE_ONCE(hyp_vcpu->power_state, PSCI_0_2_AFFINITY_LEVEL_ON);
}
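
/* Find the vCPU of @hyp_vm whose MPIDR affinity matches @mpidr, if any. */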
struct pkvm_hyp_vcpu *pkvm_mpidr_to_hyp_vcpu(struct pkvm_hyp_vm *hyp_vm,
					     u64 mpidr)
{
	int i;

	mpidr &= MPIDR_HWID_BITMASK;

	for (i = 0; i < hyp_vm->nr_vcpus; i++) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[i];

		if (mpidr == kvm_vcpu_get_mpidr_aff(&hyp_vcpu->vcpu))
			return hyp_vcpu;
	}

	return NULL;
}

/*
 * Returns true if the hypervisor has handled the PSCI call, and control should
 * go back to the guest, or false if the host needs to do some additional work
 * (i.e., wake up the vcpu).
 */
static bool pvm_psci_vcpu_on(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
	struct vcpu_reset_state *reset_state;
	struct pkvm_hyp_vcpu *target;
	unsigned long cpu_id, ret;
	int power_state;

	cpu_id = smccc_get_arg1(&hyp_vcpu->vcpu);
	if (!kvm_psci_valid_affinity(&hyp_vcpu->vcpu, cpu_id)) {
		ret = PSCI_RET_INVALID_PARAMS;
		goto error;
	}

	target = pkvm_mpidr_to_hyp_vcpu(hyp_vm, cpu_id);
	if (!target) {
		ret = PSCI_RET_INVALID_PARAMS;
		goto error;
	}

	/*
	 * Make sure the requested vcpu is not on to begin with.
	 * Atomic to avoid race between vcpus trying to power on the same vcpu.
	 */
	power_state = cmpxchg(&target->power_state,
			      PSCI_0_2_AFFINITY_LEVEL_OFF,
			      PSCI_0_2_AFFINITY_LEVEL_ON_PENDING);
	switch (power_state) {
	case PSCI_0_2_AFFINITY_LEVEL_ON_PENDING:
		ret = PSCI_RET_ON_PENDING;
		goto error;
	case PSCI_0_2_AFFINITY_LEVEL_ON:
		ret = PSCI_RET_ALREADY_ON;
		goto error;
	case PSCI_0_2_AFFINITY_LEVEL_OFF:
		break;
	default:
		ret = PSCI_RET_INTERNAL_FAILURE;
		goto error;
	}

	reset_state = &target->vcpu.arch.reset_state;
	reset_state->pc = smccc_get_arg2(&hyp_vcpu->vcpu);
	reset_state->r0 = smccc_get_arg3(&hyp_vcpu->vcpu);
	/* Propagate caller endianness */
	reset_state->be = kvm_vcpu_is_be(&hyp_vcpu->vcpu);
	reset_state->reset = true;

	/*
	 * Return to the host, which should make the KVM_REQ_VCPU_RESET request
	 * as well as kvm_vcpu_wake_up() to schedule the vcpu.
	 */
	return false;

error:
	/* If there's an error go back straight to the guest. */
	smccc_set_retval(&hyp_vcpu->vcpu, ret, 0, 0, 0);
	return true;
}

static bool pvm_psci_vcpu_affinity_info(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	unsigned long target_affinity_mask, target_affinity, lowest_affinity_level;
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	unsigned long mpidr, ret;
	int i, matching_cpus = 0;

	target_affinity = smccc_get_arg1(vcpu);
	lowest_affinity_level = smccc_get_arg2(vcpu);
	if (!kvm_psci_valid_affinity(vcpu, target_affinity)) {
		ret = PSCI_RET_INVALID_PARAMS;
		goto done;
	}

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask) {
		ret = PSCI_RET_INVALID_PARAMS;
		goto done;
	}

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;
	ret = PSCI_0_2_AFFINITY_LEVEL_OFF;

	/*
	 * If at least one vcpu matching target affinity is ON then return ON,
	 * then if at least one is PENDING_ON then return PENDING_ON.
	 * Otherwise, return OFF.
	 */
	for (i = 0; i < hyp_vm->nr_vcpus; i++) {
		struct pkvm_hyp_vcpu *target = hyp_vm->vcpus[i];

		mpidr = kvm_vcpu_get_mpidr_aff(&target->vcpu);

		if ((mpidr & target_affinity_mask) == target_affinity) {
			int power_state;

			matching_cpus++;
			power_state = READ_ONCE(target->power_state);
			switch (power_state) {
			case PSCI_0_2_AFFINITY_LEVEL_ON_PENDING:
				ret = PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
				break;
			case PSCI_0_2_AFFINITY_LEVEL_ON:
				ret = PSCI_0_2_AFFINITY_LEVEL_ON;
				goto done;
			case PSCI_0_2_AFFINITY_LEVEL_OFF:
				break;
			default:
				ret = PSCI_RET_INTERNAL_FAILURE;
				goto done;
			}
		}
	}

	if (!matching_cpus)
		ret = PSCI_RET_INVALID_PARAMS;

done:
	/* Nothing to be handled by the host. Go back to the guest. */
	smccc_set_retval(vcpu, ret, 0, 0, 0);
	return true;
}

/*
 * Returns true if the hypervisor has handled the PSCI call, and control should
 * go back to the guest, or false if the host needs to do some additional work
 * (e.g., turn off and update vcpu scheduling status).
 */
static bool pvm_psci_vcpu_off(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;

	WARN_ON(vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED);
	WARN_ON(hyp_vcpu->power_state != PSCI_0_2_AFFINITY_LEVEL_ON);

	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	WRITE_ONCE(hyp_vcpu->power_state, PSCI_0_2_AFFINITY_LEVEL_OFF);

	/* Return to the host so that it can finish powering off the vcpu. */
	return false;
}

static bool pvm_psci_version(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	/* Nothing to be handled by the host. Go back to the guest. */
	smccc_set_retval(&hyp_vcpu->vcpu, KVM_ARM_PSCI_1_1, 0, 0, 0);
	return true;
}

static bool pvm_psci_not_supported(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	/* Nothing to be handled by the host. Go back to the guest. */
	smccc_set_retval(&hyp_vcpu->vcpu, PSCI_RET_NOT_SUPPORTED, 0, 0, 0);
	return true;
}

static bool pvm_psci_features(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	u32 feature = smccc_get_arg1(vcpu);
	unsigned long val;

	switch (feature) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
	case PSCI_1_0_FN_PSCI_FEATURES:
	case PSCI_1_1_FN_SYSTEM_RESET2:
	case PSCI_1_1_FN64_SYSTEM_RESET2:
	case ARM_SMCCC_VERSION_FUNC_ID:
		val = PSCI_RET_SUCCESS;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	/* Nothing to be handled by the host. Go back to the guest. */
	smccc_set_retval(vcpu, val, 0, 0, 0);
	return true;
}
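
/*
 * Dispatch a PSCI call from a protected guest. Returns true if hyp has fully
 * handled the call and control should return to the guest, or false if the
 * call must be completed by the host.
 */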
static bool pkvm_handle_psci(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	u32 psci_fn = smccc_get_function(vcpu);

	switch (psci_fn) {
	case PSCI_0_2_FN_CPU_ON:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_CPU_ON:
		return pvm_psci_vcpu_on(hyp_vcpu);
	case PSCI_0_2_FN_CPU_OFF:
		return pvm_psci_vcpu_off(hyp_vcpu);
	case PSCI_0_2_FN_AFFINITY_INFO:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_AFFINITY_INFO:
		return pvm_psci_vcpu_affinity_info(hyp_vcpu);
	case PSCI_0_2_FN_PSCI_VERSION:
		return pvm_psci_version(hyp_vcpu);
	case PSCI_1_0_FN_PSCI_FEATURES:
		return pvm_psci_features(hyp_vcpu);
	case PSCI_0_2_FN_SYSTEM_RESET:
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_1_1_FN_SYSTEM_RESET2:
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		return false; /* Handled by the host. */
	default:
		break;
	}

	return pvm_psci_not_supported(hyp_vcpu);
}

static u64 __pkvm_memshare_page_req(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	u64 elr;

	/* Fake up a data abort (Level 3 translation fault on write) */
	vcpu->arch.fault.esr_el2 = (u32)ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT |
				   ESR_ELx_WNR | ESR_ELx_FSC_FAULT |
				   FIELD_PREP(ESR_ELx_FSC_LEVEL, 3);

	/* Shuffle the IPA around into the HPFAR */
	vcpu->arch.fault.hpfar_el2 = (ipa >> 8) & HPFAR_MASK;

	/* This is a virtual address. 0's good. Let's go with 0. */
	vcpu->arch.fault.far_el2 = 0;

	/* Rewind the ELR so we return to the HVC once the IPA is mapped */
	elr = read_sysreg(elr_el2);
	elr -= 4;
	write_sysreg(elr, elr_el2);

	return ARM_EXCEPTION_TRAP;
}
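
/*
 * Handle the MEM_SHARE hypercall from a protected guest. If the page is not
 * yet mapped in the guest's stage-2 (-EFAULT), the call is converted into a
 * faked data abort so that the page gets mapped and the rewound HVC is
 * retried.
 */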
static bool pkvm_memshare_call(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	u64 ipa = smccc_get_arg1(vcpu);
	u64 arg2 = smccc_get_arg2(vcpu);
	u64 arg3 = smccc_get_arg3(vcpu);
	int err;

	if (arg2 || arg3)
		goto out_guest_err;

	err = __pkvm_guest_share_host(hyp_vcpu, ipa);
	switch (err) {
	case 0:
		/* Success! Now tell the host. */
		goto out_host;
	case -EFAULT:
		/*
		 * Convert the exception into a data abort so that the page
		 * being shared is mapped into the guest next time.
		 */
		*exit_code = __pkvm_memshare_page_req(hyp_vcpu, ipa);
		goto out_host;
	}

out_guest_err:
	smccc_set_retval(vcpu, SMCCC_RET_INVALID_PARAMETER, 0, 0, 0);
	return true;

out_host:
	return false;
}

static bool pkvm_memunshare_call(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	u64 ipa = smccc_get_arg1(vcpu);
	u64 arg2 = smccc_get_arg2(vcpu);
	u64 arg3 = smccc_get_arg3(vcpu);
	int err;

	if (arg2 || arg3)
		goto out_guest_err;

	err = __pkvm_guest_unshare_host(hyp_vcpu, ipa);
	if (err)
		goto out_guest_err;

	return false;

out_guest_err:
	smccc_set_retval(vcpu, SMCCC_RET_INVALID_PARAMETER, 0, 0, 0);
	return true;
}

static bool pkvm_meminfo_call(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	u64 arg1 = smccc_get_arg1(vcpu);
	u64 arg2 = smccc_get_arg2(vcpu);
	u64 arg3 = smccc_get_arg3(vcpu);

	if (arg1 || arg2 || arg3)
		goto out_guest_err;

	smccc_set_retval(vcpu, PAGE_SIZE, 0, 0, 0);
	return true;

out_guest_err:
	smccc_set_retval(vcpu, SMCCC_RET_INVALID_PARAMETER, 0, 0, 0);
	return true;
}

static bool pkvm_memrelinquish_call(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	u64 ipa = smccc_get_arg1(vcpu);
	u64 arg2 = smccc_get_arg2(vcpu);
	u64 arg3 = smccc_get_arg3(vcpu);
	u64 pa = 0;
	int ret;

	if (arg2 || arg3)
		goto out_guest_err;

	ret = __pkvm_guest_relinquish_to_host(hyp_vcpu, ipa, &pa);
	if (ret)
		goto out_guest_err;

	if (pa != 0) {
		/* Now pass to host. */
		return false;
	}

	/* This was a NOP as no page was actually mapped at the IPA. */
	smccc_set_retval(vcpu, 0, 0, 0, 0);
	return true;

out_guest_err:
	smccc_set_retval(vcpu, SMCCC_RET_INVALID_PARAMETER, 0, 0, 0);
	return true;
}
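
/*
 * Handle an MMIO guard MAP request from the guest. If hyp runs out of
 * memcache pages, the HVC is rewound and forwarded to the host so that more
 * pages can be donated before the guest retries.
 */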
static bool pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code)
{
	u64 retval = SMCCC_RET_SUCCESS;
	u64 ipa = smccc_get_arg1(&hyp_vcpu->vcpu);
	int ret;

	ret = __pkvm_install_ioguard_page(hyp_vcpu, ipa);
	if (ret == -ENOMEM) {
		/*
		 * We ran out of memcache, let's ask for more. Cancel
		 * the effects of the HVC that took us here, and
		 * forward the hypercall to the host for page donation
		 * purposes.
		 */
		write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
		return false;
	}

	if (ret)
		retval = SMCCC_RET_INVALID_PARAMETER;

	smccc_set_retval(&hyp_vcpu->vcpu, retval, 0, 0, 0);
	return true;
}

bool smccc_trng_available;

static bool pkvm_forward_trng(struct kvm_vcpu *vcpu)
{
	u32 fn = smccc_get_function(vcpu);
	struct arm_smccc_res res;
	unsigned long arg1 = 0;

	/*
	 * Forward TRNG calls to EL3, as we can't trust the host to handle
	 * these for us.
	 */
	switch (fn) {
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		arg1 = smccc_get_arg1(vcpu);
		fallthrough;
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_GET_UUID:
		arm_smccc_1_1_smc(fn, arg1, &res);
		smccc_set_retval(vcpu, res.a0, res.a1, res.a2, res.a3);
		memzero_explicit(&res, sizeof(res));
		break;
	}

	return true;
}

/*
 * Handler for protected VM HVC calls.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	u64 val[4] = { SMCCC_RET_NOT_SUPPORTED };
	u32 fn = smccc_get_function(vcpu);
	struct pkvm_hyp_vcpu *hyp_vcpu;

	hyp_vcpu = container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);

	switch (fn) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		/* Nothing to be handled by the host. Go back to the guest. */
		val[0] = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
		val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
		val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
		val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
		val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES);
		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO);
		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_SHARE);
		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE);
		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH);
		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_INFO);
		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL);
		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP);
		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP);
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID:
		set_bit(KVM_ARCH_FLAG_MMIO_GUARD, &vcpu->kvm->arch.flags);
		val[0] = SMCCC_RET_SUCCESS;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID:
		return pkvm_install_ioguard_page(hyp_vcpu, exit_code);
	case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID:
		if (__pkvm_remove_ioguard_page(hyp_vcpu, vcpu_get_reg(vcpu, 1)))
			val[0] = SMCCC_RET_INVALID_PARAMETER;
		else
			val[0] = SMCCC_RET_SUCCESS;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID:
	case ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID:
		return pkvm_meminfo_call(hyp_vcpu);
	case ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID:
		return pkvm_memshare_call(hyp_vcpu, exit_code);
	case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
		return pkvm_memunshare_call(hyp_vcpu);
	case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
		return pkvm_memrelinquish_call(hyp_vcpu);
	case ARM_SMCCC_TRNG_VERSION ... ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		if (smccc_trng_available)
			return pkvm_forward_trng(vcpu);
		break;
	default:
		return pkvm_handle_psci(hyp_vcpu);
	}

	smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
	return true;
}

/*
 * Handler for non-protected VM HVC calls.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	u32 fn = smccc_get_function(vcpu);
	struct pkvm_hyp_vcpu *hyp_vcpu;

	hyp_vcpu = container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);

	switch (fn) {
	case ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID:
		return pkvm_meminfo_call(hyp_vcpu);
	case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
		return pkvm_memrelinquish_call(hyp_vcpu);
	}

	return false;
}