arm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <[email protected]>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"
#include "hyp_trace.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
DECLARE_KVM_NVHE_PER_CPU(int, hyp_cpu_number);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
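
/*
 * Static key tracking whether any VM relies on a userspace irqchip: it is
 * incremented the first time a vCPU of such a VM runs and decremented when
 * that vCPU is destroyed, letting the run loop skip the userspace-irqchip
 * notification paths when no such VM exists.
 */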
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	/* Capabilities with flags */
	switch (cap->cap) {
	case KVM_CAP_ARM_PROTECTED_VM:
		return pkvm_vm_ioctl_enable_cap(kvm, cap);
	default:
		if (cap->flags)
			return -EINVAL;
	}

	/* Capabilities without flags */
	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		if (kvm_vm_is_protected(kvm)) {
			r = -EINVAL;
		} else {
			r = 0;
			set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
				&kvm->arch.flags);
		}
		break;
	case KVM_CAP_ARM_MTE:
		mutex_lock(&kvm->lock);
		if (!system_supports_mte() ||
		    kvm_vm_is_protected(kvm) ||
		    kvm->created_vcpus) {
			r = -EINVAL;
		} else {
			r = 0;
			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
		r = 0;
		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

static void set_default_spectre(struct kvm *kvm)
{
	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
		kvm->arch.pfr0_csv2 = 1;
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
		kvm->arch.pfr0_csv3 = 1;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	if (type & ~KVM_VM_TYPE_MASK)
		return -EINVAL;

	mutex_init(&kvm->arch.config_lock);

#ifdef CONFIG_LOCKDEP
	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->lock);
#endif

	ret = kvm_share_hyp(kvm, kvm + 1);
	if (ret)
		return ret;

	ret = pkvm_init_host_vm(kvm, type);
	if (ret)
		goto err_unshare_kvm;

	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_unshare_kvm;
	}
	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
	if (ret)
		goto err_free_cpumask;

	kvm_vgic_early_init(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->max_vcpus = kvm_arm_default_max_vcpus();

	set_default_spectre(kvm);
	kvm_arm_init_hypercalls(kvm);

	return 0;

err_free_cpumask:
	free_cpumask_var(kvm->arch.supported_cpus);
err_unshare_kvm:
	kvm_unshare_hyp(kvm, kvm + 1);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	bitmap_free(kvm->arch.pmu_filter);
	free_cpumask_var(kvm->arch.supported_cpus);

	kvm_vgic_destroy(kvm);

	if (is_protected_kvm_enabled())
		pkvm_destroy_hyp_vm(kvm);

	kvm_destroy_vcpus(kvm);

	if (atomic64_read(&kvm->stat.protected_hyp_mem))
		pr_warn("%lluB of donations to the nVHE hyp are missing\n",
			atomic64_read(&kvm->stat.protected_hyp_mem));

	kvm_unshare_hyp(kvm, kvm + 1);
}

static int kvm_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_PTP_KVM:
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
		r = 1;
		break;
	case KVM_CAP_ARM_NISV_TO_USER:
		r = !kvm || !kvm_vm_is_protected(kvm);
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
		 * architectures, as it does not always bound it to
		 * KVM_CAP_MAX_VCPUS. It should not matter much because
		 * this is just an advisory value.
		 */
		r = min_t(unsigned int, num_online_cpus(),
			  kvm_arm_default_max_vcpus());
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_ARM_MTE:
		r = system_supports_mte();
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = get_kvm_ipa_limit();
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = system_has_full_ptr_auth();
		break;
	default:
		r = 0;
	}

	return r;
}

/*
 * Checks whether the extension specified in ext is supported in protected
 * mode for the specified vm.
 * The capabilities supported by kvm in general are passed in kvm_cap.
 */
static int pkvm_check_extension(struct kvm *kvm, long ext, int kvm_cap)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = kvm_cap;
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = min(kvm_cap, pkvm_get_max_brps());
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = min(kvm_cap, pkvm_get_max_wrps());
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_cap && FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
					 PVM_ID_AA64DFR0_ALLOW);
		break;
	case KVM_CAP_ARM_SVE:
		r = kvm_cap && FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE),
					 PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
		r = kvm_cap &&
		    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API),
			      PVM_ID_AA64ISAR1_ALLOW) &&
		    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA),
			      PVM_ID_AA64ISAR1_ALLOW);
		break;
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = kvm_cap &&
		    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI),
			      PVM_ID_AA64ISAR1_ALLOW) &&
		    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA),
			      PVM_ID_AA64ISAR1_ALLOW);
		break;
	case KVM_CAP_ARM_PROTECTED_VM:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r = kvm_check_extension(kvm, ext);

	if (kvm && kvm_vm_is_protected(kvm))
		r = pkvm_check_extension(kvm, ext, r);

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	size_t sz = sizeof(struct kvm);

	if (!has_vhe())
		return kzalloc(sz, GFP_KERNEL_ACCOUNT);

	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	spin_lock_init(&vcpu->arch.mp_state_lock);

#ifdef CONFIG_LOCKDEP
	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
	mutex_lock(&vcpu->mutex);
	mutex_lock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->mutex);
#endif

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/*
	 * Default value for the FP state, will be overloaded at load
	 * time if we support FP (pretty likely)
	 */
	vcpu->arch.fp_state = FP_STATE_FREE;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return kvm_share_hyp(vcpu, vcpu + 1);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	if (is_protected_kvm_enabled())
		free_hyp_stage2_memcache(&vcpu->arch.pkvm_memcache, vcpu->kvm);
	else
		kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);

	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);
	kvm_arm_vcpu_destroy(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	if (is_protected_kvm_enabled())
		goto nommu;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We guarantee that both TLBs and I-cache are private to each
	 * vcpu. If detecting that a vcpu from the same VM has
	 * previously run on the same physical CPU, call into the
	 * hypervisor code to nuke the relevant contexts.
	 *
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
		*last_ran = vcpu->vcpu_id;
	}

nommu:
	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_sysregs_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
	kvm_arch_vcpu_load_debug_state_flags(vcpu);

	if (is_protected_kvm_enabled()) {
		kvm_call_hyp_nvhe(__pkvm_vcpu_load,
				  vcpu->kvm->arch.pkvm.handle,
				  vcpu->vcpu_idx, vcpu->arch.hcr_el2);
		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
			     &vcpu->arch.vgic_cpu.vgic_v3);
	}

	if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
		vcpu_set_on_unsupported_cpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (is_protected_kvm_enabled()) {
		kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
			     &vcpu->arch.vgic_cpu.vgic_v3);
		kvm_call_hyp_nvhe(__pkvm_vcpu_put);

		/* __pkvm_vcpu_put implies a sync of the state */
		if (!kvm_vm_is_protected(vcpu->kvm))
			vcpu_set_flag(vcpu, PKVM_HOST_STATE_DIRTY);
	}

	kvm_arch_vcpu_put_debug_state_flags(vcpu);
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_sysregs_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu, false);
	kvm_vcpu_pmu_restore_host(vcpu);
	kvm_arm_vmid_clear_active();

	vcpu_clear_on_unsupported_cpu(vcpu);
	vcpu->cpu = -1;
}

static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_arm_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = READ_ONCE(vcpu->arch.mp_state);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
		break;
	case KVM_MP_STATE_STOPPED:
		__kvm_arm_vcpu_power_off(vcpu);
		break;
	case KVM_MP_STATE_SUSPENDED:
		kvm_arm_vcpu_suspend(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);

	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return *vcpu_pc(vcpu);
}
#endif

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

/*
 * Handle both the initialisation that is being done when the vcpu is
 * run for the first time, as well as the updates that must be
 * performed each time we get a new thread dealing with this vcpu.
 */
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (!kvm_vcpu_initialized(vcpu))
		return -ENOEXEC;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	ret = kvm_arch_vcpu_run_map_fp(vcpu);
	if (ret)
		return ret;

	if (likely(vcpu_has_run_once(vcpu)))
		return 0;

	kvm_arm_vcpu_init_debug(vcpu);

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);
	if (ret)
		return ret;

	if (is_protected_kvm_enabled()) {
		/* Start with the vcpu in a dirty state */
		if (!kvm_vm_is_protected(vcpu->kvm))
			vcpu_set_flag(vcpu, PKVM_HOST_STATE_DIRTY);
		ret = pkvm_create_hyp_vm(kvm);
		if (ret)
			return ret;
	}

	if (!irqchip_in_kernel(kvm)) {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	mutex_lock(&kvm->arch.config_lock);
	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
	mutex_unlock(&kvm->arch.config_lock);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		__kvm_vcpu_wake_up(vcpu);
	}
}

static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

/**
 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
 * @vcpu:	The VCPU pointer
 *
 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
 * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
 * on when a wake event arrives, e.g. there may already be a pending wake event.
 */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	/*
	 * Sync back the state of the GIC CPU interface so that we have
	 * the latest PMR and group enables. This ensures that
	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
	 * we have pending interrupts, e.g. when determining if the
	 * vCPU should block.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_put(vcpu, true);
	vcpu_set_flag(vcpu, IN_WFI);
	vgic_v4_put(vcpu);
	preempt_enable();

	kvm_vcpu_halt(vcpu);
	vcpu_clear_flag(vcpu, IN_WFIT);

	preempt_disable();
	kvm_vgic_load(vcpu);
	vcpu_clear_flag(vcpu, IN_WFI);
	vgic_v4_load(vcpu);
	preempt_enable();
}

static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_vcpu_suspended(vcpu))
		return 1;

	kvm_vcpu_wfi(vcpu);

	/*
	 * The suspend state is sticky; we do not leave it until userspace
	 * explicitly marks the vCPU as runnable. Request that we suspend again
	 * later.
	 */
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);

	/*
	 * Check to make sure the vCPU is actually runnable. If so, exit to
	 * userspace informing it of the wakeup condition.
	 */
	if (kvm_arch_vcpu_runnable(vcpu)) {
		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		return 0;
	}

	/*
	 * Otherwise, we were unblocked to process a different event, such as a
	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
	 * process the event.
	 */
	return 1;
}

/**
 * check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu:	the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	   0 if we should exit to userspace
 *	   < 0 if we should exit to userspace, where the return value indicates
 *	   an error
 */
static int check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			kvm_vcpu_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu);
			vgic_v4_load(vcpu);
			preempt_enable();
		}

		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_pmu_handle_pmcr(vcpu,
					    __vcpu_sys_reg(vcpu, PMCR_EL0));

		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
			return kvm_vcpu_suspend(vcpu);
	}

	return 1;
}

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	return !kvm_supports_32bit_el0();
}

/**
 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
 * @vcpu:	The VCPU pointer
 * @ret:	Pointer to write optional return code
 *
 * Returns: true if the VCPU needs to return to a preemptible + interruptible
 *	    kernel context and skip guest entry.
 *
 * This function disambiguates between two different types of exits: exits to a
 * preemptible + interruptible kernel context and exits to userspace. For an
 * exit to userspace, this function will write the return code to ret and return
 * true. For an exit to preemptible + interruptible kernel context (i.e. check
 * for pending work and re-enter), return true without writing to ret.
 */
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If we're using a userspace irqchip, then check if we need
	 * to tell a userspace irqchip about timer or PMU level
	 * changes and if so, exit to userspace (the actual level
	 * state gets updated in kvm_timer_update_run and
	 * kvm_pmu_update_run below).
	 */
	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
		if (kvm_timer_should_notify_user(vcpu) ||
		    kvm_pmu_should_notify_user(vcpu)) {
			*ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
			return true;
		}
	}

	if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
		run->fail_entry.cpu = smp_processor_id();
		*ret = 0;
		return true;
	}

	return kvm_request_pending(vcpu) ||
	       xfer_to_guest_mode_work_pending();
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_enter_irqoff();
	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
	guest_state_exit_irqoff();

	return ret;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;
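
	/*
	 * If the previous exit was a KVM_EXIT_MMIO handled by userspace,
	 * complete the emulated access (write back the data for a read and
	 * advance the guest PC) before entering the guest again.
	 */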
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret)
			return ret;
	}

	vcpu_load(vcpu);

	if (run->immediate_exit) {
		ret = -EINTR;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->flags = 0;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (!ret)
			ret = 1;

		if (ret > 0)
			ret = check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		/*
		 * The VMID allocator only tracks active VMIDs per
		 * physical CPU, and therefore the VMID allocated may not be
		 * preserved on VMID roll-over if the task was preempted,
		 * making a thread's VMID inactive. So we need to call
		 * kvm_arm_vmid_update() in non-preemptible context.
		 */
		kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		kvm_pmu_update_vcpu_events(vcpu);

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);
		kvm_arch_vcpu_ctxflush_fp(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_timing_enter_irqoff();

		ret = kvm_arm_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
		 * context synchronization event) is necessary to ensure that
		 * pending interrupts are taken.
		 */
		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
			local_irq_enable();
			isb();
			local_irq_disable();
		}

		guest_timing_exit_irqoff();

		local_irq_enable();

		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		/*
		 * The ARMv8 architecture doesn't give the hypervisor
		 * a mechanism to prevent a guest from dropping to AArch32 EL0
		 * if implemented by the CPU. If we spot the guest in such
		 * state and that we decided it wasn't supposed to do so (like
		 * with the asymmetric AArch32 case), return to userspace with
		 * a fatal error.
		 */
		if (vcpu_mode_is_bad_32bit(vcpu)) {
			/*
			 * As we have caught the guest red-handed, decide that
			 * it isn't fit for purpose anymore by making the vcpu
			 * invalid. The VMM can try and fix it by issuing a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu->arch.target = -1;
			ret = ARM_EXCEPTION_IL;
		}

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

out:
	/*
	 * In the unlikely event that we are returning to userspace
	 * with pending exceptions or PC adjustment, commit these
	 * adjustments in order to give userspace a consistent view of
	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
	 * being preempt-safe on VHE.
	 */
	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
		     vcpu_get_flag(vcpu, INCREMENT_PC)))
		kvm_call_hyp(__kvm_adjust_pc, vcpu);

	vcpu_put(vcpu);
	return ret;
}

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
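
	/*
	 * The decode above follows the arm64 KVM_IRQ_LINE layout documented in
	 * Documentation/virt/kvm/api.rst: irq packs an irq_type, a vcpu_index
	 * (extended by vcpu2_index when KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 is
	 * advertised) and the IRQ number itself.
	 */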
	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i, ret;
	u32 phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	ret = kvm_reset_vcpu(vcpu);
	if (ret) {
		vcpu->arch.target = -1;
		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
	}

	return ret;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu_has_run_once(vcpu)) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			icache_inval_all_pou();
	}

	vcpu_reset_hcr(vcpu);
	vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		kvm_arm_vcpu_power_off(vcpu);
	else
		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);

	return 0;
}

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		/*
		 * We could owe a reset due to PSCI. Handle the pending reset
		 * here to ensure userspace register accesses are ordered after
		 * the reset.
		 */
		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
	default:
		return -ENODEV;
	}
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;

		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		struct kvm_vcpu_init init;

		kvm_vcpu_preferred_target(&init);

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	case KVM_ARM_MTE_COPY_TAGS: {
		struct kvm_arm_copy_mte_tags copy_tags;

		if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
			return -EFAULT;
		return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
	}
	default:
		return -EINVAL;
	}
}
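
/*
 * Size (and allocation order) of the nVHE hypervisor's per-CPU region, as
 * delimited by the hyp __per_cpu_start/__per_cpu_end linker symbols.
 */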
static unsigned long nvhe_percpu_size(void)
{
	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
}

static unsigned long nvhe_percpu_order(void)
{
	unsigned long size = nvhe_percpu_size();

	return size ? get_order(size) : 0;
}

static inline size_t pkvm_host_fp_state_order(void)
{
	return get_order(pkvm_host_fp_state_size());
}

/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];

static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
{
	hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
}
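
/*
 * Populate the vector slot table: the "direct" slots point at the stock and
 * BP-hardened vectors in the regular hyp mapping, while the "indirect" slots
 * reuse whichever base was set up last; outside protected mode, on systems
 * that need idmapped vectors, that base is a dedicated executable mapping of
 * the BP-hardening vectors created below.
 */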
  1381. static int kvm_init_vector_slots(void)
  1382. {
  1383. int err;
  1384. void *base;
  1385. base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
  1386. kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
  1387. base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
  1388. kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
  1389. if (kvm_system_needs_idmapped_vectors() &&
  1390. !is_protected_kvm_enabled()) {
  1391. err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
  1392. __BP_HARDEN_HYP_VECS_SZ, &base);
  1393. if (err)
  1394. return err;
  1395. }
  1396. kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
  1397. kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
  1398. return 0;
  1399. }
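
/*
 * Populate this CPU's kvm_nvhe_init_params (per-CPU offset, MAIR/TCR/HCR,
 * HYP page-table base) and flush them to the PoC so that EL2 can read them
 * with the MMU still off.
 */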
static void cpu_prepare_hyp_mode(int cpu)
{
	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
	unsigned long tcr;
	int *hyp_cpu_number_ptr = per_cpu_ptr_nvhe_sym(hyp_cpu_number, cpu);

	*hyp_cpu_number_ptr = cpu;

	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 * Also drop the KASAN tag which gets in the way...
	 */
	params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

	params->mair_el2 = read_sysreg(mair_el1);

	/*
	 * The ID map may be configured to use an extended virtual address
	 * range. This is only the case if system RAM is out of range for the
	 * currently configured page size and VA_BITS, in which case we will
	 * also need the extended virtual range for the HYP ID map, or we won't
	 * be able to enable the EL2 MMU.
	 *
	 * However, at EL2, there is only one TTBR register, and we can't switch
	 * between translation tables *and* update TCR_EL2.T0SZ at the same
	 * time. Bottom line: we need to use the extended range with *both* our
	 * translation tables.
	 *
	 * So use the same T0SZ value we use for the ID map.
	 */
	tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
	params->tcr_el2 = tcr;

	params->pgd_pa = kvm_mmu_get_httbr();
	if (is_protected_kvm_enabled())
		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
	else
		params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
	params->vttbr = params->vtcr = 0;
	params->hfgwtr_el2 = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;

	/*
	 * Flush the init params from the data cache because the struct will
	 * be read while the MMU is off.
	 */
	kvm_flush_dcache_to_poc(params, sizeof(*params));
}
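
/*
 * Swap the HYP stub vectors for KVM's init vector and issue the
 * __kvm_hyp_init hypercall with this CPU's prepared init parameters.
 */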
static void hyp_install_host_vector(void)
{
	struct kvm_nvhe_init_params *params;
	struct arm_smccc_res res;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!system_capabilities_finalized());
	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
}

static void cpu_init_hyp_mode(void)
{
	hyp_install_host_vector();

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
	}
}

static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
}

/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU is affected by Spectre-v2, the hardening sequence is
 *   placed in one of the vector slots, which is executed before jumping
 *   to the real vectors.
 *
 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
 *   containing the hardening sequence is mapped next to the idmap page,
 *   and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
static void cpu_set_hyp_vector(void)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
	void *vector = hyp_spectre_vector_selector[data->slot];

	if (!is_protected_kvm_enabled())
		*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
	else
		kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
}
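
/*
 * Per-CPU EL2 bring-up helpers: initialise the host context, then (on nVHE)
 * install the hypervisor vectors and per-CPU features.
 */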
static void cpu_hyp_init_context(void)
{
	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);

	if (!is_kernel_in_hyp_mode())
		cpu_init_hyp_mode();
}

static void cpu_hyp_init_features(void)
{
	cpu_set_hyp_vector();
	kvm_arm_init_debug();

	if (is_kernel_in_hyp_mode())
		kvm_timer_init_vhe();

	if (vgic_present)
		kvm_vgic_init_cpu_hardware();
}

static void cpu_hyp_reinit(void)
{
	cpu_hyp_reset();
	cpu_hyp_init_context();
	cpu_hyp_init_features();
}
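
/* Bring up EL2 on this CPU if it has not been enabled already. */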
static void _kvm_arch_hardware_enable(void *discard)
{
	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_arm_hardware_enabled, 1);
	}
}

int kvm_arch_hardware_enable(void)
{
	_kvm_arch_hardware_enable(NULL);
	return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_arm_hardware_enabled, 0);
	}
}

void kvm_arch_hardware_disable(void)
{
	if (!is_protected_kvm_enabled())
		_kvm_arch_hardware_disable(NULL);
}

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_arm_hardware_enabled is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/*
			 * don't update kvm_arm_hardware_enabled here
			 * so that the hardware will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/* The hardware was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void hyp_cpu_pm_init(void)
{
	if (!is_protected_kvm_enabled())
		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}

static void hyp_cpu_pm_exit(void)
{
	if (!is_protected_kvm_enabled())
		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
{
}
#endif

static void init_cpu_logical_map(void)
{
	unsigned int cpu;

	/*
	 * Copy the MPIDR <-> logical CPU ID mapping to hyp.
	 * Only copy the set of online CPUs whose features have been checked
	 * against the finalized system capabilities. The hypervisor will not
	 * allow any other CPUs from the `possible` set to boot.
	 */
	for_each_online_cpu(cpu)
		hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
}

#define init_psci_0_1_impl_state(config, what)	\
	config.psci_0_1_ ## what ## _implemented = psci_ops.what

static bool init_psci_relay(void)
{
	/*
	 * If PSCI has not been initialized, protected KVM cannot install
	 * itself on newly booted CPUs.
	 */
	if (!psci_ops.get_version) {
		kvm_err("Cannot initialize protected mode without PSCI\n");
		return false;
	}

	kvm_host_psci_config.version = psci_ops.get_version();
	kvm_host_psci_config.smccc_version = arm_smccc_get_version();

	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
		init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
	}
	return true;
}
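
/*
 * Initialise the EL2-dependent subsystems (vgic, arch timer, perf callbacks)
 * with the hardware enabled on every CPU for the duration; it is left
 * enabled afterwards only for protected KVM.
 */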
static int init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);

	/*
	 * Register CPU low-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init(vgic_present);
	if (err)
		goto out;

	kvm_register_perf_callbacks(NULL);

out:
	if (err || !is_protected_kvm_enabled())
		on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);

	return err;
}
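
/* Undo init_hyp_mode(): free the HYP page tables and all per-CPU allocations. */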
static void teardown_hyp_mode(void)
{
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu) {
		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
		free_pages(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu],
			   pkvm_host_fp_state_order());
	}
}
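
/*
 * Hand the reserved hyp memory pool and the per-CPU bases over to EL2 via
 * the __pkvm_init hypercall; on return the stub hypercalls are no longer
 * available.
 */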
static int do_pkvm_init(u32 hyp_va_bits)
{
	void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
	int ret;

	preempt_disable();
	cpu_hyp_init_context();
	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
				num_possible_cpus(), kern_hyp_va(per_cpu_base),
				hyp_va_bits);
	cpu_hyp_init_features();

	/*
	 * The stub hypercalls are now disabled, so set our local flag to
	 * prevent a later re-init attempt in kvm_arch_hardware_enable().
	 */
	__this_cpu_write(kvm_arm_hardware_enabled, 1);
	preempt_enable();

	return ret;
}

static u64 get_hyp_id_aa64pfr0_el1(void)
{
	/*
	 * Track whether the system isn't affected by spectre/meltdown in the
	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
	 * Although this is per-CPU, we make it global for simplicity, e.g., not
	 * to have to worry about vcpu migration.
	 *
	 * Unlike for non-protected VMs, userspace cannot override this for
	 * protected VMs.
	 */
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));

	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);

	return val;
}

static void kvm_hyp_init_symbols(void)
{
	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
	kvm_nvhe_sym(__icache_flags) = __icache_flags;
	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
	kvm_nvhe_sym(smccc_trng_available) = smccc_trng_available;
	kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
}
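
/*
 * Forward declaration for the hyp tracing event setup; presumably provided
 * by the hyp event code, which has no proper header yet (see the TODO at
 * the call site in init_hyp_mode()).
 */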
int kvm_hyp_init_events(void);

static int kvm_hyp_init_protection(u32 hyp_va_bits)
{
	void *addr = phys_to_virt(hyp_mem_base);
	int ret;

	ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
	if (ret)
		return ret;

	ret = do_pkvm_init(hyp_va_bits);
	if (ret)
		return ret;

	free_hyp_pgds();

	return 0;
}
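
/*
 * Allocate the per-CPU buffers used to save the host FP/SIMD state when
 * running in protected mode; a no-op otherwise.
 */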
static int init_pkvm_host_fp_state(void)
{
	int cpu;

	if (!is_protected_kvm_enabled())
		return 0;

	/* Allocate pages for protected-mode host-fp state. */
	for_each_possible_cpu(cpu) {
		struct page *page;
		unsigned long addr;

		page = alloc_pages(GFP_KERNEL, pkvm_host_fp_state_order());
		if (!page)
			return -ENOMEM;

		addr = (unsigned long)page_address(page);
		kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu] = addr;
	}

	/*
	 * Don't map the pages in hyp since these are only used in protected
	 * mode, which will (re)create its own mapping when initialized.
	 */
	return 0;
}

/*
 * Finalizes the initialization of hyp mode, once everything else is
 * initialized and the initialization process cannot fail.
 */
static void finalize_init_hyp_mode(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu] =
			kern_hyp_va(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu]);
	}
}

/**
 * Initializes Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	u32 hyp_va_bits;
	int cpu;
	int err = -ENOMEM;

	/*
	 * The protected Hyp-mode cannot be initialized if the memory pool
	 * allocation has failed.
	 */
	if (is_protected_kvm_enabled() && !hyp_mem_base)
		goto out_err;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init(&hyp_va_bits);
	if (err)
		goto out_err;

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_base;

		stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
		if (!stack_base) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
	}

	/*
	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
	 */
	for_each_possible_cpu(cpu) {
		struct page *page;
		void *page_addr;

		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
		if (!page) {
			err = -ENOMEM;
			goto out_err;
		}

		page_addr = page_address(page);
		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
		kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_start),
				  kvm_ksym_ref(__hyp_data_end), PAGE_HYP);
	if (err) {
		kvm_err("Cannot map .hyp.data section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
				  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map .hyp.rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	/*
	 * .hyp.bss is guaranteed to be placed at the beginning of the .bss
	 * section thanks to an assertion in the linker script. Map it RW and
	 * the rest of .bss RO.
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
				  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
	if (err) {
		kvm_err("Cannot map hyp bss section: %d\n", err);
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
		char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
		unsigned long hyp_addr;

		/*
		 * Allocate a contiguous HYP private VA range for the stack
		 * and guard page. The allocation is also aligned based on
		 * the order of its size.
		 */
		err = hyp_alloc_private_va_range(NVHE_STACK_SIZE * 2, &hyp_addr);
		if (err) {
			kvm_err("Cannot allocate hyp stack guard page\n");
			goto out_err;
		}

		/*
		 * Since the stack grows downwards, map the stack to the page
		 * at the higher address and leave the lower guard page
		 * unbacked.
		 *
		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
		 * and addresses corresponding to the guard page have the
		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
		 */
		err = __create_hyp_mappings(hyp_addr + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
					    __pa(stack_base), PAGE_HYP);
		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}

		/*
		 * Save the stack PA in nvhe_init_params. This will be needed
		 * to recreate the stack mapping in protected nVHE mode.
		 * __hyp_pa() won't do the right thing there, since the stack
		 * has been mapped in the flexible private VA space.
		 */
		params->stack_pa = __pa(stack_base);

		params->stack_hyp_va = hyp_addr + (2 * NVHE_STACK_SIZE);
	}

	for_each_possible_cpu(cpu) {
		char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
		char *percpu_end = percpu_begin + nvhe_percpu_size();

		/* Map Hyp percpu pages */
		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
		if (err) {
			kvm_err("Cannot map hyp percpu region\n");
			goto out_err;
		}

		/* Prepare the CPU initialization parameters */
		cpu_prepare_hyp_mode(cpu);
	}

	err = init_pkvm_host_fp_state();
	if (err)
		goto out_err;

	kvm_hyp_init_symbols();

	/* TODO: Real .h interface */
#ifdef CONFIG_TRACING
	kvm_hyp_init_events();
#endif

	if (is_protected_kvm_enabled()) {
		init_cpu_logical_map();

		if (!init_psci_relay()) {
			err = -ENODEV;
			goto out_err;
		}

		err = kvm_hyp_init_protection(hyp_va_bits);
		if (err) {
			kvm_err("Failed to init hyp memory protection\n");
			goto out_err;
		}
	}

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}
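
/* Resolve an MPIDR affinity value to the vCPU that owns it, if any. */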
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}
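
/*
 * IRQ bypass hooks: wire an irqfd producer's interrupt into the vgic for
 * direct injection (and undo it on removal); the stop/start callbacks halt
 * and resume the guest around updates.
 */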
bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
				     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_get_mode() == KVM_MODE_NONE) {
		kvm_info("KVM disabled from command line\n");
		return -ENODEV;
	}

	err = kvm_sys_reg_table_init();
	if (err) {
		kvm_info("Error initializing system register tables");
		return err;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
			 "Only trusted guests should be used on this system.\n");

	err = kvm_set_ipa_limit();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	err = kvm_arm_vmid_alloc_init();
	if (err) {
		kvm_err("Failed to initialize VMID allocator.\n");
		return err;
	}

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = kvm_init_vector_slots();
	if (err) {
		kvm_err("Cannot initialise vector slots\n");
		goto out_err;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	if (!in_hyp_mode) {
		err = init_hyp_tracefs();
		if (err)
			kvm_err("Failed to initialize Hyp tracing\n");
	}

	if (is_protected_kvm_enabled()) {
		kvm_info("Protected nVHE mode initialized successfully\n");
	} else if (in_hyp_mode) {
		kvm_info("VHE mode initialized successfully\n");
	} else {
		kvm_info("Hyp mode initialized successfully\n");
	}

	/*
	 * This should be called after initialization is done and failure isn't
	 * possible anymore.
	 */
	if (!in_hyp_mode)
		finalize_init_hyp_mode();

	return 0;

out_hyp:
	hyp_cpu_pm_exit();
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	kvm_arm_vmid_alloc_free();
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_unregister_perf_callbacks();
}
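
/* Parse the "kvm-arm.mode" early parameter ("none", "protected" or "nvhe"). */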
static int __init early_kvm_mode_cfg(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "none") == 0) {
		kvm_mode = KVM_MODE_NONE;
		return 0;
	}

	if (!is_hyp_mode_available()) {
		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
		return 0;
	}

	if (strcmp(arg, "protected") == 0) {
		if (!is_kernel_in_hyp_mode())
			kvm_mode = KVM_MODE_PROTECTED;
		else
			pr_warn_once("Protected KVM not available with VHE\n");

		return 0;
	}

	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
		kvm_mode = KVM_MODE_DEFAULT;
		return 0;
	}

	return -EINVAL;
}
early_param("kvm-arm.mode", early_kvm_mode_cfg);

enum kvm_mode kvm_get_mode(void)
{
	return kvm_mode;
}
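
/* Module entry point: register this architecture with the generic KVM core. */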
static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}
module_init(arm_init);