guest.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <[email protected]>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_hypercalls.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sigcontext.h>

#include "trace.h"

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
        KVM_GENERIC_VM_STATS(),
        STATS_DESC_ICOUNTER(VM, protected_hyp_mem),
        STATS_DESC_ICOUNTER(VM, protected_shared_mem),
};

const struct kvm_stats_header kvm_vm_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
        STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
        STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
        STATS_DESC_COUNTER(VCPU, mmio_exit_user),
        STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
        STATS_DESC_COUNTER(VCPU, signal_exits),
        STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};
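
/*
 * Core register accessors: a KVM_REG_ARM_CORE ID encodes an index into
 * struct kvm_regs, viewed as an array of 32-bit words.  The helpers below
 * turn such an ID into an offset, a register size and, finally, a pointer
 * into the vCPU context.
 */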
static bool core_reg_offset_is_vreg(u64 off)
{
        return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
                off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
        int size;

        switch (off) {
        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
             KVM_REG_ARM_CORE_REG(regs.regs[30]):
        case KVM_REG_ARM_CORE_REG(regs.sp):
        case KVM_REG_ARM_CORE_REG(regs.pc):
        case KVM_REG_ARM_CORE_REG(regs.pstate):
        case KVM_REG_ARM_CORE_REG(sp_el1):
        case KVM_REG_ARM_CORE_REG(elr_el1):
        case KVM_REG_ARM_CORE_REG(spsr[0]) ...
             KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
                size = sizeof(__u64);
                break;

        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                size = sizeof(__uint128_t);
                break;

        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                size = sizeof(__u32);
                break;

        default:
                return -EINVAL;
        }

        if (!IS_ALIGNED(off, size / sizeof(__u32)))
                return -EINVAL;

        /*
         * The KVM_REG_ARM64_SVE regs must be used instead of
         * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
         * SVE-enabled vcpus:
         */
        if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
                return -EINVAL;

        return size;
}
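
/*
 * Translate a validated core-register offset into a pointer to the backing
 * field in the vCPU context, or NULL if the ID or its declared size is
 * invalid for this vCPU.
 */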
static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        u64 off = core_reg_offset_from_id(reg->id);
        int size = core_reg_size_from_offset(vcpu, off);

        if (size < 0)
                return NULL;

        if (KVM_REG_SIZE(reg->id) != size)
                return NULL;

        switch (off) {
        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
             KVM_REG_ARM_CORE_REG(regs.regs[30]):
                off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
                off /= 2;
                return &vcpu->arch.ctxt.regs.regs[off];

        case KVM_REG_ARM_CORE_REG(regs.sp):
                return &vcpu->arch.ctxt.regs.sp;

        case KVM_REG_ARM_CORE_REG(regs.pc):
                return &vcpu->arch.ctxt.regs.pc;

        case KVM_REG_ARM_CORE_REG(regs.pstate):
                return &vcpu->arch.ctxt.regs.pstate;

        case KVM_REG_ARM_CORE_REG(sp_el1):
                return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

        case KVM_REG_ARM_CORE_REG(elr_el1):
                return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
                return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
                return &vcpu->arch.ctxt.spsr_abt;

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
                return &vcpu->arch.ctxt.spsr_und;

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
                return &vcpu->arch.ctxt.spsr_irq;

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
                return &vcpu->arch.ctxt.spsr_fiq;

        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
                off /= 4;
                return &vcpu->arch.ctxt.fp_regs.vregs[off];

        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
                return &vcpu->arch.ctxt.fp_regs.fpsr;

        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                return &vcpu->arch.ctxt.fp_regs.fpcr;

        default:
                return NULL;
        }
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /*
         * Because the kvm_regs structure is a mix of 32, 64 and
         * 128bit fields, we index it as if it was a 32bit
         * array. Hence below, nr_regs is the number of entries, and
         * off the index in the "array".
         */
        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
        int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
        void *addr;
        u32 off;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        if (off >= nr_regs ||
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;

        addr = core_reg_addr(vcpu, reg);
        if (!addr)
                return -EINVAL;

        if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}
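
/*
 * Set a core register from a userspace-supplied value.  Besides the bounds
 * checks done for reads, this validates any new PSTATE mode against the
 * vCPU's execution state and, when the vCPU is running in an AArch32 mode,
 * narrows the general purpose registers and PC to 32 bits.
 */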
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
        int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
        __uint128_t tmp;
        void *valp = &tmp, *addr;
        u64 off;
        int err = 0;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        if (off >= nr_regs ||
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;

        addr = core_reg_addr(vcpu, reg);
        if (!addr)
                return -EINVAL;

        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
                err = -EFAULT;
                goto out;
        }

        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
                u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
                switch (mode) {
                case PSR_AA32_MODE_USR:
                        if (!kvm_supports_32bit_el0())
                                return -EINVAL;
                        break;
                case PSR_AA32_MODE_FIQ:
                case PSR_AA32_MODE_IRQ:
                case PSR_AA32_MODE_SVC:
                case PSR_AA32_MODE_ABT:
                case PSR_AA32_MODE_UND:
                        if (!vcpu_el1_is_32bit(vcpu))
                                return -EINVAL;
                        break;
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
                        if (vcpu_el1_is_32bit(vcpu))
                                return -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        memcpy(addr, valp, KVM_REG_SIZE(reg->id));

        if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
                int i, nr_reg;

                switch (*vcpu_cpsr(vcpu)) {
                /*
                 * Either we are dealing with user mode, and only the
                 * first 15 registers (+ PC) must be narrowed to 32bit.
                 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
                 */
                case PSR_AA32_MODE_USR:
                case PSR_AA32_MODE_SYS:
                        nr_reg = 15;
                        break;

                /*
                 * Otherwise, this is a privileged mode, and *all* the
                 * registers must be narrowed to 32bit.
                 */
                default:
                        nr_reg = 31;
                        break;
                }

                for (i = 0; i < nr_reg; i++)
                        vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));

                *vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
        }
out:
        return err;
}
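
/*
 * The KVM_REG_ARM64_SVE_VLS pseudo-register is a bitmap with one bit per
 * vector quantum (VQ, i.e. 128 bits of vector length), packed 64 VQs per
 * u64 word.  These helpers index and test that bitmap.
 */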
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        unsigned int max_vq, vq;
        u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

        if (!vcpu_has_sve(vcpu))
                return -ENOENT;

        if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
                return -EINVAL;

        memset(vqs, 0, sizeof(vqs));

        max_vq = vcpu_sve_max_vq(vcpu);
        for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
                if (sve_vq_available(vq))
                        vqs[vq_word(vq)] |= vq_mask(vq);

        if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
                return -EFAULT;

        return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        unsigned int max_vq, vq;
        u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

        if (!vcpu_has_sve(vcpu))
                return -ENOENT;

        if (kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM;          /* too late! */

        if (WARN_ON(vcpu->arch.sve_state))
                return -EINVAL;

        if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
                return -EFAULT;

        max_vq = 0;
        for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
                if (vq_present(vqs, vq))
                        max_vq = vq;

        if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
                return -EINVAL;

        /*
         * Vector lengths supported by the host can't currently be
         * hidden from the guest individually: instead we can only set a
         * maximum via ZCR_EL2.LEN.  So, make sure the available vector
         * lengths match the set requested exactly up to the requested
         * maximum:
         */
        for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
                if (vq_present(vqs, vq) != sve_vq_available(vq))
                        return -EINVAL;

        /* Can't run with no vector lengths at all: */
        if (max_vq < SVE_VQ_MIN)
                return -EINVAL;

        /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
        vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

        return 0;
}
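
/*
 * SVE register IDs pack a slice index into bits [4:0] and the register
 * number into bits [9:5]; the macros below extract those fields and give
 * the user-visible size of a single Z- or P-register slice.
 */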
#define SVE_REG_SLICE_SHIFT 0
#define SVE_REG_SLICE_BITS 5
#define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS 5

#define SVE_REG_SLICE_MASK                                      \
        GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,   \
                SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK                                         \
        GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
        unsigned int koffset;   /* offset into sve_state in kernel memory */
        unsigned int klen;      /* length in kernel memory */
        unsigned int upad;      /* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
                             struct kvm_vcpu *vcpu,
                             const struct kvm_one_reg *reg)
{
        /* reg ID ranges for Z- registers */
        const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
        const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
                                                       SVE_NUM_SLICES - 1);

        /* reg ID ranges for P- registers and FFR (which are contiguous) */
        const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
        const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

        unsigned int vq;
        unsigned int reg_num;

        unsigned int reqoffset, reqlen; /* User-requested offset and length */
        unsigned int maxlen;            /* Maximum permitted length */

        size_t sve_state_size;

        const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
                                                        SVE_NUM_SLICES - 1);

        /* Verify that the P-regs and FFR really do have contiguous IDs: */
        BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

        /* Verify that we match the UAPI header: */
        BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

        reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

        if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
                if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                        return -ENOENT;

                vq = vcpu_sve_max_vq(vcpu);

                reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
                                SVE_SIG_REGS_OFFSET;
                reqlen = KVM_SVE_ZREG_SIZE;
                maxlen = SVE_SIG_ZREG_SIZE(vq);
        } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
                if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                        return -ENOENT;

                vq = vcpu_sve_max_vq(vcpu);

                reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
                                SVE_SIG_REGS_OFFSET;
                reqlen = KVM_SVE_PREG_SIZE;
                maxlen = SVE_SIG_PREG_SIZE(vq);
        } else {
                return -EINVAL;
        }

        sve_state_size = vcpu_sve_state_size(vcpu);
        if (WARN_ON(!sve_state_size))
                return -EINVAL;

        region->koffset = array_index_nospec(reqoffset, sve_state_size);
        region->klen = min(maxlen, reqlen);
        region->upad = reqlen - region->klen;

        return 0;
}
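
/*
 * Read or write a single SVE register slice.  The KVM_REG_ARM64_SVE_VLS
 * pseudo-register is handled separately; everything else is copied directly
 * to/from vcpu->arch.sve_state, with any user-visible padding beyond the
 * kernel's storage cleared on reads.
 */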
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret;
        struct sve_state_reg_region region;
        char __user *uptr = (char __user *)reg->addr;

        /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
        if (reg->id == KVM_REG_ARM64_SVE_VLS)
                return get_sve_vls(vcpu, reg);

        /* Try to interpret reg ID as an architectural SVE register... */
        ret = sve_reg_to_region(&region, vcpu, reg);
        if (ret)
                return ret;

        if (!kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM;

        if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
                         region.klen) ||
            clear_user(uptr + region.klen, region.upad))
                return -EFAULT;

        return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret;
        struct sve_state_reg_region region;
        const char __user *uptr = (const char __user *)reg->addr;

        /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
        if (reg->id == KVM_REG_ARM64_SVE_VLS)
                return set_sve_vls(vcpu, reg);

        /* Try to interpret reg ID as an architectural SVE register... */
        ret = sve_reg_to_region(&region, vcpu, reg);
        if (ret)
                return ret;

        if (!kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM;

        if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
                           region.klen))
                return -EFAULT;

        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}
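
/*
 * Enumerate the accessible core registers for KVM_GET_REG_LIST: one index
 * per 32-bit word of struct kvm_regs that maps to a real register, with the
 * appropriate KVM_REG_SIZE_* encoding.  With a NULL uindices this only
 * counts the registers.
 */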
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
                                 u64 __user *uindices)
{
        unsigned int i;
        int n = 0;

        for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
                u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
                int size = core_reg_size_from_offset(vcpu, i);

                if (size < 0)
                        continue;

                switch (size) {
                case sizeof(__u32):
                        reg |= KVM_REG_SIZE_U32;
                        break;

                case sizeof(__u64):
                        reg |= KVM_REG_SIZE_U64;
                        break;

                case sizeof(__uint128_t):
                        reg |= KVM_REG_SIZE_U128;
                        break;

                default:
                        WARN_ON(1);
                        continue;
                }

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }

                n++;
        }

        return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
        return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
        switch (index) {
        case KVM_REG_ARM_TIMER_CTL:
        case KVM_REG_ARM_TIMER_CNT:
        case KVM_REG_ARM_TIMER_CVAL:
                return true;
        }
        return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
                return -EFAULT;
        uindices++;
        if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
                return -EFAULT;
        uindices++;
        if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
                return -EFAULT;

        return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;
        int ret;

        ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
        if (ret != 0)
                return -EFAULT;

        return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        val = kvm_arm_timer_get_reg(vcpu, reg->id);
        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
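
/*
 * Count and enumerate the SVE registers exposed to userspace: the
 * KVM_REG_ARM64_SVE_VLS pseudo-register plus, for each slice, one index per
 * Z-register, per P-register and for FFR.  Only meaningful once the vCPU's
 * SVE configuration has been finalized.
 */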
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
        const unsigned int slices = vcpu_sve_slices(vcpu);

        if (!vcpu_has_sve(vcpu))
                return 0;

        /* Policed by KVM_GET_REG_LIST: */
        WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

        return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
                + 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
                                u64 __user *uindices)
{
        const unsigned int slices = vcpu_sve_slices(vcpu);
        u64 reg;
        unsigned int i, n;
        int num_regs = 0;

        if (!vcpu_has_sve(vcpu))
                return 0;

        /* Policed by KVM_GET_REG_LIST: */
        WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

        /*
         * Enumerate this first, so that userspace can save/restore in
         * the order reported by KVM_GET_REG_LIST:
         */
        reg = KVM_REG_ARM64_SVE_VLS;
        if (put_user(reg, uindices++))
                return -EFAULT;
        ++num_regs;

        for (i = 0; i < slices; i++) {
                for (n = 0; n < SVE_NUM_ZREGS; n++) {
                        reg = KVM_REG_ARM64_SVE_ZREG(n, i);
                        if (put_user(reg, uindices++))
                                return -EFAULT;
                        num_regs++;
                }

                for (n = 0; n < SVE_NUM_PREGS; n++) {
                        reg = KVM_REG_ARM64_SVE_PREG(n, i);
                        if (put_user(reg, uindices++))
                                return -EFAULT;
                        num_regs++;
                }

                reg = KVM_REG_ARM64_SVE_FFR(i);
                if (put_user(reg, uindices++))
                        return -EFAULT;
                num_regs++;
        }

        return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
        unsigned long res = 0;

        res += num_core_regs(vcpu);
        res += num_sve_regs(vcpu);
        res += kvm_arm_num_sys_reg_descs(vcpu);
        res += kvm_arm_get_fw_num_regs(vcpu);
        res += NUM_TIMER_REGS;

        return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        int ret;

        ret = copy_core_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_sve_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += kvm_arm_get_fw_num_regs(vcpu);

        ret = copy_timer_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += NUM_TIMER_REGS;

        return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
                return -EINVAL;

        switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE:  return get_core_reg(vcpu, reg);
        case KVM_REG_ARM_FW:
        case KVM_REG_ARM_FW_FEAT_BMAP:
                return kvm_arm_get_fw_reg(vcpu, reg);
        case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
        }

        if (is_timer_reg(reg->id))
                return get_timer_reg(vcpu, reg);

        return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
                return -EINVAL;

        switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE:  return set_core_reg(vcpu, reg);
        case KVM_REG_ARM_FW:
        case KVM_REG_ARM_FW_FEAT_BMAP:
                return kvm_arm_set_fw_reg(vcpu, reg);
        case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
        }

        if (is_timer_reg(reg->id))
                return set_timer_reg(vcpu, reg);

        return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}
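
/*
 * KVM_GET/SET_VCPU_EVENTS support: report a pending virtual SError (and its
 * ESR when the RAS extension is present), and allow userspace to inject an
 * SError or an external data abort into the guest.
 */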
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events)
{
        events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
        events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

        if (events->exception.serror_pending && events->exception.serror_has_esr)
                events->exception.serror_esr = vcpu_get_vsesr(vcpu);

        /*
         * We never return a pending ext_dabt here because we deliver it to
         * the virtual CPU directly when setting the event and it's no longer
         * 'pending' at this point.
         */

        return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events)
{
        bool serror_pending = events->exception.serror_pending;
        bool has_esr = events->exception.serror_has_esr;
        bool ext_dabt_pending = events->exception.ext_dabt_pending;

        if (serror_pending && has_esr) {
                if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                        return -EINVAL;

                if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
                        kvm_set_sei_esr(vcpu, events->exception.serror_esr);
                else
                        return -EINVAL;
        } else if (serror_pending) {
                kvm_inject_vabt(vcpu);
        }

        if (ext_dabt_pending)
                kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

        return 0;
}
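
/*
 * Derive the KVM target type from the host CPU's MIDR implementer and part
 * number, falling back to the generic ARMv8 target for unknown parts.
 */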
u32 __attribute_const__ kvm_target_cpu(void)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_number = read_cpuid_part_number();

        switch (implementor) {
        case ARM_CPU_IMP_ARM:
                switch (part_number) {
                case ARM_CPU_PART_AEM_V8:
                        return KVM_ARM_TARGET_AEM_V8;
                case ARM_CPU_PART_FOUNDATION:
                        return KVM_ARM_TARGET_FOUNDATION_V8;
                case ARM_CPU_PART_CORTEX_A53:
                        return KVM_ARM_TARGET_CORTEX_A53;
                case ARM_CPU_PART_CORTEX_A57:
                        return KVM_ARM_TARGET_CORTEX_A57;
                }
                break;
        case ARM_CPU_IMP_APM:
                switch (part_number) {
                case APM_CPU_PART_XGENE:
                        return KVM_ARM_TARGET_XGENE_POTENZA;
                }
                break;
        }

        /* Return a default generic target */
        return KVM_ARM_TARGET_GENERIC_V8;
}

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
        u32 target = kvm_target_cpu();

        memset(init, 0, sizeof(*init));

        /*
         * For now, we don't return any features.
         * In future, we might use features to return target
         * specific features available for the preferred
         * target type.
         */
        init->target = (__u32)target;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int ret = 0;

        trace_kvm_set_guest_debug(vcpu, dbg->control);

        if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
                ret = -EINVAL;
                goto out;
        }

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;

                /* Hardware assisted Break and Watch points */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
                        vcpu->arch.external_debug_state = dbg->arch;
                }
        } else {
                /* If not enabled clear all flags */
                vcpu->guest_debug = 0;
                vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
        }

out:
        return ret;
}
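
/*
 * vCPU device attribute handlers, multiplexing the PMUv3, arch timer and
 * paravirtualised time control groups.
 */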
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_PMU_V3_CTRL:
                mutex_lock(&vcpu->kvm->arch.config_lock);
                ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
                mutex_unlock(&vcpu->kvm->arch.config_lock);
                break;
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_set_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_PVTIME_CTRL:
                ret = kvm_arm_pvtime_set_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_PMU_V3_CTRL:
                ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_get_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_PVTIME_CTRL:
                ret = kvm_arm_pvtime_get_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_PMU_V3_CTRL:
                ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_has_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_PVTIME_CTRL:
                ret = kvm_arm_pvtime_has_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}
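
/*
 * KVM_ARM_MTE_COPY_TAGS: copy MTE allocation tags between userspace and a
 * page-aligned range of guest IPA space, one page at a time.  The direction
 * is selected by KVM_ARM_TAGS_FROM_GUEST in the flags.  On partial progress,
 * the number of bytes already copied is returned instead of an error.
 */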
long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
                                struct kvm_arm_copy_mte_tags *copy_tags)
{
        gpa_t guest_ipa = copy_tags->guest_ipa;
        size_t length = copy_tags->length;
        void __user *tags = copy_tags->addr;
        gpa_t gfn;
        bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
        int ret = 0;

        if (!kvm_has_mte(kvm))
                return -EINVAL;

        if (copy_tags->reserved[0] || copy_tags->reserved[1])
                return -EINVAL;

        if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
                return -EINVAL;

        if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
                return -EINVAL;

        gfn = gpa_to_gfn(guest_ipa);

        mutex_lock(&kvm->slots_lock);

        while (length > 0) {
                kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
                void *maddr;
                unsigned long num_tags;
                struct page *page;

                if (is_error_noslot_pfn(pfn)) {
                        ret = -EFAULT;
                        goto out;
                }

                page = pfn_to_online_page(pfn);
                if (!page) {
                        /* Reject ZONE_DEVICE memory */
                        ret = -EFAULT;
                        goto out;
                }
                maddr = page_address(page);

                if (!write) {
                        if (page_mte_tagged(page))
                                num_tags = mte_copy_tags_to_user(tags, maddr,
                                                        MTE_GRANULES_PER_PAGE);
                        else
                                /* No tags in memory, so write zeros */
                                num_tags = MTE_GRANULES_PER_PAGE -
                                        clear_user(tags, MTE_GRANULES_PER_PAGE);
                        kvm_release_pfn_clean(pfn);
                } else {
                        num_tags = mte_copy_tags_from_user(maddr, tags,
                                                        MTE_GRANULES_PER_PAGE);

                        /*
                         * Set the flag after checking the write
                         * completed fully
                         */
                        if (num_tags == MTE_GRANULES_PER_PAGE)
                                set_page_mte_tagged(page);

                        kvm_release_pfn_dirty(pfn);
                }

                if (num_tags != MTE_GRANULES_PER_PAGE) {
                        ret = -EFAULT;
                        goto out;
                }

                gfn++;
                tags += num_tags;
                length -= PAGE_SIZE;
        }

out:
        mutex_unlock(&kvm->slots_lock);
        /* If some data has been copied, report the number of bytes copied */
        if (length != copy_tags->length)
                return copy_tags->length - length;
        return ret;
}