vm.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/arm-smccc.h>
#include <linux/err.h>
#include <linux/uaccess.h>

#include <linux/gzvm.h>
#include <linux/gzvm_drv.h>
#include "gzvm_arch_common.h"

/* Bits [47:12] of PAR_EL1, i.e. the output address of a successful walk */
#define PAR_PA47_MASK ((((1UL << 48) - 1) >> 12) << 12)
int gzvm_arch_inform_exit(u16 vm_id)
{
	struct arm_smccc_res res;

	arm_smccc_hvc(MT_HVC_GZVM_INFORM_EXIT, vm_id, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == 0)
		return 0;

	return -ENXIO;
}
int gzvm_arch_probe(void)
{
	struct arm_smccc_res res;

	arm_smccc_hvc(MT_HVC_GZVM_PROBE, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0)
		return -ENXIO;

	return 0;
}
int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
			    phys_addr_t region)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_MEMREGION, vm_id,
				    buf_size, region, 0, 0, 0, 0, &res);
}
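
/*
 * For orientation, a minimal sketch of what gzvm_hypcall_wrapper() (defined
 * elsewhere in this driver) is assumed to do: forward the eight arguments to
 * arm_smccc_hvc() and translate the hypervisor's a0 return code into a Linux
 * errno. The gzvm_err_to_errno() name is a hypothetical stand-in for whatever
 * mapping helper the driver actually uses; only the arm_smccc_hvc() call
 * itself is certain from this file.
 *
 *	int gzvm_hypcall_wrapper(unsigned long a0, unsigned long a1,
 *				 unsigned long a2, unsigned long a3,
 *				 unsigned long a4, unsigned long a5,
 *				 unsigned long a6, unsigned long a7,
 *				 struct arm_smccc_res *res)
 *	{
 *		arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
 *		return gzvm_err_to_errno(res->a0);
 *	}
 */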
static int gzvm_cap_vm_gpa_size(void __user *argp)
{
	__u64 value = CONFIG_ARM64_PA_BITS;

	if (copy_to_user(argp, &value, sizeof(__u64)))
		return -EFAULT;

	return 0;
}
int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp)
{
	int ret;

	switch (cap) {
	case GZVM_CAP_PROTECTED_VM: {
		__u64 success = 1;

		if (copy_to_user(argp, &success, sizeof(__u64)))
			return -EFAULT;

		return 0;
	}
	case GZVM_CAP_VM_GPA_SIZE: {
		ret = gzvm_cap_vm_gpa_size(argp);
		return ret;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
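
/*
 * Usage sketch (illustrative, with an assumed calling context): the generic
 * driver code is expected to call this from its check-extension ioctl path
 * with @argp pointing into userspace, e.g.
 *
 *	__u64 pa_bits;
 *	ret = gzvm_arch_check_extension(gzvm, GZVM_CAP_VM_GPA_SIZE, argp);
 *	// on success, userspace reads back CONFIG_ARM64_PA_BITS from argp
 *
 * The ioctl plumbing itself lives outside this file; only the
 * copy_to_user() contract above is certain.
 */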
/**
 * gzvm_arch_create_vm() - create vm
 * @vm_type: VM type. Only supports Linux VM now.
 *
 * Return:
 * * positive value - VM ID
 * * -ENOMEM - Not enough memory to store VM data
 */
int gzvm_arch_create_vm(unsigned long vm_type)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_CREATE_VM, vm_type, 0, 0, 0, 0,
				   0, 0, &res);
	return ret ? ret : res.a1;
}
int gzvm_arch_destroy_vm(u16 vm_id)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_DESTROY_VM, vm_id, 0, 0, 0, 0,
				    0, 0, &res);
}

int gzvm_arch_memregion_purpose(struct gzvm *gzvm,
				struct gzvm_userspace_memory_region *mem)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MEMREGION_PURPOSE, gzvm->vm_id,
				    mem->guest_phys_addr, mem->memory_size,
				    mem->flags, 0, 0, 0, &res);
}

int gzvm_arch_set_dtb_config(struct gzvm *gzvm, struct gzvm_dtb_config *cfg)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_DTB_CONFIG, gzvm->vm_id,
				    cfg->dtb_addr, cfg->dtb_size, 0, 0, 0, 0,
				    &res);
}
static int gzvm_vm_arch_enable_cap(struct gzvm *gzvm,
				   struct gzvm_enable_cap *cap,
				   struct arm_smccc_res *res)
{
	return gzvm_hypcall_wrapper(MT_HVC_GZVM_ENABLE_CAP, gzvm->vm_id,
				    cap->cap, cap->args[0], cap->args[1],
				    cap->args[2], cap->args[3], cap->args[4],
				    res);
}
/**
 * gzvm_vm_ioctl_get_pvmfw_size() - Get pvmfw size from hypervisor, returned
 *				    in x1, and copy it back to userspace in args
 * @gzvm: Pointer to struct gzvm.
 * @cap: Pointer to struct gzvm_enable_cap.
 * @argp: Pointer to struct gzvm_enable_cap in user space.
 *
 * Return:
 * * 0 - Success
 * * -EINVAL - Hypervisor returned invalid results
 * * -EFAULT - Failed to copy back to userspace buffer
 */
static int gzvm_vm_ioctl_get_pvmfw_size(struct gzvm *gzvm,
					struct gzvm_enable_cap *cap,
					void __user *argp)
{
	struct arm_smccc_res res = {0};

	if (gzvm_vm_arch_enable_cap(gzvm, cap, &res) != 0)
		return -EINVAL;

	cap->args[1] = res.a1;
	if (copy_to_user(argp, cap, sizeof(*cap)))
		return -EFAULT;

	return 0;
}
/**
 * fill_constituents() - Populate pa to buffer until full
 * @consti: Pointer to struct mem_region_addr_range.
 * @consti_cnt: Constituent count.
 * @max_nr_consti: Maximum number of constituents.
 * @gfn: Guest frame number.
 * @total_pages: Total number of pages.
 * @slot: Pointer to struct gzvm_memslot.
 *
 * Return: how many pages we've filled in, negative on error
 */
static int fill_constituents(struct mem_region_addr_range *consti,
			     int *consti_cnt, int max_nr_consti, u64 gfn,
			     u32 total_pages, struct gzvm_memslot *slot)
{
	u64 pfn, prev_pfn, gfn_end;
	int nr_pages = 1;
	int i = 0;

	if (unlikely(total_pages == 0))
		return -EINVAL;
	gfn_end = gfn + total_pages;

	/* entry 0 */
	if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
		return -EFAULT;
	consti[0].address = PFN_PHYS(pfn);
	consti[0].pg_cnt = 1;
	gfn++;
	prev_pfn = pfn;

	while (i < max_nr_consti && gfn < gfn_end) {
		if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
			return -EFAULT;
		if (pfn == (prev_pfn + 1)) {
			consti[i].pg_cnt++;
		} else {
			i++;
			if (i >= max_nr_consti)
				break;
			consti[i].address = PFN_PHYS(pfn);
			consti[i].pg_cnt = 1;
		}
		prev_pfn = pfn;
		gfn++;
		nr_pages++;
	}
	if (i != max_nr_consti)
		i++;

	*consti_cnt = i;
	return nr_pages;
}
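
/*
 * Worked example (illustrative pfn values): if gfn..gfn+3 map to pfns
 * 100, 101, 102, 200, the first three contiguous pfns coalesce into one
 * constituent and the discontiguity at 200 starts a second one:
 *
 *	consti[0] = { .address = PFN_PHYS(100), .pg_cnt = 3 };
 *	consti[1] = { .address = PFN_PHYS(200), .pg_cnt = 1 };
 *	// *consti_cnt == 2, return value == 4
 */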
/**
 * populate_mem_region() - Iterate over all memory slots and populate their pa
 *			   ranges to the buffer until it's full
 * @gzvm: Pointer to struct gzvm.
 *
 * Return: 0 on success, negative on error
 */
static int populate_mem_region(struct gzvm *gzvm)
{
	int slot_cnt = 0;

	while (slot_cnt < GZVM_MAX_MEM_REGION && gzvm->memslot[slot_cnt].npages != 0) {
		struct gzvm_memslot *memslot = &gzvm->memslot[slot_cnt];
		struct gzvm_memory_region_ranges *region;
		int max_nr_consti, remain_pages;
		u64 gfn, gfn_end;
		u32 buf_size;

		buf_size = PAGE_SIZE * 2;
		region = alloc_pages_exact(buf_size, GFP_KERNEL);
		if (!region)
			return -ENOMEM;

		max_nr_consti = (buf_size - sizeof(*region)) /
				sizeof(struct mem_region_addr_range);

		region->slot = memslot->slot_id;
		remain_pages = memslot->npages;
		gfn = memslot->base_gfn;
		gfn_end = gfn + remain_pages;

		while (gfn < gfn_end) {
			int nr_pages;

			nr_pages = fill_constituents(region->constituents,
						     &region->constituent_cnt,
						     max_nr_consti, gfn,
						     remain_pages, memslot);
			if (nr_pages < 0) {
				pr_err("Failed to fill constituents\n");
				free_pages_exact(region, buf_size);
				return -EFAULT;
			}

			region->gpa = PFN_PHYS(gfn);
			region->total_pages = nr_pages;
			remain_pages -= nr_pages;
			gfn += nr_pages;

			if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
						    virt_to_phys(region))) {
				pr_err("Failed to register memregion to hypervisor\n");
				free_pages_exact(region, buf_size);
				return -EFAULT;
			}
		}
		free_pages_exact(region, buf_size);
		++slot_cnt;
	}
	return 0;
}
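
/*
 * Buffer layout note: the two-page buffer holds one
 * struct gzvm_memory_region_ranges header followed by an array of
 * struct mem_region_addr_range constituents, hence
 *
 *	max_nr_consti = (2 * PAGE_SIZE - sizeof(*region)) /
 *			sizeof(struct mem_region_addr_range);
 *
 * e.g. with 4 KiB pages, a 32-byte header and 16-byte constituents this
 * gives (8192 - 32) / 16 = 510 entries per hypercall (the struct sizes
 * here are illustrative assumptions).
 */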
/**
 * gzvm_vm_ioctl_cap_pvm() - Handle GZVM_CAP_PROTECTED_VM's subcommands
 * @gzvm: Pointer to struct gzvm.
 * @cap: Pointer to struct gzvm_enable_cap.
 * @argp: Pointer to struct gzvm_enable_cap in user space.
 *
 * Return:
 * * 0 - Success
 * * -EINVAL - Invalid subcommand or arguments
 */
static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
				 struct gzvm_enable_cap *cap,
				 void __user *argp)
{
	struct arm_smccc_res res = {0};
	int ret;

	switch (cap->args[0]) {
	case GZVM_CAP_PVM_SET_PVMFW_GPA:
		fallthrough;
	case GZVM_CAP_PVM_SET_PROTECTED_VM:
		/*
		 * If the hypervisor doesn't support block-based demand paging,
		 * we populate memory in advance to improve performance for
		 * protected VMs.
		 */
		if (gzvm->demand_page_gran == PAGE_SIZE)
			populate_mem_region(gzvm);
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		return ret;
	case GZVM_CAP_PVM_GET_PVMFW_SIZE:
		ret = gzvm_vm_ioctl_get_pvmfw_size(gzvm, cap, argp);
		return ret;
	default:
		break;
	}

	return -EINVAL;
}
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
				  struct gzvm_enable_cap *cap,
				  void __user *argp)
{
	struct arm_smccc_res res = {0};
	int ret;

	switch (cap->cap) {
	case GZVM_CAP_PROTECTED_VM:
		ret = gzvm_vm_ioctl_cap_pvm(gzvm, cap, argp);
		return ret;
	case GZVM_CAP_BLOCK_BASED_DEMAND_PAGING:
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		return ret;
	default:
		break;
	}

	return -EINVAL;
}
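
/*
 * Illustrative userspace sketch: enabling a capability is assumed to go
 * through a GZVM_ENABLE_CAP-style ioctl on the VM fd carrying a
 * struct gzvm_enable_cap. The fd name and the 2 MiB argument below are
 * assumptions for illustration only.
 *
 *	struct gzvm_enable_cap cap = {
 *		.cap = GZVM_CAP_BLOCK_BASED_DEMAND_PAGING,
 *		.args = { 2UL * 1024 * 1024 },	// assumed block size
 *	};
 *	ioctl(vm_fd, GZVM_ENABLE_CAP, &cap);
 */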
/**
 * gzvm_hva_to_pa_arch() - convert hva to pa in an arch-specific way
 * @hva: Host virtual address.
 *
 * Return: GZVM_PA_ERR_BAD for translation error
 */
u64 gzvm_hva_to_pa_arch(u64 hva)
{
	unsigned long flags;
	u64 par;

	local_irq_save(flags);
	asm volatile("at s1e1r, %0" :: "r" (hva));
	isb();
	par = read_sysreg_par();
	local_irq_restore(flags);

	if (par & SYS_PAR_EL1_F)
		return GZVM_PA_ERR_BAD;
	par = par & PAR_PA47_MASK;
	if (!par)
		return GZVM_PA_ERR_BAD;
	return par;
}
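
/*
 * Background note: "at s1e1r" asks the MMU for a stage-1 EL1 read
 * translation of the given VA and deposits the result in PAR_EL1, which
 * read_sysreg_par() reads back. SYS_PAR_EL1_F set means the walk faulted;
 * otherwise bits [47:12] carry the physical page address, which is exactly
 * the field PAR_PA47_MASK extracts. The page offset of @hva is masked
 * away, so the returned pa is page-aligned.
 */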
int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
			u64 nr_pages)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST, vm_id, memslot_id,
				    pfn, gfn, nr_pages, 0, 0, &res);
}

int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST_BLOCK, vm_id,
				    memslot_id, gfn, nr_pages, 0, 0, 0, &res);
}