gzvm_vcpu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <asm/sysreg.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gzvm_drv.h>

/* maximum size needed for holding an integer */
#define ITOA_MAX_LEN 12

static long gzvm_vcpu_update_one_reg(struct gzvm_vcpu *vcpu,
				     void __user *argp,
				     bool is_write)
{
	struct gzvm_one_reg reg;
	void __user *reg_addr;
	u64 data = 0;
	u64 reg_size;
	long ret;

	if (copy_from_user(&reg, argp, sizeof(reg)))
		return -EFAULT;

	reg_addr = (void __user *)reg.addr;
	reg_size = (reg.id & GZVM_REG_SIZE_MASK) >> GZVM_REG_SIZE_SHIFT;
	reg_size = BIT(reg_size);
	if (reg_size != 1 && reg_size != 2 && reg_size != 4 && reg_size != 8)
		return -EINVAL;

	if (is_write) {
		/* GZ hypervisor would filter out invalid vcpu register access */
		if (copy_from_user(&data, reg_addr, reg_size))
			return -EFAULT;
	} else {
		return -EOPNOTSUPP;
	}

	ret = gzvm_arch_vcpu_update_one_reg(vcpu, reg.id, is_write, &data);
	if (ret)
		return ret;

	return 0;
}

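/*
 * A minimal userspace sketch (not part of this file) of the
 * GZVM_SET_ONE_REG path handled above, assuming a vcpu fd obtained via
 * GZVM_CREATE_VCPU and a register id taken from the gzvm UAPI header:
 *
 *	struct gzvm_one_reg reg = {
 *		.id   = reg_id,             // width encoded in GZVM_REG_SIZE bits
 *		.addr = (__u64)&reg_value,  // 1-, 2-, 4- or 8-byte user buffer
 *	};
 *	ioctl(vcpu_fd, GZVM_SET_ONE_REG, &reg);
 */
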
/**
 * gzvm_vcpu_handle_mmio() - Handle mmio in kernel space.
 * @vcpu: Pointer to vcpu.
 *
 * Return:
 * * true - This mmio exit has been processed.
 * * false - This mmio exit has not been processed and requires userspace handling.
 */
static bool gzvm_vcpu_handle_mmio(struct gzvm_vcpu *vcpu)
{
	__u64 addr;
	__u32 len;
	const void *val_ptr;

	/* So far, we don't have in-kernel mmio read handler */
	if (!vcpu->run->mmio.is_write)
		return false;

	addr = vcpu->run->mmio.phys_addr;
	len = vcpu->run->mmio.size;
	val_ptr = &vcpu->run->mmio.data;

	return gzvm_ioevent_write(vcpu, addr, len, val_ptr);
}

/**
 * gzvm_vcpu_run() - Handle vcpu run ioctl, entry point to guest and exit
 *		     point from guest
 * @vcpu: Pointer to struct gzvm_vcpu
 * @argp: Pointer to struct gzvm_vcpu_run in userspace
 *
 * Return:
 * * 0 - Success.
 * * Negative - Failure.
 */
static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void __user *argp)
{
	bool need_userspace = false;
	u64 exit_reason = 0;

	if (copy_from_user(vcpu->run, argp, sizeof(struct gzvm_vcpu_run)))
		return -EFAULT;

	for (int i = 0; i < ARRAY_SIZE(vcpu->run->padding1); i++) {
		if (vcpu->run->padding1[i])
			return -EINVAL;
	}

	if (vcpu->run->immediate_exit == 1)
		return -EINTR;

	while (!need_userspace && !signal_pending(current)) {
		gzvm_arch_vcpu_run(vcpu, &exit_reason);

		switch (exit_reason) {
		case GZVM_EXIT_MMIO:
			if (!gzvm_vcpu_handle_mmio(vcpu))
				need_userspace = true;
			break;
		/*
		 * It's geniezone's responsibility to fill corresponding data
		 * structure
		 */
		case GZVM_EXIT_HYPERCALL:
			if (!gzvm_handle_guest_hvc(vcpu))
				need_userspace = true;
			break;
		case GZVM_EXIT_EXCEPTION:
			if (!gzvm_handle_guest_exception(vcpu))
				need_userspace = true;
			break;
		case GZVM_EXIT_DEBUG:
			fallthrough;
		case GZVM_EXIT_FAIL_ENTRY:
			fallthrough;
		case GZVM_EXIT_INTERNAL_ERROR:
			fallthrough;
		case GZVM_EXIT_SYSTEM_EVENT:
			fallthrough;
		case GZVM_EXIT_SHUTDOWN:
			need_userspace = true;
			break;
		case GZVM_EXIT_IRQ:
			fallthrough;
		case GZVM_EXIT_GZ:
			break;
		case GZVM_EXIT_UNKNOWN:
			fallthrough;
		default:
			pr_err("vcpu unknown exit\n");
			need_userspace = true;
			goto out;
		}
	}
out:
	if (copy_to_user(argp, vcpu->run, sizeof(struct gzvm_vcpu_run)))
		return -EFAULT;
	if (signal_pending(current)) {
		/* invoke hvc to inform GZ hypervisor that this vcpu exits due to a pending signal */
		gzvm_arch_inform_exit(vcpu->gzvm->vm_id);
		return -ERESTARTSYS;
	}
	return 0;
}

static long gzvm_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			    unsigned long arg)
{
	int ret = -ENOTTY;
	void __user *argp = (void __user *)arg;
	struct gzvm_vcpu *vcpu = filp->private_data;

	switch (ioctl) {
	case GZVM_RUN:
		ret = gzvm_vcpu_run(vcpu, argp);
		break;
	case GZVM_GET_ONE_REG:
		/* !is_write */
		ret = -EOPNOTSUPP;
		break;
	case GZVM_SET_ONE_REG:
		/* is_write */
		ret = gzvm_vcpu_update_one_reg(vcpu, argp, true);
		break;
	default:
		break;
	}

	return ret;
}

static const struct file_operations gzvm_vcpu_fops = {
	.unlocked_ioctl	= gzvm_vcpu_ioctl,
	.llseek		= noop_llseek,
};

/* caller must hold the vm lock */
static void gzvm_destroy_vcpu(struct gzvm_vcpu *vcpu)
{
	if (!vcpu)
		return;

	gzvm_arch_destroy_vcpu(vcpu->gzvm->vm_id, vcpu->vcpuid);
	/* clean guest's data */
	memset(vcpu->run, 0, GZVM_VCPU_RUN_MAP_SIZE);
	free_pages_exact(vcpu->run, GZVM_VCPU_RUN_MAP_SIZE);
	kfree(vcpu);
}

/**
 * gzvm_destroy_vcpus() - Destroy all vcpus, caller has to hold the vm lock
 *
 * @gzvm: vm struct that owns the vcpus
 */
void gzvm_destroy_vcpus(struct gzvm *gzvm)
{
	int i;

	for (i = 0; i < GZVM_MAX_VCPUS; i++) {
		gzvm_destroy_vcpu(gzvm->vcpus[i]);
		gzvm->vcpus[i] = NULL;
	}
}

/* create_vcpu_fd() - Allocates an inode for the vcpu. */
static int create_vcpu_fd(struct gzvm_vcpu *vcpu)
{
	/* sizeof("gzvm-vcpu:") + max(strlen(itoa(vcpuid))) + null */
	char name[10 + ITOA_MAX_LEN + 1];

	snprintf(name, sizeof(name), "gzvm-vcpu:%d", vcpu->vcpuid);
	return anon_inode_getfd(name, &gzvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}

/**
 * gzvm_vm_ioctl_create_vcpu() - for GZVM_CREATE_VCPU
 * @gzvm: Pointer to struct gzvm
 * @cpuid: vcpu id, taken directly from the ioctl argument
 *
 * Return: Fd of vcpu, negative errno if error occurs
 */
int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid)
{
	struct gzvm_vcpu *vcpu;
	int ret;

	if (cpuid >= GZVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
	if (!vcpu)
		return -ENOMEM;

	/*
	 * Allocate 2 pages for data sharing between driver and gz hypervisor
	 *
	 * |- page 0           -|- page 1      -|
	 * |gzvm_vcpu_run|......|hwstate|.......|
	 */
	vcpu->run = alloc_pages_exact(GZVM_VCPU_RUN_MAP_SIZE,
				      GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vcpu->run) {
		ret = -ENOMEM;
		goto free_vcpu;
	}
	vcpu->hwstate = (void *)vcpu->run + PAGE_SIZE;
	vcpu->vcpuid = cpuid;
	vcpu->gzvm = gzvm;
	mutex_init(&vcpu->lock);

	ret = gzvm_arch_create_vcpu(gzvm->vm_id, vcpu->vcpuid, vcpu->run);
	if (ret < 0)
		goto free_vcpu_run;

	ret = create_vcpu_fd(vcpu);
	if (ret < 0)
		goto free_vcpu_run;
	gzvm->vcpus[cpuid] = vcpu;

	return ret;

free_vcpu_run:
	free_pages_exact(vcpu->run, GZVM_VCPU_RUN_MAP_SIZE);
free_vcpu:
	kfree(vcpu);
	return ret;
}
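
For context, the two user-facing entry points in this file (GZVM_CREATE_VCPU handled by gzvm_vm_ioctl_create_vcpu(), and GZVM_RUN handled through gzvm_vcpu_ioctl()/gzvm_vcpu_run()) are driven from a VMM through ioctls on the VM and vcpu file descriptors. The sketch below is illustrative only: GZVM_CREATE_VCPU, GZVM_RUN and struct gzvm_vcpu_run are the interfaces handled above, while the /dev/gzvm device node, the GZVM_CREATE_VM ioctl, the <linux/gzvm.h> header name and the exact gzvm_vcpu_run layout are assumptions from the gzvm UAPI, not shown in this file.

/*
 * Hypothetical VMM-side usage sketch; error handling is omitted.
 * vm_fd is assumed to come from something like:
 *	int gzvm_fd = open("/dev/gzvm", O_RDWR);	// device node name assumed
 *	int vm_fd = ioctl(gzvm_fd, GZVM_CREATE_VM, 0);	// ioctl not in this file
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/gzvm.h>		/* assumed UAPI header providing GZVM_* and structs */

static int run_one_vcpu(int vm_fd, unsigned int cpuid)
{
	struct gzvm_vcpu_run run = { 0 };	/* padding1/immediate_exit must be zero */
	int vcpu_fd;

	/* gzvm_vm_ioctl_create_vcpu() returns an anon-inode vcpu fd */
	vcpu_fd = ioctl(vm_fd, GZVM_CREATE_VCPU, cpuid);
	if (vcpu_fd < 0)
		return vcpu_fd;

	for (;;) {
		/*
		 * Enters the guest; returns 0 once an exit needs userspace
		 * handling, with the exit data copied back into 'run'.
		 */
		if (ioctl(vcpu_fd, GZVM_RUN, &run) < 0)
			break;	/* e.g. interrupted by a signal */
		/* inspect the exit info in 'run' (such as the mmio fields) and emulate */
	}
	return 0;
}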