hsm.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * ACRN Hypervisor Service Module (HSM)
  4. *
  5. * Copyright (C) 2020 Intel Corporation. All rights reserved.
  6. *
  7. * Authors:
  8. * Fengwei Yin <[email protected]>
  9. * Yakui Zhao <[email protected]>
  10. */
  11. #include <linux/cpu.h>
  12. #include <linux/io.h>
  13. #include <linux/mm.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <asm/acrn.h>
  17. #include <asm/hypervisor.h>
  18. #include "acrn_drv.h"
  19. /*
  20. * When /dev/acrn_hsm is opened, a 'struct acrn_vm' object is created to
  21. * represent a VM instance and continues to be associated with the opened file
  22. * descriptor. All ioctl operations on this file descriptor will be targeted to
  23. * the VM instance. Release of this file descriptor will destroy the object.
  24. */
  25. static int acrn_dev_open(struct inode *inode, struct file *filp)
  26. {
  27. struct acrn_vm *vm;
  28. vm = kzalloc(sizeof(*vm), GFP_KERNEL);
  29. if (!vm)
  30. return -ENOMEM;
  31. vm->vmid = ACRN_INVALID_VMID;
  32. filp->private_data = vm;
  33. return 0;
  34. }
  35. static int pmcmd_ioctl(u64 cmd, void __user *uptr)
  36. {
  37. struct acrn_pstate_data *px_data;
  38. struct acrn_cstate_data *cx_data;
  39. u64 *pm_info;
  40. int ret = 0;
  41. switch (cmd & PMCMD_TYPE_MASK) {
  42. case ACRN_PMCMD_GET_PX_CNT:
  43. case ACRN_PMCMD_GET_CX_CNT:
  44. pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
  45. if (!pm_info)
  46. return -ENOMEM;
  47. ret = hcall_get_cpu_state(cmd, virt_to_phys(pm_info));
  48. if (ret < 0) {
  49. kfree(pm_info);
  50. break;
  51. }
  52. if (copy_to_user(uptr, pm_info, sizeof(u64)))
  53. ret = -EFAULT;
  54. kfree(pm_info);
  55. break;
  56. case ACRN_PMCMD_GET_PX_DATA:
  57. px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
  58. if (!px_data)
  59. return -ENOMEM;
  60. ret = hcall_get_cpu_state(cmd, virt_to_phys(px_data));
  61. if (ret < 0) {
  62. kfree(px_data);
  63. break;
  64. }
  65. if (copy_to_user(uptr, px_data, sizeof(*px_data)))
  66. ret = -EFAULT;
  67. kfree(px_data);
  68. break;
  69. case ACRN_PMCMD_GET_CX_DATA:
  70. cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
  71. if (!cx_data)
  72. return -ENOMEM;
  73. ret = hcall_get_cpu_state(cmd, virt_to_phys(cx_data));
  74. if (ret < 0) {
  75. kfree(cx_data);
  76. break;
  77. }
  78. if (copy_to_user(uptr, cx_data, sizeof(*cx_data)))
  79. ret = -EFAULT;
  80. kfree(cx_data);
  81. break;
  82. default:
  83. break;
  84. }
  85. return ret;
  86. }
  87. /*
  88. * HSM relies on hypercall layer of the ACRN hypervisor to do the
  89. * sanity check against the input parameters.
  90. */
/*
 * Main ioctl dispatcher for /dev/acrn_hsm.
 *
 * The 'struct acrn_vm' created at open() time is carried in
 * filp->private_data; every command except ACRN_IOCTL_CREATE_VM
 * operates on that VM.  Structure-sized parameters are copied into
 * kernel bounce buffers (memdup_user()/copy_from_user()) because the
 * hypercalls take physical addresses of kernel memory.
 *
 * Returns 0 on success or a negative error code.
 */
static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long ioctl_param)
{
	struct acrn_vm *vm = filp->private_data;
	struct acrn_vm_creation *vm_param;
	struct acrn_vcpu_regs *cpu_regs;
	struct acrn_ioreq_notify notify;
	struct acrn_ptdev_irq *irq_info;
	struct acrn_ioeventfd ioeventfd;
	struct acrn_vm_memmap memmap;
	struct acrn_mmiodev *mmiodev;
	struct acrn_msi_entry *msi;
	struct acrn_pcidev *pcidev;
	struct acrn_irqfd irqfd;
	struct acrn_vdev *vdev;
	struct page *page;
	u64 cstate_cmd;
	int i, ret = 0;

	/* All commands except VM creation require an already-created VM. */
	if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
		dev_dbg(acrn_dev.this_device,
			"ioctl 0x%x: Invalid VM state!\n", cmd);
		return -EINVAL;
	}

	switch (cmd) {
	case ACRN_IOCTL_CREATE_VM:
		vm_param = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vm_creation));
		if (IS_ERR(vm_param))
			return PTR_ERR(vm_param);

		/* Reserved fields must be zero (forward compatibility). */
		if ((vm_param->reserved0 | vm_param->reserved1) != 0) {
			kfree(vm_param);
			return -EINVAL;
		}

		vm = acrn_vm_create(vm, vm_param);
		if (!vm) {
			ret = -EINVAL;
			kfree(vm_param);
			break;
		}

		/*
		 * Hand the (hypervisor-filled) creation info back to
		 * userspace; on failure tear the VM down again.
		 */
		if (copy_to_user((void __user *)ioctl_param, vm_param,
				 sizeof(struct acrn_vm_creation))) {
			acrn_vm_destroy(vm);
			ret = -EFAULT;
		}

		kfree(vm_param);
		break;
	case ACRN_IOCTL_START_VM:
		ret = hcall_start_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to start VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_PAUSE_VM:
		ret = hcall_pause_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to pause VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_RESET_VM:
		ret = hcall_reset_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to restart VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_DESTROY_VM:
		ret = acrn_vm_destroy(vm);
		break;
	case ACRN_IOCTL_SET_VCPU_REGS:
		cpu_regs = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vcpu_regs));
		if (IS_ERR(cpu_regs))
			return PTR_ERR(cpu_regs);

		/*
		 * Reject any non-zero reserved field before handing the
		 * register state to the hypervisor.
		 */
		for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
			if (cpu_regs->reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
			if (cpu_regs->vcpu_regs.reserved_32[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
			if (cpu_regs->vcpu_regs.reserved_64[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		/* gdt and idt reserved arrays are checked in one pass. */
		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
			if (cpu_regs->vcpu_regs.gdt.reserved[i] |
			    cpu_regs->vcpu_regs.idt.reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set regs state of VM%u!\n",
				vm->vmid);
		kfree(cpu_regs);
		break;
	case ACRN_IOCTL_SET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_map(vm, &memmap);
		break;
	case ACRN_IOCTL_UNSET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_unmap(vm, &memmap);
		break;
	case ACRN_IOCTL_ASSIGN_MMIODEV:
		mmiodev = memdup_user((void __user *)ioctl_param,
				      sizeof(struct acrn_mmiodev));
		if (IS_ERR(mmiodev))
			return PTR_ERR(mmiodev);

		ret = hcall_assign_mmiodev(vm->vmid, virt_to_phys(mmiodev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to assign MMIO device!\n");
		kfree(mmiodev);
		break;
	case ACRN_IOCTL_DEASSIGN_MMIODEV:
		mmiodev = memdup_user((void __user *)ioctl_param,
				      sizeof(struct acrn_mmiodev));
		if (IS_ERR(mmiodev))
			return PTR_ERR(mmiodev);

		ret = hcall_deassign_mmiodev(vm->vmid, virt_to_phys(mmiodev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to deassign MMIO device!\n");
		kfree(mmiodev);
		break;
	case ACRN_IOCTL_ASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_assign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to assign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_DEASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_deassign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to deassign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_CREATE_VDEV:
		vdev = memdup_user((void __user *)ioctl_param,
				   sizeof(struct acrn_vdev));
		if (IS_ERR(vdev))
			return PTR_ERR(vdev);

		ret = hcall_create_vdev(vm->vmid, virt_to_phys(vdev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to create virtual device!\n");
		kfree(vdev);
		break;
	case ACRN_IOCTL_DESTROY_VDEV:
		vdev = memdup_user((void __user *)ioctl_param,
				   sizeof(struct acrn_vdev));
		if (IS_ERR(vdev))
			return PTR_ERR(vdev);

		ret = hcall_destroy_vdev(vm->vmid, virt_to_phys(vdev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to destroy virtual device!\n");
		kfree(vdev);
		break;
	case ACRN_IOCTL_SET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_set_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to configure intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_RESET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_reset_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to reset intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_SET_IRQLINE:
		/* ioctl_param itself encodes the irqline operation. */
		ret = hcall_set_irqline(vm->vmid, ioctl_param);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set interrupt line!\n");
		break;
	case ACRN_IOCTL_INJECT_MSI:
		msi = memdup_user((void __user *)ioctl_param,
				  sizeof(struct acrn_msi_entry));
		if (IS_ERR(msi))
			return PTR_ERR(msi);

		ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to inject MSI!\n");
		kfree(msi);
		break;
	case ACRN_IOCTL_VM_INTR_MONITOR:
		/*
		 * Long-term pin one user page that the hypervisor will
		 * use as the interrupt-statistics buffer.
		 */
		ret = pin_user_pages_fast(ioctl_param, 1,
					  FOLL_WRITE | FOLL_LONGTERM, &page);
		if (unlikely(ret != 1)) {
			dev_dbg(acrn_dev.this_device,
				"Failed to pin intr hdr buffer!\n");
			return -EFAULT;
		}

		ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page));
		if (ret < 0) {
			unpin_user_page(page);
			dev_dbg(acrn_dev.this_device,
				"Failed to monitor intr data!\n");
			return ret;
		}

		/* Release a previously registered monitor page, if any. */
		if (vm->monitor_page)
			unpin_user_page(vm->monitor_page);
		vm->monitor_page = page;
		break;
	case ACRN_IOCTL_CREATE_IOREQ_CLIENT:
		/* Only one default (fallback) I/O request client per VM. */
		if (vm->default_client)
			return -EEXIST;
		if (!acrn_ioreq_client_create(vm, NULL, NULL, true, "acrndm"))
			ret = -EINVAL;
		break;
	case ACRN_IOCTL_DESTROY_IOREQ_CLIENT:
		if (vm->default_client)
			acrn_ioreq_client_destroy(vm->default_client);
		break;
	case ACRN_IOCTL_ATTACH_IOREQ_CLIENT:
		/* Blocks until an I/O request is pending for the client. */
		if (vm->default_client)
			ret = acrn_ioreq_client_wait(vm->default_client);
		else
			ret = -ENODEV;
		break;
	case ACRN_IOCTL_NOTIFY_REQUEST_FINISH:
		if (copy_from_user(&notify, (void __user *)ioctl_param,
				   sizeof(struct acrn_ioreq_notify)))
			return -EFAULT;

		if (notify.reserved != 0)
			return -EINVAL;

		ret = acrn_ioreq_request_default_complete(vm, notify.vcpu);
		break;
	case ACRN_IOCTL_CLEAR_VM_IOREQ:
		acrn_ioreq_request_clear(vm);
		break;
	case ACRN_IOCTL_PM_GET_CPU_STATE:
		/*
		 * ioctl_param doubles as input (command word) and output
		 * buffer for pmcmd_ioctl().
		 */
		if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param,
				   sizeof(cstate_cmd)))
			return -EFAULT;

		ret = pmcmd_ioctl(cstate_cmd, (void __user *)ioctl_param);
		break;
	case ACRN_IOCTL_IOEVENTFD:
		if (copy_from_user(&ioeventfd, (void __user *)ioctl_param,
				   sizeof(ioeventfd)))
			return -EFAULT;

		if (ioeventfd.reserved != 0)
			return -EINVAL;

		ret = acrn_ioeventfd_config(vm, &ioeventfd);
		break;
	case ACRN_IOCTL_IRQFD:
		if (copy_from_user(&irqfd, (void __user *)ioctl_param,
				   sizeof(irqfd)))
			return -EFAULT;

		ret = acrn_irqfd_config(vm, &irqfd);
		break;
	default:
		dev_dbg(acrn_dev.this_device, "Unknown IOCTL 0x%x!\n", cmd);
		ret = -ENOTTY;
	}

	return ret;
}
/*
 * Release handler for /dev/acrn_hsm: destroy the VM bound to the file
 * descriptor and free its bookkeeping structure.  acrn_vm_destroy() is
 * safe here even if the VM was already destroyed via ioctl (or was
 * never created).
 */
static int acrn_dev_release(struct inode *inode, struct file *filp)
{
	struct acrn_vm *vm = filp->private_data;

	acrn_vm_destroy(vm);
	kfree(vm);
	return 0;
}
  387. static ssize_t remove_cpu_store(struct device *dev,
  388. struct device_attribute *attr,
  389. const char *buf, size_t count)
  390. {
  391. u64 cpu, lapicid;
  392. int ret;
  393. if (kstrtoull(buf, 0, &cpu) < 0)
  394. return -EINVAL;
  395. if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
  396. return -EINVAL;
  397. if (cpu_online(cpu))
  398. remove_cpu(cpu);
  399. lapicid = cpu_data(cpu).apicid;
  400. dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
  401. ret = hcall_sos_remove_cpu(lapicid);
  402. if (ret < 0) {
  403. dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
  404. goto fail_remove;
  405. }
  406. return count;
  407. fail_remove:
  408. add_cpu(cpu);
  409. return ret;
  410. }
  411. static DEVICE_ATTR_WO(remove_cpu);
  412. static umode_t acrn_attr_visible(struct kobject *kobj, struct attribute *a, int n)
  413. {
  414. if (a == &dev_attr_remove_cpu.attr)
  415. return IS_ENABLED(CONFIG_HOTPLUG_CPU) ? a->mode : 0;
  416. return a->mode;
  417. }
/* Sysfs attributes exported by the HSM misc device. */
static struct attribute *acrn_attrs[] = {
	&dev_attr_remove_cpu.attr,
	NULL
};
/* Attribute group; .is_visible filters per-attribute at runtime. */
static struct attribute_group acrn_attr_group = {
	.attrs = acrn_attrs,
	.is_visible = acrn_attr_visible,
};
/* NULL-terminated group list handed to the misc device core. */
static const struct attribute_group *acrn_attr_groups[] = {
	&acrn_attr_group,
	NULL
};
/* File operations for /dev/acrn_hsm; no read/write, ioctl-driven. */
static const struct file_operations acrn_fops = {
	.owner		= THIS_MODULE,
	.open		= acrn_dev_open,
	.release	= acrn_dev_release,
	.unlocked_ioctl = acrn_dev_ioctl,
};
/*
 * The HSM misc device (/dev/acrn_hsm).  Non-static: other parts of the
 * driver reference acrn_dev (e.g. for dev_dbg logging).
 */
struct miscdevice acrn_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "acrn_hsm",
	.fops	= &acrn_fops,
	.groups	= acrn_attr_groups,
};
  442. static int __init hsm_init(void)
  443. {
  444. int ret;
  445. if (x86_hyper_type != X86_HYPER_ACRN)
  446. return -ENODEV;
  447. if (!(cpuid_eax(ACRN_CPUID_FEATURES) & ACRN_FEATURE_PRIVILEGED_VM))
  448. return -EPERM;
  449. ret = misc_register(&acrn_dev);
  450. if (ret) {
  451. pr_err("Create misc dev failed!\n");
  452. return ret;
  453. }
  454. ret = acrn_ioreq_intr_setup();
  455. if (ret) {
  456. pr_err("Setup I/O request handler failed!\n");
  457. misc_deregister(&acrn_dev);
  458. return ret;
  459. }
  460. return 0;
  461. }
/*
 * Module exit: tear down in reverse order of hsm_init() — remove the
 * I/O request interrupt handler first, then deregister the device.
 */
static void __exit hsm_exit(void)
{
	acrn_ioreq_intr_remove();
	misc_deregister(&acrn_dev);
}
/* Module registration and metadata. */
module_init(hsm_init);
module_exit(hsm_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ACRN Hypervisor Service Module (HSM)");