// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/anon_inodes.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/qcom_scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/gunyah_deprecated.h>

#include "gh_secure_vm_virtio_backend.h"
#include "gh_secure_vm_loader.h"
#include "gh_proxy_sched.h"
#include "gh_private.h"

#define MAX_VCPU_NAME 20 /* gh-vcpu:u32_max +1 */

SRCU_NOTIFIER_HEAD_STATIC(gh_vm_notifier);

/*
 * Support for RM calls and the wait for change of status
 */
#define gh_rm_call_and_set_status(name) \
static int gh_##name(struct gh_vm *vm, int vm_status) \
{ \
	int ret = 0; \
	ret = ghd_rm_##name(vm->vmid); \
	if (!ret) \
		vm->status.vm_status = vm_status; \
	return ret; \
}

gh_rm_call_and_set_status(vm_start);
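
/*
 * The instantiation above generates gh_vm_start(), i.e. roughly:
 *
 *	static int gh_vm_start(struct gh_vm *vm, int vm_status)
 *	{
 *		int ret = ghd_rm_vm_start(vm->vmid);
 *
 *		if (!ret)
 *			vm->status.vm_status = vm_status;
 *		return ret;
 *	}
 *
 * gh_vcpu_ioctl_run() calls it with GH_RM_VM_STATUS_RUNNING once the VM
 * image has been configured and authenticated.
 */
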
int gh_register_vm_notifier(struct notifier_block *nb)
{
	return srcu_notifier_chain_register(&gh_vm_notifier, nb);
}
EXPORT_SYMBOL(gh_register_vm_notifier);

int gh_unregister_vm_notifier(struct notifier_block *nb)
{
	return srcu_notifier_chain_unregister(&gh_vm_notifier, nb);
}
EXPORT_SYMBOL(gh_unregister_vm_notifier);
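
/*
 * Clients that register above are notified of VM lifecycle events
 * (GH_VM_BEFORE_POWERUP, GH_VM_POWERUP_FAIL, GH_VM_EARLY_POWEROFF,
 * GH_VM_POWEROFF) through gh_notify_clients(); the notifier data is a
 * pointer to the affected vmid.
 */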
static void gh_notify_clients(struct gh_vm *vm, unsigned long val)
{
	srcu_notifier_call_chain(&gh_vm_notifier, val, &vm->vmid);
}

static void gh_notif_vm_status(struct gh_vm *vm,
				struct gh_rm_notif_vm_status_payload *status)
{
	if (vm->vmid != status->vmid)
		return;

	/* Wake up the waiters only if there's a change in any of the states */
	if (status->vm_status != vm->status.vm_status &&
	    (status->vm_status == GH_RM_VM_STATUS_RESET ||
	     status->vm_status == GH_RM_VM_STATUS_READY)) {
		pr_info("VM: %d status %d complete\n", vm->vmid,
			status->vm_status);
		vm->status.vm_status = status->vm_status;
		wake_up_interruptible(&vm->vm_status_wait);
	}
}

static void gh_notif_vm_exited(struct gh_vm *vm,
				struct gh_rm_notif_vm_exited_payload *vm_exited)
{
	if (vm->vmid != vm_exited->vmid)
		return;

	mutex_lock(&vm->vm_lock);
	vm->exit_type = vm_exited->exit_type;
	vm->status.vm_status = GH_RM_VM_STATUS_EXITED;
	gh_wakeup_all_vcpus(vm->vmid);
	wake_up_interruptible(&vm->vm_status_wait);
	mutex_unlock(&vm->vm_lock);
}

int gh_wait_for_vm_status(struct gh_vm *vm, int wait_status)
{
	int ret = 0;

	ret = wait_event_interruptible(vm->vm_status_wait,
				vm->status.vm_status == wait_status);
	if (ret < 0)
		pr_err("Wait for VM_STATUS %d interrupted\n", wait_status);
	return ret;
}
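
/*
 * Resource-manager notifier callback: dispatches VM status changes and
 * VM exit notifications from the RM to the per-VM handlers above.
 */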
static int gh_vm_rm_notifier_fn(struct notifier_block *nb,
				unsigned long cmd, void *data)
{
	struct gh_vm *vm;

	vm = container_of(nb, struct gh_vm, rm_nb);
	switch (cmd) {
	case GH_RM_NOTIF_VM_STATUS:
		gh_notif_vm_status(vm, data);
		break;
	case GH_RM_NOTIF_VM_EXITED:
		gh_notif_vm_exited(vm, data);
		break;
	}
	return NOTIFY_DONE;
}
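
/*
 * Tear a VM down from whatever state it reached: each case falls through
 * to the cleanup steps of the earlier states, ending with the VMID being
 * deallocated and the status reset to GH_RM_VM_STATUS_NO_STATE.
 */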
static void gh_vm_cleanup(struct gh_vm *vm)
{
	gh_vmid_t vmid = vm->vmid;
	int vm_status = vm->status.vm_status;
	int ret;

	switch (vm_status) {
	case GH_RM_VM_STATUS_EXITED:
	case GH_RM_VM_STATUS_RUNNING:
	case GH_RM_VM_STATUS_READY:
		ret = gh_rm_unpopulate_hyp_res(vmid, vm->fw_name);
		if (ret)
			pr_warn("Failed to unpopulate hyp resources: %d\n", ret);

		ret = gh_virtio_mmio_exit(vmid, vm->fw_name);
		if (ret)
			pr_warn("Failed to free virtio resources: %d\n", ret);
		fallthrough;
	case GH_RM_VM_STATUS_INIT:
	case GH_RM_VM_STATUS_AUTH:
		ret = ghd_rm_vm_reset(vmid);
		if (!ret) {
			ret = gh_wait_for_vm_status(vm, GH_RM_VM_STATUS_RESET);
			if (ret < 0)
				pr_err("wait for VM_STATUS_RESET interrupted %d\n", ret);
		} else {
			pr_warn("Reset is unsuccessful for VM:%d\n", vmid);
		}

		if (vm->is_secure_vm) {
			ret = gh_secure_vm_loader_reclaim_fw(vm);
			if (ret)
				pr_warn("Failed to reclaim mem VMID: %d: %d\n", vmid, ret);
		}
		fallthrough;
	case GH_RM_VM_STATUS_LOAD:
		ret = gh_rm_vm_dealloc_vmid(vmid);
		if (ret)
			pr_warn("Failed to dealloc VMID: %d: %d\n", vmid, ret);
		vm->vmid = 0;
	}
	vm->status.vm_status = GH_RM_VM_STATUS_NO_STATE;
}

static int gh_exit_vm(struct gh_vm *vm, u32 stop_reason, u8 stop_flags)
{
	gh_vmid_t vmid = vm->vmid;
	int ret = -EINVAL;

	if (!vmid)
		return -ENODEV;

	mutex_lock(&vm->vm_lock);
	if (vm->status.vm_status != GH_RM_VM_STATUS_RUNNING) {
		pr_err("VM:%d is not running\n", vmid);
		mutex_unlock(&vm->vm_lock);
		return -ENODEV;
	}

	ret = ghd_rm_vm_stop(vmid, stop_reason, stop_flags);
	if (ret) {
		pr_err("Failed to stop the VM:%d ret %d\n", vmid, ret);
		mutex_unlock(&vm->vm_lock);
		return ret;
	}
	mutex_unlock(&vm->vm_lock);

	ret = gh_wait_for_vm_status(vm, GH_RM_VM_STATUS_EXITED);
	if (ret)
		pr_err("VM:%d stop operation is interrupted\n", vmid);
	return ret;
}
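
/*
 * Ask the VM to stop gracefully (GH_VM_STOP_RESTART); if that fails for
 * any reason other than the VM not running, fall back to a forced stop.
 */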
static int gh_stop_vm(struct gh_vm *vm)
{
	gh_vmid_t vmid = vm->vmid;
	int ret;

	ret = gh_exit_vm(vm, GH_VM_STOP_RESTART, 0);
	if (ret && ret != -ENODEV)
		goto err_vm_force_stop;
	return ret;

err_vm_force_stop:
	ret = gh_exit_vm(vm, GH_VM_STOP_FORCE_STOP,
			GH_RM_VM_STOP_FLAG_FORCE_STOP);
	if (ret)
		pr_err("VM:%d force stop has failed\n", vmid);
	return ret;
}

void gh_destroy_vcpu(struct gh_vcpu *vcpu)
{
	struct gh_vm *vm = vcpu->vm;
	u32 id = vcpu->vcpu_id;

	kfree(vcpu);
	vm->vcpus[id] = NULL;
	vm->created_vcpus--;
}

void gh_destroy_vm(struct gh_vm *vm)
{
	int vcpu_id = 0;

	if (vm->status.vm_status == GH_RM_VM_STATUS_NO_STATE)
		goto clean_vm;

	gh_stop_vm(vm);

	while (vm->created_vcpus && vcpu_id < GH_MAX_VCPUS) {
		if (vm->vcpus[vcpu_id])
			gh_destroy_vcpu(vm->vcpus[vcpu_id]);
		vcpu_id++;
	}

	gh_notify_clients(vm, GH_VM_EARLY_POWEROFF);
	gh_vm_cleanup(vm);
	gh_uevent_notify_change(GH_EVENT_DESTROY_VM, vm);
	gh_notify_clients(vm, GH_VM_POWEROFF);
	memset(vm->fw_name, 0, GH_VM_FW_NAME_MAX);

clean_vm:
	gh_rm_unregister_notifier(&vm->rm_nb);
	mutex_destroy(&vm->vm_lock);
	kfree(vm);
}

static void gh_get_vm(struct gh_vm *vm)
{
	refcount_inc(&vm->users_count);
}

static void gh_put_vm(struct gh_vm *vm)
{
	if (refcount_dec_and_test(&vm->users_count))
		gh_destroy_vm(vm);
}

static int gh_vcpu_release(struct inode *inode, struct file *filp)
{
	struct gh_vcpu *vcpu = filp->private_data;

	/* need to create workqueue if critical vm */
	if (vcpu->vm->keep_running)
		gh_vcpu_create_wq(vcpu->vm->vmid, vcpu->vcpu_id);
	else
		gh_put_vm(vcpu->vm);
	return 0;
}
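
/*
 * GH_VCPU_RUN: on the first call for a VM that is READY, notify clients,
 * start the VM through the resource manager and mark it RUNNING. Then,
 * if the VM is proxy-scheduled, enter gh_vcpu_run() for this vCPU;
 * either way, block until the VM reports EXITED and return the exit type.
 */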
static int gh_vcpu_ioctl_run(struct gh_vcpu *vcpu)
{
	struct gh_hcall_vcpu_run_resp vcpu_run;
	struct gh_vm *vm = vcpu->vm;
	int ret = 0;

	mutex_lock(&vm->vm_lock);
	if (vm->status.vm_status == GH_RM_VM_STATUS_RUNNING) {
		mutex_unlock(&vm->vm_lock);
		goto start_vcpu_run;
	}

	if (vm->vm_run_once &&
	    vm->status.vm_status != GH_RM_VM_STATUS_RUNNING) {
		pr_err("VM:%d has failed to run before\n", vm->vmid);
		mutex_unlock(&vm->vm_lock);
		return -EINVAL;
	}
	vm->vm_run_once = true;

	if (vm->is_secure_vm &&
	    vm->created_vcpus != vm->allowed_vcpus) {
		pr_err("VCPUs created %d doesn't match with allowed %d for VM %d\n",
			vm->created_vcpus, vm->allowed_vcpus,
			vm->vmid);
		ret = -EINVAL;
		mutex_unlock(&vm->vm_lock);
		return ret;
	}

	if (vm->status.vm_status != GH_RM_VM_STATUS_READY) {
		pr_err("VM:%d not ready to start\n", vm->vmid);
		ret = -EINVAL;
		mutex_unlock(&vm->vm_lock);
		return ret;
	}

	gh_notify_clients(vm, GH_VM_BEFORE_POWERUP);

	ret = gh_vm_start(vm, GH_RM_VM_STATUS_RUNNING);
	if (ret) {
		pr_err("Failed to start VM:%d %d\n", vm->vmid, ret);
		mutex_unlock(&vm->vm_lock);
		goto err_powerup;
	}
	pr_info("VM:%d started running\n", vm->vmid);
	mutex_unlock(&vm->vm_lock);

start_vcpu_run:
	/* Proxy-scheduled VMs: run this vCPU via the proxy scheduling APIs */
	if (gh_vm_supports_proxy_sched(vm->vmid)) {
		ret = gh_vcpu_run(vm->vmid, vcpu->vcpu_id,
					0, 0, 0, &vcpu_run);
		if (ret < 0) {
			pr_err("Failed vcpu_run %d\n", ret);
			return ret;
		}
	}

	ret = gh_wait_for_vm_status(vm, GH_RM_VM_STATUS_EXITED);
	if (ret)
		return ret;

	ret = vm->exit_type;
	return ret;

err_powerup:
	gh_notify_clients(vm, GH_VM_POWERUP_FAIL);
	return ret;
}

static long gh_vcpu_ioctl(struct file *filp,
			unsigned int cmd, unsigned long arg)
{
	struct gh_vcpu *vcpu = filp->private_data;
	int ret = -EINVAL;

	switch (cmd) {
	case GH_VCPU_RUN:
		ret = gh_vcpu_ioctl_run(vcpu);
		break;
	default:
		pr_err("Invalid gunyah VCPU ioctl 0x%x\n", cmd);
		break;
	}
	return ret;
}

static const struct file_operations gh_vcpu_fops = {
	.unlocked_ioctl = gh_vcpu_ioctl,
	.release = gh_vcpu_release,
	.llseek = noop_llseek,
};

static int gh_vm_ioctl_get_vcpu_count(struct gh_vm *vm)
{
	if (!vm->is_secure_vm)
		return -EINVAL;

	if (vm->status.vm_status != GH_RM_VM_STATUS_READY)
		return -EAGAIN;

	return vm->allowed_vcpus;
}
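
/*
 * GH_CREATE_VCPU: allocate a vCPU context, pin it to the VM with an extra
 * VM reference, and hand userspace an anonymous-inode fd backed by
 * gh_vcpu_fops.
 */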
static long gh_vm_ioctl_create_vcpu(struct gh_vm *vm, u32 id)
{
	struct gh_vcpu *vcpu;
	struct file *file;
	char name[MAX_VCPU_NAME];
	int fd, err = 0;

	if (id >= GH_MAX_VCPUS)
		return -EINVAL;

	mutex_lock(&vm->vm_lock);
	if (vm->vcpus[id]) {
		err = -EEXIST;
		mutex_unlock(&vm->vm_lock);
		return err;
	}

	vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		mutex_unlock(&vm->vm_lock);
		return err;
	}
	vcpu->vcpu_id = id;
	vcpu->vm = vm;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		err = fd;
		goto err_destroy_vcpu;
	}

	snprintf(name, sizeof(name), "gh-vcpu:%d", id);
	file = anon_inode_getfile(name, &gh_vcpu_fops, vcpu, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_put_fd;
	}
	fd_install(fd, file);

	gh_get_vm(vm);
	vm->vcpus[id] = vcpu;
	vm->created_vcpus++;
	mutex_unlock(&vm->vm_lock);
	return fd;

err_put_fd:
	put_unused_fd(fd);
err_destroy_vcpu:
	kfree(vcpu);
	mutex_unlock(&vm->vm_lock);
	return err;
}
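
/*
 * VM memory handling: gh_provide_mem() first reassigns the physical range
 * from HLOS to the target VM with qcom_scm_assign_mem() and then donates it
 * (system VM) or lends it (other VMs) through the resource manager;
 * gh_reclaim_mem() reverses both steps, also reclaiming and reassigning the
 * extended region when one is supported.
 */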
int gh_reclaim_mem(struct gh_vm *vm, phys_addr_t phys,
			ssize_t size, bool is_system_vm)
{
	int vmid = vm->vmid;
	struct qcom_scm_vmperm destVM[1] = {{VMID_HLOS,
				PERM_READ | PERM_WRITE | PERM_EXEC}};
	u64 srcVM = BIT(vmid);
	int ret = 0;

	if (!is_system_vm) {
		ret = ghd_rm_mem_reclaim(vm->mem_handle, 0);
		if (ret)
			pr_err("Failed to reclaim memory for %d, %d\n",
				vm->vmid, ret);
	}

	ret = qcom_scm_assign_mem(phys, size, &srcVM, destVM, ARRAY_SIZE(destVM));
	if (ret)
		pr_err("failed qcom_assign for %pa address of size %zx - subsys VMid %d rc:%d\n",
			&phys, size, vmid, ret);

	if (vm->ext_region_supported) {
		if (!is_system_vm) {
			ret = ghd_rm_mem_reclaim(vm->ext_region->ext_mem_handle, 0);
			if (ret)
				pr_err("Failed to reclaim memory for %d, %d\n",
					vm->vmid, ret);
		}

		ret |= qcom_scm_assign_mem(vm->ext_region->ext_phys,
					vm->ext_region->ext_size,
					&srcVM, destVM, ARRAY_SIZE(destVM));
		if (ret)
			pr_err("failed qcom_assign for %pa address of size %zx - subsys VMid %d rc:%d\n",
				&vm->ext_region->ext_phys,
				vm->ext_region->ext_size, vmid, ret);
	}
	return ret;
}

int gh_provide_mem(struct gh_vm *vm, phys_addr_t phys,
			ssize_t size, bool is_system_vm)
{
	gh_vmid_t vmid = vm->vmid;
	struct gh_acl_desc *acl_desc;
	struct gh_sgl_desc *sgl_desc;
	struct qcom_scm_vmperm srcVM[1] = {{VMID_HLOS,
				PERM_READ | PERM_WRITE | PERM_EXEC}};
	struct qcom_scm_vmperm destVM[1] = {{vmid,
				PERM_READ | PERM_WRITE | PERM_EXEC}};
	u64 srcvmid = BIT(srcVM[0].vmid);
	u64 dstvmid = BIT(destVM[0].vmid);
	int ret = 0;

	acl_desc = kzalloc(offsetof(struct gh_acl_desc, acl_entries[1]),
			GFP_KERNEL);
	if (!acl_desc)
		return -ENOMEM;

	acl_desc->n_acl_entries = 1;
	acl_desc->acl_entries[0].vmid = vmid;
	acl_desc->acl_entries[0].perms =
			GH_RM_ACL_X | GH_RM_ACL_R | GH_RM_ACL_W;

	sgl_desc = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]),
			GFP_KERNEL);
	if (!sgl_desc) {
		kfree(acl_desc);
		return -ENOMEM;
	}

	sgl_desc->n_sgl_entries = 1;
	sgl_desc->sgl_entries[0].ipa_base = phys;
	sgl_desc->sgl_entries[0].size = size;

	if (vm->ext_region_supported) {
		destVM[0].perm = PERM_READ;
		acl_desc->acl_entries[0].perms = GH_RM_ACL_R;
	}

	ret = qcom_scm_assign_mem(phys, size, &srcvmid, destVM,
				ARRAY_SIZE(destVM));
	if (ret) {
		pr_err("failed qcom_assign for %pa address of size %zx - subsys VMid %d rc:%d\n",
			&phys, size, vmid, ret);
		goto err_hyp_assign;
	}

	/*
	 * A system VM is deemed critical for the functioning of the system.
	 * The memory donated to it cannot be reclaimed by the host OS once
	 * donated, whereas memory lent to a non-system VM can be reclaimed
	 * when that VM terminates.
	 */
	if (is_system_vm) {
		ret = gh_rm_mem_donate(GH_RM_MEM_TYPE_NORMAL, 0, 0,
				acl_desc, sgl_desc, NULL, &vm->mem_handle);
	} else {
		if (vm->ext_region_supported)
			ret = ghd_rm_mem_lend(GH_RM_MEM_TYPE_NORMAL, 0,
					vm->ext_region->ext_label, acl_desc,
					sgl_desc, NULL, &vm->ext_region->ext_mem_handle);
		else
			ret = ghd_rm_mem_lend(GH_RM_MEM_TYPE_NORMAL, 0, 0, acl_desc,
					sgl_desc, NULL, &vm->mem_handle);
	}

	if (ret) {
		ret = qcom_scm_assign_mem(phys, size, &dstvmid,
					srcVM, ARRAY_SIZE(srcVM));
		if (ret)
			pr_err("failed qcom_assign for %pa address of size %zx - subsys VMid %d rc:%d\n",
				&phys, size, srcVM[0].vmid, ret);
	}

err_hyp_assign:
	kfree(acl_desc);
	kfree(sgl_desc);
	return ret;
}
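
/*
 * Configure and authenticate the loaded VM image with the resource manager
 * (currently only GH_VM_AUTH_PIL_ELF is handled), initialise the VM, wait
 * for it to become READY, populate its hypervisor resources and, for a
 * secure VM, record how many vCPUs it is allowed to create.
 */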
long gh_vm_configure(u16 auth_mech, u64 image_offset,
			u64 image_size, u64 dtb_offset, u64 dtb_size,
			u32 pas_id, const char *fw_name, struct gh_vm *vm)
{
	struct gh_vm_auth_param_entry entry;
	long ret = -EINVAL;
	int nr_vcpus = 0;

	switch (auth_mech) {
	case GH_VM_AUTH_PIL_ELF:
		ret = gh_rm_vm_config_image(vm->vmid, auth_mech,
					vm->mem_handle, image_offset,
					image_size, dtb_offset, dtb_size);
		if (ret) {
			pr_err("VM_CONFIG failed for VM:%d %ld\n",
				vm->vmid, ret);
			return ret;
		}
		vm->status.vm_status = GH_RM_VM_STATUS_AUTH;

		if (!pas_id) {
			pr_err("Incorrect pas_id %u for VM:%d\n", pas_id,
				vm->vmid);
			return -EINVAL;
		}
		entry.auth_param_type = GH_VM_AUTH_PARAM_PAS_ID;
		entry.auth_param = pas_id;
		ret = gh_rm_vm_auth_image(vm->vmid, 1, &entry);
		if (ret) {
			pr_err("VM_AUTH_IMAGE failed for VM:%d %ld\n",
				vm->vmid, ret);
			return ret;
		}
		vm->status.vm_status = GH_RM_VM_STATUS_INIT;
		break;
	default:
		pr_err("Invalid auth mechanism for VM\n");
		return ret;
	}

	ret = ghd_rm_vm_init(vm->vmid);
	if (ret) {
		pr_err("VM_INIT_IMAGE failed for VM:%d %ld\n",
			vm->vmid, ret);
		return ret;
	}

	ret = gh_wait_for_vm_status(vm, GH_RM_VM_STATUS_READY);
	if (ret < 0)
		pr_err("wait for VM_STATUS_READY interrupted %ld\n", ret);

	ret = gh_rm_populate_hyp_res(vm->vmid, fw_name);
	if (ret < 0) {
		pr_err("Failed to populate resources %ld\n", ret);
		return ret;
	}

	if (vm->is_secure_vm) {
		nr_vcpus = gh_get_nr_vcpus(vm->vmid);
		if (nr_vcpus < 0) {
			pr_err("Failed to get vcpu count for vm %d ret %d\n",
				vm->vmid, nr_vcpus);
			ret = nr_vcpus;
			return ret;
		} else if (!nr_vcpus) {
			/* Hypervisor-scheduled case: at least one vCPU is needed */
			nr_vcpus = 1;
		}
		vm->allowed_vcpus = nr_vcpus;
	}
	return ret;
}

static long gh_vm_ioctl(struct file *filp,
			unsigned int cmd, unsigned long arg)
{
	struct gh_vm *vm = filp->private_data;
	long ret = -EINVAL;

	switch (cmd) {
	case GH_CREATE_VCPU:
		ret = gh_vm_ioctl_create_vcpu(vm, arg);
		break;
	case GH_VM_SET_FW_NAME:
		ret = gh_vm_ioctl_set_fw_name(vm, arg);
		break;
	case GH_VM_GET_FW_NAME:
		ret = gh_vm_ioctl_get_fw_name(vm, arg);
		break;
	case GH_VM_GET_VCPU_COUNT:
		ret = gh_vm_ioctl_get_vcpu_count(vm);
		break;
	default:
		ret = gh_virtio_backend_ioctl(vm->fw_name, cmd, arg);
		break;
	}
	return ret;
}

static int gh_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gh_vm *vm = file->private_data;

	return gh_virtio_backend_mmap(vm->fw_name, vma);
}

static int gh_vm_release(struct inode *inode, struct file *filp)
{
	struct gh_vm *vm = filp->private_data;

	if (!vm->keep_running)
		gh_put_vm(vm);
	return 0;
}

static const struct file_operations gh_vm_fops = {
	.unlocked_ioctl = gh_vm_ioctl,
	.mmap = gh_vm_mmap,
	.release = gh_vm_release,
	.llseek = noop_llseek,
};
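
/*
 * Allocate the VM context and its extended-region bookkeeping, register
 * for resource-manager notifications and take the initial reference that
 * is dropped by gh_vm_release()/gh_put_vm().
 */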
static struct gh_vm *gh_create_vm(void)
{
	struct gh_vm *vm;
	struct gh_ext_reg *ext_region;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return ERR_PTR(-ENOMEM);

	ext_region = kzalloc(sizeof(*ext_region), GFP_KERNEL);
	if (!ext_region) {
		kfree(vm);
		return ERR_PTR(-ENOMEM);
	}
	vm->ext_region = ext_region;

	mutex_init(&vm->vm_lock);
	vm->rm_nb.priority = 1;
	vm->rm_nb.notifier_call = gh_vm_rm_notifier_fn;
	ret = gh_rm_register_notifier(&vm->rm_nb);
	if (ret) {
		mutex_destroy(&vm->vm_lock);
		kfree(ext_region);
		kfree(vm);
		return ERR_PTR(ret);
	}

	refcount_set(&vm->users_count, 1);
	init_waitqueue_head(&vm->vm_status_wait);
	vm->status.vm_status = GH_RM_VM_STATUS_NO_STATE;
	vm->exit_type = -EINVAL;
	return vm;
}

static long gh_dev_ioctl_create_vm(unsigned long arg)
{
	struct gh_vm *vm;
	struct file *file;
	int fd, err;

	vm = gh_create_vm();
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		err = fd;
		goto err_destroy_vm;
	}

	file = anon_inode_getfile("gunyah-vm", &gh_vm_fops, vm, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_put_fd;
	}
	fd_install(fd, file);
	return fd;

err_put_fd:
	put_unused_fd(fd);
err_destroy_vm:
	gh_put_vm(vm);
	return err;
}
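
/*
 * Typical userspace flow, as a rough sketch (the exact uapi structures for
 * the ioctl arguments live in the gunyah uapi headers and the device node
 * path depends on udev; both are assumptions here):
 *
 *	int dev = open("/dev/qgunyah", O_RDWR);
 *	int vm_fd = ioctl(dev, GH_CREATE_VM, 0);
 *	ioctl(vm_fd, GH_VM_SET_FW_NAME, &fw_name_arg);
 *	int vcpu_fd = ioctl(vm_fd, GH_CREATE_VCPU, 0);
 *	ioctl(vcpu_fd, GH_VCPU_RUN, 0);   // blocks until the VM exits
 */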
static long gh_dev_ioctl(struct file *filp,
			unsigned int cmd, unsigned long arg)
{
	long ret = -EINVAL;

	switch (cmd) {
	case GH_CREATE_VM:
		ret = gh_dev_ioctl_create_vm(arg);
		break;
	default:
		pr_err("Invalid gunyah dev ioctl 0x%x\n", cmd);
		break;
	}
	return ret;
}

static const struct file_operations gh_dev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = gh_dev_ioctl,
	.llseek = noop_llseek,
};

static struct miscdevice gh_dev = {
	.name = "qgunyah",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &gh_dev_fops,
};
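
/*
 * Emit a KOBJ_CHANGE uevent on the misc device: EVENT=create or
 * EVENT=destroy (the latter with the VM exit type), plus the VM's
 * firmware name, so userspace can track VM lifecycle transitions.
 */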
void gh_uevent_notify_change(unsigned int type, struct gh_vm *vm)
{
	struct kobj_uevent_env *env;

	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
	if (!env)
		return;

	if (type == GH_EVENT_CREATE_VM) {
		add_uevent_var(env, "EVENT=create");
	} else if (type == GH_EVENT_DESTROY_VM) {
		add_uevent_var(env, "EVENT=destroy");
		add_uevent_var(env, "vm_exit=%d", vm->exit_type);
	}
	add_uevent_var(env, "vm_name=%s", vm->fw_name);
	env->envp[env->envp_idx++] = NULL;
	kobject_uevent_env(&gh_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
	kfree(env);
}

static int __init gh_init(void)
{
	int ret;

	ret = gh_secure_vm_loader_init();
	if (ret)
		pr_err("gunyah: secure loader init failed %d\n", ret);

	ret = gh_proxy_sched_init();
	if (ret)
		pr_err("gunyah: proxy scheduler init failed %d\n", ret);

	ret = misc_register(&gh_dev);
	if (ret) {
		pr_err("gunyah: misc device register failed %d\n", ret);
		goto err_gh_init;
	}

	ret = gh_virtio_backend_init();
	if (ret) {
		pr_err("gunyah: virtio backend init failed %d\n", ret);
		goto err_misc_deregister;
	}

	return 0;

err_misc_deregister:
	misc_deregister(&gh_dev);
err_gh_init:
	gh_proxy_sched_exit();
	gh_secure_vm_loader_exit();
	return ret;
}
module_init(gh_init);

static void __exit gh_exit(void)
{
	misc_deregister(&gh_dev);
	gh_proxy_sched_exit();
	gh_secure_vm_loader_exit();
	gh_virtio_backend_exit();
}
module_exit(gh_exit);

MODULE_LICENSE("GPL");