rsc_mgr_rpc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/gunyah_rsc_mgr.h>

#include "rsc_mgr.h"

/* Message IDs: Memory Management */
#define GH_RM_RPC_MEM_LEND                      0x51000012
#define GH_RM_RPC_MEM_SHARE                     0x51000013
#define GH_RM_RPC_MEM_RECLAIM                   0x51000015
#define GH_RM_RPC_MEM_APPEND                    0x51000018

/* Message IDs: VM Management */
#define GH_RM_RPC_VM_ALLOC_VMID                 0x56000001
#define GH_RM_RPC_VM_DEALLOC_VMID               0x56000002
#define GH_RM_RPC_VM_START                      0x56000004
#define GH_RM_RPC_VM_STOP                       0x56000005
#define GH_RM_RPC_VM_RESET                      0x56000006
#define GH_RM_RPC_VM_CONFIG_IMAGE               0x56000009
#define GH_RM_RPC_VM_INIT                       0x5600000B
#define GH_RM_RPC_VM_GET_HYP_RESOURCES          0x56000020
#define GH_RM_RPC_VM_GET_VMID                   0x56000024
#define GH_RM_RPC_VM_SET_FIRMWARE_MEM           0x56000032

struct gh_rm_vm_common_vmid_req {
        __le16 vmid;
        __le16 _padding;
} __packed;

/* Call: MEM_LEND, MEM_SHARE */
#define GH_MEM_SHARE_REQ_FLAGS_APPEND           BIT(1)

struct gh_rm_mem_share_req_header {
        u8 mem_type;
        u8 _padding0;
        u8 flags;
        u8 _padding1;
        __le32 label;
} __packed;

struct gh_rm_mem_share_req_acl_section {
        __le32 n_entries;
        struct gh_rm_mem_acl_entry entries[];
};

struct gh_rm_mem_share_req_mem_section {
        __le16 n_entries;
        __le16 _padding;
        struct gh_rm_mem_entry entries[];
};

/* Call: MEM_RELEASE */
struct gh_rm_mem_release_req {
        __le32 mem_handle;
        u8 flags; /* currently not used */
        u8 _padding0;
        __le16 _padding1;
} __packed;

/* Call: MEM_APPEND */
#define GH_MEM_APPEND_REQ_FLAGS_END             BIT(0)

struct gh_rm_mem_append_req_header {
        __le32 mem_handle;
        u8 flags;
        u8 _padding0;
        __le16 _padding1;
} __packed;

/* Call: VM_ALLOC */
struct gh_rm_vm_alloc_vmid_resp {
        __le16 vmid;
        __le16 _padding;
} __packed;

/* Call: VM_STOP */
#define GH_RM_VM_STOP_FLAG_FORCE_STOP           BIT(0)
#define GH_RM_VM_STOP_REASON_FORCE_STOP         3

struct gh_rm_vm_stop_req {
        __le16 vmid;
        u8 flags;
        u8 _padding;
        __le32 stop_reason;
} __packed;

/* Call: VM_CONFIG_IMAGE */
struct gh_rm_vm_config_image_req {
        __le16 vmid;
        __le16 auth_mech;
        __le32 mem_handle;
        __le64 image_offset;
        __le64 image_size;
        __le64 dtb_offset;
        __le64 dtb_size;
} __packed;

/* Call: VM_SET_FIRMWARE_MEM */
struct gh_vm_set_firmware_mem_req {
        __le16 vmid;
        __le16 reserved;
        __le32 mem_handle;
        __le64 fw_offset;
        __le64 fw_size;
} __packed;

#define GH_RM_MAX_MEM_ENTRIES   512

/*
 * Several RM calls take only a VMID as a parameter and return only the
 * standard response. Deduplicate that boilerplate with this common helper.
 */
static int gh_rm_common_vmid_call(struct gh_rm *rm, u32 message_id, u16 vmid)
{
        struct gh_rm_vm_common_vmid_req req_payload = {
                .vmid = cpu_to_le16(vmid),
        };

        return gh_rm_call(rm, message_id, &req_payload, sizeof(req_payload), NULL, NULL);
}

static int _gh_rm_mem_append(struct gh_rm *rm, u32 mem_handle, bool end_append,
                             struct gh_rm_mem_entry *mem_entries, size_t n_mem_entries)
{
        struct gh_rm_mem_share_req_mem_section *mem_section;
        struct gh_rm_mem_append_req_header *req_header;
        size_t msg_size = 0;
        void *msg;
        int ret;

        msg_size += sizeof(struct gh_rm_mem_append_req_header);
        msg_size += struct_size(mem_section, entries, n_mem_entries);

        msg = kzalloc(msg_size, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        req_header = msg;
        mem_section = (void *)(req_header + 1);

        req_header->mem_handle = cpu_to_le32(mem_handle);
        if (end_append)
                req_header->flags |= GH_MEM_APPEND_REQ_FLAGS_END;

        mem_section->n_entries = cpu_to_le16(n_mem_entries);
        memcpy(mem_section->entries, mem_entries, sizeof(*mem_entries) * n_mem_entries);

        ret = gh_rm_call(rm, GH_RM_RPC_MEM_APPEND, msg, msg_size, NULL, NULL);
        kfree(msg);

        return ret;
}

static int gh_rm_mem_append(struct gh_rm *rm, u32 mem_handle,
                            struct gh_rm_mem_entry *mem_entries, size_t n_mem_entries)
{
        bool end_append;
        int ret = 0;
        size_t n;

        while (n_mem_entries) {
                if (n_mem_entries > GH_RM_MAX_MEM_ENTRIES) {
                        end_append = false;
                        n = GH_RM_MAX_MEM_ENTRIES;
                } else {
                        end_append = true;
                        n = n_mem_entries;
                }

                ret = _gh_rm_mem_append(rm, mem_handle, end_append, mem_entries, n);
                if (ret)
                        break;

                mem_entries += n;
                n_mem_entries -= n;
        }

        return ret;
}

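/*
 * Worked example of the batching above (illustrative only): a parcel with
 * 1200 memory entries sends the first 512 in the MEM_LEND/MEM_SHARE message
 * with GH_MEM_SHARE_REQ_FLAGS_APPEND set, then gh_rm_mem_append() issues two
 * MEM_APPEND calls: 512 entries with end_append == false, and the remaining
 * 176 entries with end_append == true, which sets GH_MEM_APPEND_REQ_FLAGS_END
 * to close the transaction.
 */
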
static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_mem_parcel *p)
{
        size_t msg_size = 0, initial_mem_entries = p->n_mem_entries, resp_size;
        size_t acl_section_size, mem_section_size;
        struct gh_rm_mem_share_req_acl_section *acl_section;
        struct gh_rm_mem_share_req_mem_section *mem_section;
        struct gh_rm_mem_share_req_header *req_header;
        u32 *attr_section;
        __le32 *resp;
        void *msg;
        int ret;

        if (!p->acl_entries || !p->n_acl_entries || !p->mem_entries || !p->n_mem_entries ||
            p->n_acl_entries > U8_MAX || p->mem_handle != GH_MEM_HANDLE_INVAL)
                return -EINVAL;

        if (initial_mem_entries > GH_RM_MAX_MEM_ENTRIES)
                initial_mem_entries = GH_RM_MAX_MEM_ENTRIES;

        acl_section_size = struct_size(acl_section, entries, p->n_acl_entries);
        mem_section_size = struct_size(mem_section, entries, initial_mem_entries);
        /* The format of the message goes:
         * request header
         * ACL entries (which VMs get what kind of access to this memory parcel)
         * Memory entries (list of memory regions to share)
         * Memory attributes (currently unused, we'll hard-code the size to 0)
         */
        msg_size += sizeof(struct gh_rm_mem_share_req_header);
        msg_size += acl_section_size;
        msg_size += mem_section_size;
        msg_size += sizeof(u32); /* for memory attributes, currently unused */

        msg = kzalloc(msg_size, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        ret = gh_rm_platform_pre_mem_share(rm, p);
        if (ret) {
                kfree(msg);
                return ret;
        }

        req_header = msg;
        acl_section = (void *)req_header + sizeof(*req_header);
        mem_section = (void *)acl_section + acl_section_size;
        attr_section = (void *)mem_section + mem_section_size;

        req_header->mem_type = p->mem_type;
        if (initial_mem_entries != p->n_mem_entries)
                req_header->flags |= GH_MEM_SHARE_REQ_FLAGS_APPEND;
        req_header->label = cpu_to_le32(p->label);

        acl_section->n_entries = cpu_to_le32(p->n_acl_entries);
        memcpy(acl_section->entries, p->acl_entries,
               flex_array_size(acl_section, entries, p->n_acl_entries));

        mem_section->n_entries = cpu_to_le16(initial_mem_entries);
        memcpy(mem_section->entries, p->mem_entries,
               flex_array_size(mem_section, entries, initial_mem_entries));

        /* Set n_entries for memory attribute section to 0 */
        *attr_section = 0;

        ret = gh_rm_call(rm, message_id, msg, msg_size, (void **)&resp, &resp_size);
        kfree(msg);

        if (ret) {
                gh_rm_platform_post_mem_reclaim(rm, p);
                return ret;
        }

        p->mem_handle = le32_to_cpu(*resp);
        kfree(resp);

        if (initial_mem_entries != p->n_mem_entries) {
                ret = gh_rm_mem_append(rm, p->mem_handle,
                                       &p->mem_entries[initial_mem_entries],
                                       p->n_mem_entries - initial_mem_entries);
                if (ret) {
                        gh_rm_mem_reclaim(rm, p);
                        p->mem_handle = GH_MEM_HANDLE_INVAL;
                }
        }

        return ret;
}

/**
 * gh_rm_mem_lend() - Lend memory to other virtual machines.
 * @rm: Handle to a Gunyah resource manager
 * @parcel: Information about the memory to be lent.
 *
 * Lending removes Linux's access to the memory while the memory parcel is lent.
 */
int gh_rm_mem_lend(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel)
{
        return gh_rm_mem_lend_common(rm, GH_RM_RPC_MEM_LEND, parcel);
}

/**
 * gh_rm_mem_share() - Share memory with other virtual machines.
 * @rm: Handle to a Gunyah resource manager
 * @parcel: Information about the memory to be shared.
 *
 * Sharing keeps Linux's access to the memory while the memory parcel is shared.
 */
int gh_rm_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel)
{
        return gh_rm_mem_lend_common(rm, GH_RM_RPC_MEM_SHARE, parcel);
}

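/*
 * Minimal caller-side sketch for gh_rm_mem_lend()/gh_rm_mem_share(). The
 * gh_rm_mem_parcel, gh_rm_mem_entry and gh_rm_mem_acl_entry definitions live
 * in <linux/gunyah_rsc_mgr.h>; the field, enumerator and flag names below are
 * assumptions inferred from how this file consumes them, not a verbatim copy
 * of that header.
 *
 *      struct gh_rm_mem_acl_entry acl = {
 *              .vmid = cpu_to_le16(peer_vmid),
 *              .perms = GH_RM_ACL_R | GH_RM_ACL_W,     (assumed flag names)
 *      };
 *      struct gh_rm_mem_entry entry = {
 *              .phys_addr = cpu_to_le64(base),
 *              .size = cpu_to_le64(size),
 *      };
 *      struct gh_rm_mem_parcel parcel = {
 *              .mem_type = GH_RM_MEM_TYPE_NORMAL,      (assumed enumerator)
 *              .label = 0,
 *              .acl_entries = &acl,
 *              .n_acl_entries = 1,
 *              .mem_entries = &entry,
 *              .n_mem_entries = 1,
 *              .mem_handle = GH_MEM_HANDLE_INVAL,
 *      };
 *
 *      ret = gh_rm_mem_lend(rm, &parcel);      (or gh_rm_mem_share())
 *      ...
 *      ret = gh_rm_mem_reclaim(rm, &parcel);   (give the memory back to Linux)
 */
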
/**
 * gh_rm_mem_reclaim() - Reclaim a memory parcel
 * @rm: Handle to a Gunyah resource manager
 * @parcel: Information about the memory to be reclaimed.
 *
 * RM maps the associated memory back into the stage-2 page tables of the owner VM.
 */
int gh_rm_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel)
{
        struct gh_rm_mem_release_req req = {
                .mem_handle = cpu_to_le32(parcel->mem_handle),
        };
        int ret;

        ret = gh_rm_call(rm, GH_RM_RPC_MEM_RECLAIM, &req, sizeof(req), NULL, NULL);
        /* Only call the platform mem reclaim hooks if we reclaimed the memory */
        if (ret)
                return ret;

        return gh_rm_platform_post_mem_reclaim(rm, parcel);
}

/**
 * gh_rm_vm_set_firmware_mem() - Set the location of firmware for GH_RM_VM_AUTH_QCOM_ANDROID_PVM VMs
 * @rm: Handle to a Gunyah resource manager.
 * @vmid: VM identifier allocated with gh_rm_alloc_vmid.
 * @parcel: Memory parcel where the firmware should be loaded.
 * @fw_offset: Offset into the memory parcel where the firmware should be loaded.
 * @fw_size: Maximum size of the firmware that can be loaded.
 */
int gh_rm_vm_set_firmware_mem(struct gh_rm *rm, u16 vmid, struct gh_rm_mem_parcel *parcel,
                              u64 fw_offset, u64 fw_size)
{
        struct gh_vm_set_firmware_mem_req req = {
                .vmid = cpu_to_le16(vmid),
                .mem_handle = cpu_to_le32(parcel->mem_handle),
                .fw_offset = cpu_to_le64(fw_offset),
                .fw_size = cpu_to_le64(fw_size),
        };

        return gh_rm_call(rm, GH_RM_RPC_VM_SET_FIRMWARE_MEM, &req, sizeof(req), NULL, NULL);
}
EXPORT_SYMBOL_GPL(gh_rm_vm_set_firmware_mem);

/**
 * gh_rm_alloc_vmid() - Allocate a new VM in Gunyah. Returns the VM identifier.
 * @rm: Handle to a Gunyah resource manager
 * @vmid: Use 0 to dynamically allocate a VM. A reserved VMID can be supplied
 *        to request allocation of a platform-defined VM.
 *
 * Return: the allocated VMID or negative value on error
 */
int gh_rm_alloc_vmid(struct gh_rm *rm, u16 vmid)
{
        struct gh_rm_vm_common_vmid_req req_payload = {
                .vmid = cpu_to_le16(vmid),
        };
        struct gh_rm_vm_alloc_vmid_resp *resp_payload;
        size_t resp_size;
        void *resp;
        int ret;

        ret = gh_rm_call(rm, GH_RM_RPC_VM_ALLOC_VMID, &req_payload, sizeof(req_payload), &resp,
                         &resp_size);
        if (ret)
                return ret;

        if (!vmid) {
                resp_payload = resp;
                ret = le16_to_cpu(resp_payload->vmid);
                kfree(resp);
        }

        return ret;
}

/**
 * gh_rm_dealloc_vmid() - Dispose of a VMID
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier allocated with gh_rm_alloc_vmid
 */
int gh_rm_dealloc_vmid(struct gh_rm *rm, u16 vmid)
{
        return gh_rm_common_vmid_call(rm, GH_RM_RPC_VM_DEALLOC_VMID, vmid);
}

/**
 * gh_rm_vm_reset() - Reset a VM's resources
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier allocated with gh_rm_alloc_vmid
 *
 * As part of tearing down the VM, request RM to clean up all the VM resources
 * associated with the VM. Only after this can Linux clean up all the
 * references it maintains to resources.
 */
int gh_rm_vm_reset(struct gh_rm *rm, u16 vmid)
{
        return gh_rm_common_vmid_call(rm, GH_RM_RPC_VM_RESET, vmid);
}

/**
 * gh_rm_vm_start() - Move a VM into "ready to run" state
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier allocated with gh_rm_alloc_vmid
 *
 * On VMs which use proxy scheduling, vcpu_run is needed to actually run the VM.
 * On VMs which use Gunyah's scheduling, the vCPUs start executing in accordance
 * with Gunyah scheduling policies.
 */
int gh_rm_vm_start(struct gh_rm *rm, u16 vmid)
{
        return gh_rm_common_vmid_call(rm, GH_RM_RPC_VM_START, vmid);
}

/**
 * gh_rm_vm_stop() - Send a request to Resource Manager VM to forcibly stop a VM.
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier allocated with gh_rm_alloc_vmid
 */
int gh_rm_vm_stop(struct gh_rm *rm, u16 vmid)
{
        struct gh_rm_vm_stop_req req_payload = {
                .vmid = cpu_to_le16(vmid),
                .flags = GH_RM_VM_STOP_FLAG_FORCE_STOP,
                .stop_reason = cpu_to_le32(GH_RM_VM_STOP_REASON_FORCE_STOP),
        };

        return gh_rm_call(rm, GH_RM_RPC_VM_STOP, &req_payload, sizeof(req_payload), NULL, NULL);
}

/**
 * gh_rm_vm_configure() - Prepare a VM to start and provide the common
 *                        configuration needed by RM to configure a VM
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier allocated with gh_rm_alloc_vmid
 * @auth_mechanism: Authentication mechanism used by resource manager to verify
 *                  the virtual machine
 * @mem_handle: Handle to a previously shared memparcel that contains all parts
 *              of the VM image subject to authentication.
 * @image_offset: Start address of VM image, relative to the start of memparcel
 * @image_size: Size of the VM image
 * @dtb_offset: Start address of the devicetree binary with VM configuration,
 *              relative to start of memparcel.
 * @dtb_size: Maximum size of devicetree binary.
 */
int gh_rm_vm_configure(struct gh_rm *rm, u16 vmid, enum gh_rm_vm_auth_mechanism auth_mechanism,
                       u32 mem_handle, u64 image_offset, u64 image_size, u64 dtb_offset, u64 dtb_size)
{
        struct gh_rm_vm_config_image_req req_payload = {
                .vmid = cpu_to_le16(vmid),
                .auth_mech = cpu_to_le16(auth_mechanism),
                .mem_handle = cpu_to_le32(mem_handle),
                .image_offset = cpu_to_le64(image_offset),
                .image_size = cpu_to_le64(image_size),
                .dtb_offset = cpu_to_le64(dtb_offset),
                .dtb_size = cpu_to_le64(dtb_size),
        };

        return gh_rm_call(rm, GH_RM_RPC_VM_CONFIG_IMAGE, &req_payload, sizeof(req_payload),
                          NULL, NULL);
}

/**
 * gh_rm_vm_init() - Move the VM to initialized state.
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier
 *
 * RM will allocate needed resources for the VM.
 */
int gh_rm_vm_init(struct gh_rm *rm, u16 vmid)
{
        return gh_rm_common_vmid_call(rm, GH_RM_RPC_VM_INIT, vmid);
}

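/*
 * Illustrative VM lifecycle using the calls in this file (a sketch, not a
 * complete driver): "parcel" is assumed to already describe the guest memory
 * carrying the VM image and DTB, as in the gh_rm_mem_lend() sketch above, and
 * GH_RM_VM_AUTH_NONE is an assumed enumerator of gh_rm_vm_auth_mechanism.
 *
 *      vmid = gh_rm_alloc_vmid(rm, 0);                 (dynamic VMID)
 *      if (vmid < 0)
 *              return vmid;
 *      ret = gh_rm_mem_lend(rm, &parcel);              (donate guest memory)
 *      ret = gh_rm_vm_configure(rm, vmid, GH_RM_VM_AUTH_NONE,
 *                               parcel.mem_handle, image_offset, image_size,
 *                               dtb_offset, dtb_size);
 *      ret = gh_rm_vm_init(rm, vmid);                  (RM allocates VM resources)
 *      ret = gh_rm_vm_start(rm, vmid);                 (VM is now runnable)
 *
 * Teardown reverses the sequence:
 *
 *      gh_rm_vm_stop(rm, vmid);
 *      gh_rm_vm_reset(rm, vmid);
 *      gh_rm_mem_reclaim(rm, &parcel);
 *      gh_rm_dealloc_vmid(rm, vmid);
 */
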
/**
 * gh_rm_get_hyp_resources() - Retrieve hypervisor resources (capabilities) associated with a VM
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VMID of the other VM to get the resources of
 * @resources: Set by gh_rm_get_hyp_resources and contains the returned hypervisor resources.
 *             Caller must free the resources pointer if successful.
 */
int gh_rm_get_hyp_resources(struct gh_rm *rm, u16 vmid,
                            struct gh_rm_hyp_resources **resources)
{
        struct gh_rm_vm_common_vmid_req req_payload = {
                .vmid = cpu_to_le16(vmid),
        };
        struct gh_rm_hyp_resources *resp;
        size_t resp_size;
        int ret;

        ret = gh_rm_call(rm, GH_RM_RPC_VM_GET_HYP_RESOURCES,
                         &req_payload, sizeof(req_payload),
                         (void **)&resp, &resp_size);
        if (ret)
                return ret;

        if (!resp_size)
                return -EBADMSG;

        if (resp_size < struct_size(resp, entries, 0) ||
            resp_size != struct_size(resp, entries, le32_to_cpu(resp->n_entries))) {
                kfree(resp);
                return -EBADMSG;
        }

        *resources = resp;
        return 0;
}

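/*
 * Caller-side sketch for gh_rm_get_hyp_resources(): the response is a
 * variable-length structure with an n_entries count followed by entries[]
 * (see the size validation above); each entry describes one capability
 * granted to Linux for that VM. The per-entry fields come from
 * <linux/gunyah_rsc_mgr.h> and are not assumed here; setup_resource() is a
 * hypothetical helper.
 *
 *      struct gh_rm_hyp_resources *res;
 *      u32 i, n;
 *
 *      ret = gh_rm_get_hyp_resources(rm, vmid, &res);
 *      if (ret)
 *              return ret;
 *      n = le32_to_cpu(res->n_entries);
 *      for (i = 0; i < n; i++)
 *              setup_resource(&res->entries[i]);
 *      kfree(res);                             (caller owns the response)
 */
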
/**
 * gh_rm_get_vmid() - Retrieve VMID of this virtual machine
 * @rm: Handle to a Gunyah resource manager
 * @vmid: Filled with the VMID of this VM
 */
int gh_rm_get_vmid(struct gh_rm *rm, u16 *vmid)
{
        static u16 cached_vmid = GH_VMID_INVAL;
        size_t resp_size;
        __le32 *resp;
        int ret;

        if (cached_vmid != GH_VMID_INVAL) {
                *vmid = cached_vmid;
                return 0;
        }

        ret = gh_rm_call(rm, GH_RM_RPC_VM_GET_VMID, NULL, 0, (void **)&resp, &resp_size);
        if (ret)
                return ret;

        *vmid = cached_vmid = lower_16_bits(le32_to_cpu(*resp));
        kfree(resp);

        return ret;
}
EXPORT_SYMBOL_GPL(gh_rm_get_vmid);