mem-buf-dev-gh.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <soc/qcom/secure_buffer.h>

#include "mem-buf-dev.h"
#include "mem-buf-ids.h"

#define CREATE_TRACE_POINTS
#include "trace-mem-buf.h"

EXPORT_TRACEPOINT_SYMBOL(send_alloc_req);
EXPORT_TRACEPOINT_SYMBOL(receive_alloc_req);
EXPORT_TRACEPOINT_SYMBOL(send_relinquish_msg);
EXPORT_TRACEPOINT_SYMBOL(receive_relinquish_msg);
EXPORT_TRACEPOINT_SYMBOL(send_alloc_resp_msg);
EXPORT_TRACEPOINT_SYMBOL(receive_alloc_resp_msg);
EXPORT_TRACEPOINT_SYMBOL(mem_buf_alloc_info);
EXPORT_TRACEPOINT_SYMBOL(send_relinquish_resp_msg);
EXPORT_TRACEPOINT_SYMBOL(receive_relinquish_resp_msg);

struct gh_acl_desc *mem_buf_vmid_perm_list_to_gh_acl(int *vmids, int *perms,
						     unsigned int nr_acl_entries)
{
	struct gh_acl_desc *gh_acl;
	size_t size;
	unsigned int i;

	size = offsetof(struct gh_acl_desc, acl_entries[nr_acl_entries]);
	gh_acl = kmalloc(size, GFP_KERNEL);
	if (!gh_acl)
		return ERR_PTR(-ENOMEM);

	gh_acl->n_acl_entries = nr_acl_entries;
	for (i = 0; i < nr_acl_entries; i++) {
		gh_acl->acl_entries[i].vmid = vmids[i];
		gh_acl->acl_entries[i].perms = perms[i];
	}

	return gh_acl;
}
EXPORT_SYMBOL(mem_buf_vmid_perm_list_to_gh_acl);
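
/*
 * Minimal usage sketch, assuming the caller already holds parallel vmid/perm
 * arrays; the specific VMID and permission values below are illustrative only:
 *
 *	int vmids[] = { VMID_HLOS };
 *	int perms[] = { PERM_READ | PERM_WRITE };
 *	struct gh_acl_desc *acl;
 *
 *	acl = mem_buf_vmid_perm_list_to_gh_acl(vmids, perms, ARRAY_SIZE(vmids));
 *	if (IS_ERR(acl))
 *		return PTR_ERR(acl);
 *	...
 *	kfree(acl);
 */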

struct gh_sgl_desc *mem_buf_sgt_to_gh_sgl_desc(struct sg_table *sgt)
{
	struct gh_sgl_desc *gh_sgl;
	size_t size;
	int i;
	struct scatterlist *sg;

	/* gh_sgl_desc uses u16. Use struct scatterlist instead in future */
	if (WARN(sgt->orig_nents > U16_MAX, "Too many sgl_entries\n"))
		return ERR_PTR(-EINVAL);

	size = offsetof(struct gh_sgl_desc, sgl_entries[sgt->orig_nents]);
	gh_sgl = kvmalloc(size, GFP_KERNEL);
	if (!gh_sgl)
		return ERR_PTR(-ENOMEM);

	gh_sgl->n_sgl_entries = sgt->orig_nents;
	for_each_sgtable_sg(sgt, sg, i) {
		gh_sgl->sgl_entries[i].ipa_base = sg_phys(sg);
		gh_sgl->sgl_entries[i].size = sg->length;
	}

	return gh_sgl;
}
EXPORT_SYMBOL(mem_buf_sgt_to_gh_sgl_desc);

int mem_buf_gh_acl_desc_to_vmid_perm_list(struct gh_acl_desc *acl_desc,
					  int **vmids, int **perms)
{
	int *vmids_arr = NULL, *perms_arr = NULL;
	u32 nr_acl_entries = acl_desc->n_acl_entries;
	unsigned int i;

	if (!vmids || !perms)
		return -EINVAL;

	vmids_arr = kmalloc_array(nr_acl_entries, sizeof(*vmids_arr),
				  GFP_KERNEL);
	if (!vmids_arr)
		return -ENOMEM;

	perms_arr = kmalloc_array(nr_acl_entries, sizeof(*perms_arr),
				  GFP_KERNEL);
	if (!perms_arr) {
		kfree(vmids_arr);
		return -ENOMEM;
	}

	*vmids = vmids_arr;
	*perms = perms_arr;

	for (i = 0; i < nr_acl_entries; i++) {
		vmids_arr[i] = acl_desc->acl_entries[i].vmid;
		perms_arr[i] = acl_desc->acl_entries[i].perms;
	}

	return 0;
}
EXPORT_SYMBOL(mem_buf_gh_acl_desc_to_vmid_perm_list);

struct sg_table *dup_gh_sgl_desc_to_sgt(struct gh_sgl_desc *sgl_desc)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg;

	if (!sgl_desc || !sgl_desc->n_sgl_entries)
		return ERR_PTR(-EINVAL);

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, sgl_desc->n_sgl_entries, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	for_each_sg(new_table->sgl, sg, new_table->nents, i) {
		sg_set_page(sg, phys_to_page(sgl_desc->sgl_entries[i].ipa_base),
			    sgl_desc->sgl_entries[i].size, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = 0;
	}

	return new_table;
}
EXPORT_SYMBOL(dup_gh_sgl_desc_to_sgt);
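
/*
 * Minimal usage sketch, assuming an sgl_desc received back from the
 * hypervisor. The duplicated table carries only page/length information (no
 * DMA mapping) and is released the same way the cleanup paths below do:
 *
 *	struct sg_table *sgt = dup_gh_sgl_desc_to_sgt(sgl_desc);
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	...
 *	sg_free_table(sgt);
 *	kfree(sgt);
 */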

struct gh_sgl_desc *dup_gh_sgl_desc(struct gh_sgl_desc *sgl_desc)
{
	size_t size;
	struct gh_sgl_desc *copy;

	if (!sgl_desc)
		return NULL;

	size = offsetof(struct gh_sgl_desc, sgl_entries[sgl_desc->n_sgl_entries]);
	copy = kvmalloc(size, GFP_KERNEL);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	memcpy(copy, sgl_desc, size);
	return copy;
}
EXPORT_SYMBOL(dup_gh_sgl_desc);

size_t mem_buf_get_sgl_buf_size(struct gh_sgl_desc *sgl_desc)
{
	size_t size = 0;
	unsigned int i;

	for (i = 0; i < sgl_desc->n_sgl_entries; i++)
		size += sgl_desc->sgl_entries[i].size;

	return size;
}
EXPORT_SYMBOL(mem_buf_get_sgl_buf_size);

static int __mem_buf_map_mem_s2_cleanup_donate(struct gh_sgl_desc *sgl_desc,
					       int src_vmid, gh_memparcel_handle_t *handle)
{
	int ret;
	int src_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
	struct mem_buf_lend_kernel_arg arg = {
		.nr_acl_entries = 1,
		.vmids = &src_vmid,
		.perms = &src_perms,
		.flags = 0, /* No sanitize as buffer unmodified */
		.label = 0,
	};
	struct sg_table *sgt;

	sgt = dup_gh_sgl_desc_to_sgt(sgl_desc);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	ret = mem_buf_assign_mem_gunyah(GH_RM_TRANS_TYPE_DONATE, sgt, &arg);
	if (!ret)
		*handle = arg.memparcel_hdl;

	sg_free_table(sgt);
	kfree(sgt);
	return ret;
}

static int mem_buf_hyp_assign_table_gh(struct gh_sgl_desc *sgl_desc, int src_vmid,
				       struct gh_acl_desc *acl_desc);

/*
 * @memparcel_hdl:
 *	GH_RM_TRANS_TYPE_DONATE - memparcel_hdl will be set to MEM_BUF_MEMPARCEL_INVALID
 *	on success, and (possibly) set to a different valid memparcel on error. This is
 *	because accepting a donated memparcel handle destroys that handle.
 *	GH_RM_TRANS_TYPE_LEND - unmodified.
 *	GH_RM_TRANS_TYPE_SHARE - unmodified.
 * @sgl_desc:
 *	If *sgl_desc is not NULL, specific IPA address(es) are requested. Otherwise,
 *	the hypervisor chooses the IPA address and returns it here.
 */
int mem_buf_map_mem_s2(u32 op, gh_memparcel_handle_t *__memparcel_hdl,
		       struct gh_acl_desc *acl_desc, struct gh_sgl_desc **__sgl_desc,
		       int src_vmid)
{
	int ret, ret2;
	struct gh_sgl_desc *sgl_desc;
	u8 flags = GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
		   GH_RM_MEM_ACCEPT_DONE;
	gh_memparcel_handle_t memparcel_hdl = *__memparcel_hdl;

	if (!acl_desc || !__sgl_desc)
		return -EINVAL;

	/*
	 * Memory returns to its original IPA address when accepted by HLOS. For
	 * example, scattered memory returns to being scattered memory.
	 */
	if (current_vmid != VMID_HLOS || (*__sgl_desc && (*__sgl_desc)->n_sgl_entries == 1))
		flags |= GH_RM_MEM_ACCEPT_MAP_IPA_CONTIGUOUS;

	pr_debug("%s: adding CPU MMU stage 2 mappings\n", __func__);
	sgl_desc = gh_rm_mem_accept(memparcel_hdl, GH_RM_MEM_TYPE_NORMAL, op,
				    flags, 0, acl_desc, *__sgl_desc,
				    NULL, 0);
	if (IS_ERR(sgl_desc)) {
		pr_err("%s failed to map memory in stage 2 rc: %ld\n", __func__,
		       PTR_ERR(sgl_desc));
		return PTR_ERR(sgl_desc);
	}

	if (op == GH_RM_TRANS_TYPE_DONATE)
		*__memparcel_hdl = MEM_BUF_MEMPARCEL_INVALID;

	ret = mem_buf_hyp_assign_table_gh(sgl_desc, src_vmid, acl_desc);
	if (ret)
		goto err_relinquish;

	trace_map_mem_s2(memparcel_hdl, sgl_desc);
	*__sgl_desc = sgl_desc;
	return 0;

err_relinquish:
	if (op == GH_RM_TRANS_TYPE_DONATE)
		ret2 = __mem_buf_map_mem_s2_cleanup_donate(sgl_desc, src_vmid,
							   __memparcel_hdl);
	else
		ret2 = mem_buf_unmap_mem_s2(memparcel_hdl);

	/*
	 * Only free sgl_desc if caller passed NULL in *__sgl_desc to request
	 * gh_rm_mem_accept to allocate new IPA/sgl_desc.
	 */
	if (sgl_desc != *__sgl_desc)
		kvfree(sgl_desc);

	if (ret2) {
		pr_err("%s failed to recover\n", __func__);
		return -EADDRNOTAVAIL;
	}

	return ret;
}
EXPORT_SYMBOL(mem_buf_map_mem_s2);
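
/*
 * Minimal usage sketch for the accept path, assuming the caller holds a
 * memparcel handle "hdl", an ACL "acl" naming the local VM, and the source
 * VMID "src_vmid" (all placeholders). Passing NULL for the sgl_desc lets the
 * hypervisor choose the IPA range:
 *
 *	struct gh_sgl_desc *sgl = NULL;
 *	int ret;
 *
 *	ret = mem_buf_map_mem_s2(GH_RM_TRANS_TYPE_LEND, &hdl, acl, &sgl, src_vmid);
 *	if (ret)
 *		return ret;
 *	ret = mem_buf_map_mem_s1(sgl);
 *	...
 *	mem_buf_unmap_mem_s1(sgl);
 *	mem_buf_unmap_mem_s2(hdl);
 *	kvfree(sgl);
 */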

int mem_buf_unmap_mem_s2(gh_memparcel_handle_t memparcel_hdl)
{
	int ret;

	pr_debug("%s: removing CPU MMU stage 2 mappings\n", __func__);
	ret = gh_rm_mem_release(memparcel_hdl, 0);
	if (ret < 0)
		pr_err("%s: Failed to release memparcel hdl: 0x%x rc: %d\n",
		       __func__, memparcel_hdl, ret);
	else
		pr_debug("%s: CPU MMU stage 2 mappings removed\n", __func__);

	return ret;
}
EXPORT_SYMBOL(mem_buf_unmap_mem_s2);

int mem_buf_map_mem_s1(struct gh_sgl_desc *sgl_desc)
{
	u64 base, size;
	int i, ret;

	for (i = 0; i < sgl_desc->n_sgl_entries; i++) {
		base = sgl_desc->sgl_entries[i].ipa_base;
		size = sgl_desc->sgl_entries[i].size;
		ret = add_memory_subsection(numa_node_id(), base, size);
		if (ret) {
			pr_err("%s: failed to add memory base=%llx, size=%llx, ret=%d\n",
			       __func__, base, size, ret);
			goto out;
		}
	}

	return 0;

out:
	for (i--; i >= 0; i--) {
		base = sgl_desc->sgl_entries[i].ipa_base;
		size = sgl_desc->sgl_entries[i].size;
		remove_memory_subsection(base, size);
	}
	return ret;
}
EXPORT_SYMBOL(mem_buf_map_mem_s1);

int mem_buf_unmap_mem_s1(struct gh_sgl_desc *sgl_desc)
{
	u64 base, size;
	int i, ret = 0;

	for (i = 0; i < sgl_desc->n_sgl_entries; i++) {
		base = sgl_desc->sgl_entries[i].ipa_base;
		size = sgl_desc->sgl_entries[i].size;
		ret = remove_memory_subsection(base, size);
		if (ret)
			pr_err("%s: failed to remove memory base=%llx, size=%llx, ret=%d\n",
			       __func__, base, size, ret);
	}

	return ret;
}
EXPORT_SYMBOL(mem_buf_unmap_mem_s1);

static int mem_buf_hyp_assign_table_gh(struct gh_sgl_desc *sgl_desc, int src_vmid,
				       struct gh_acl_desc *acl_desc)
{
	struct sg_table *sgt;
	int *dst_vmids, *dst_perms;
	int ret;

	sgt = dup_gh_sgl_desc_to_sgt(sgl_desc);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	ret = mem_buf_gh_acl_desc_to_vmid_perm_list(acl_desc, &dst_vmids, &dst_perms);
	if (ret)
		goto err_free_sgt;

	ret = mem_buf_hyp_assign_table(sgt, &src_vmid, 1, dst_vmids, dst_perms,
				       acl_desc->n_acl_entries);
	kfree(dst_vmids);
	kfree(dst_perms);
err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
	return ret;
}

int mem_buf_assign_mem_gunyah(u32 op, struct sg_table *sgt,
			      struct mem_buf_lend_kernel_arg *arg)
{
	int ret;
	struct gh_sgl_desc *gh_sgl;
	struct gh_acl_desc *gh_acl;

	arg->memparcel_hdl = MEM_BUF_MEMPARCEL_INVALID;

	ret = mem_buf_vm_uses_gunyah(arg->vmids, arg->nr_acl_entries);
	if (ret <= 0)
		return ret;

	gh_sgl = mem_buf_sgt_to_gh_sgl_desc(sgt);
	if (IS_ERR(gh_sgl))
		return PTR_ERR(gh_sgl);

	gh_acl = mem_buf_vmid_perm_list_to_gh_acl(arg->vmids, arg->perms,
						  arg->nr_acl_entries);
	if (IS_ERR(gh_acl)) {
		ret = PTR_ERR(gh_acl);
		goto err_gh_acl;
	}

	pr_debug("%s: Invoking Gunyah Lend/Share\n", __func__);
	if (op == GH_RM_TRANS_TYPE_LEND) {
		ret = ghd_rm_mem_lend(GH_RM_MEM_TYPE_NORMAL, arg->flags,
				      arg->label, gh_acl, gh_sgl,
				      NULL /* Default memory attributes */,
				      &arg->memparcel_hdl);
	} else if (op == GH_RM_TRANS_TYPE_SHARE) {
		ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, arg->flags,
				       arg->label, gh_acl, gh_sgl,
				       NULL /* Default memory attributes */,
				       &arg->memparcel_hdl);
	} else if (op == GH_RM_TRANS_TYPE_DONATE) {
		ret = gh_rm_mem_donate(GH_RM_MEM_TYPE_NORMAL, arg->flags,
				       arg->label, gh_acl, gh_sgl,
				       NULL /* Default memory attributes */,
				       &arg->memparcel_hdl);
	} else {
		pr_err("%s: Unrecognized op %d\n", __func__, op);
		ret = -EINVAL;
	}
	if (ret < 0) {
		pr_err("%s: Gunyah lend/share failed rc:%d\n",
		       __func__, ret);
		goto err_gunyah;
	}

	kfree(gh_acl);
	kvfree(gh_sgl);
	return 0;

err_gunyah:
	kfree(gh_acl);
err_gh_acl:
	kvfree(gh_sgl);
	return ret;
}
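
/*
 * Minimal usage sketch, mirroring __mem_buf_map_mem_s2_cleanup_donate() above.
 * "sgt" and "dst_vmid" are placeholders for the caller's scatter-gather table
 * and destination VMID; on success the memparcel handle comes back in
 * arg.memparcel_hdl and can later be reclaimed with
 * mem_buf_unassign_mem_gunyah():
 *
 *	int dst_perms = PERM_READ | PERM_WRITE;
 *	struct mem_buf_lend_kernel_arg arg = {
 *		.nr_acl_entries = 1,
 *		.vmids = &dst_vmid,
 *		.perms = &dst_perms,
 *	};
 *	int ret;
 *
 *	ret = mem_buf_assign_mem_gunyah(GH_RM_TRANS_TYPE_LEND, sgt, &arg);
 */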

int mem_buf_unassign_mem_gunyah(gh_memparcel_handle_t memparcel_hdl)
{
	int ret;

	pr_debug("%s: Beginning gunyah reclaim\n", __func__);
	ret = ghd_rm_mem_reclaim(memparcel_hdl, 0);
	if (ret) {
		pr_err("%s: Gunyah reclaim failed\n", __func__);
		return ret;
	}
	pr_debug("%s: Finished gunyah reclaim\n", __func__);

	return ret;
}