mem-buf-gh.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/anon_inodes.h>
#include <linux/gunyah/gh_msgq.h>
#include <linux/kthread.h>
#include <linux/memory_hotplug.h>
#include <linux/module.h>
#include <linux/qcom_dma_heap.h>
#include <linux/qcom_tui_heap.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>

#include "../../../../drivers/dma-buf/heaps/qcom_sg_ops.h"
#include "mem-buf-gh.h"
#include "mem-buf-msgq.h"
#include "mem-buf-ids.h"
#include "trace-mem-buf.h"

#define MEM_BUF_MHP_ALIGNMENT (1UL << SUBSECTION_SHIFT)
#define MEM_BUF_TIMEOUT_MS 3500
#define to_rmt_msg(_work) container_of(_work, struct mem_buf_rmt_msg, work)

/* Maintains a list of memory buffers requested from other VMs */
static DEFINE_MUTEX(mem_buf_list_lock);
static LIST_HEAD(mem_buf_list);

/* Data structures for tracking message queue usage. */
static struct workqueue_struct *mem_buf_wq;
static void *mem_buf_msgq_hdl;

/* Maintains a list of memory buffers lent out to other VMs */
static DEFINE_MUTEX(mem_buf_xfer_mem_list_lock);
static LIST_HEAD(mem_buf_xfer_mem_list);

/**
 * struct mem_buf_rmt_msg - Represents a message sent from a remote VM
 * @msg: A pointer to the message buffer
 * @msg_size: The size of the message
 * @work: Work structure for dispatching the message processing to a worker
 * thread, so as to not block the message queue receiving thread.
 */
struct mem_buf_rmt_msg {
	void *msg;
	size_t msg_size;
	struct work_struct work;
};
/**
 * struct mem_buf_xfer_mem - Represents a memory buffer lent out or transferred
 * to another VM.
 * @size: The size of the memory buffer
 * @mem_type: The type of memory that was allocated and transferred
 * @mem_type_data: Data associated with the type of memory
 * @mem_sgt: An SG-Table representing the memory transferred
 * @secure_alloc: Denotes if the memory was assigned to the targeted VMs as part
 * of the allocation step
 * @hdl: The memparcel handle associated with the memory
 * @trans_type: The type of memory transfer associated with the memory (donation,
 * share, lend).
 * @entry: List entry for maintaining a list of memory buffers that are lent
 * out.
 * @nr_acl_entries: The number of VMIDs and permissions associated with the
 * memory
 * @dst_vmids: The VMIDs that have access to the memory
 * @dst_perms: The access permissions for the VMIDs that can access the memory
 * @obj_id: Uniquely identifies this object.
 */
struct mem_buf_xfer_mem {
	size_t size;
	enum mem_buf_mem_type mem_type;
	void *mem_type_data;
	struct sg_table *mem_sgt;
	bool secure_alloc;
	u32 trans_type;
	gh_memparcel_handle_t hdl;
	struct list_head entry;
	u32 nr_acl_entries;
	int *dst_vmids;
	int *dst_perms;
	u32 obj_id;
};

/**
 * struct mem_buf_desc - Internal data structure, which contains information
 * about a particular memory buffer.
 * @size: The size of the memory buffer
 * @acl_desc: A GH ACL descriptor that describes the VMIDs that have access to
 * the memory, as well as the permissions each VMID has.
 * @sgl_desc: A GH SG-List descriptor that describes the IPAs of the memory
 * associated with the memory buffer that was allocated from another VM.
 * @memparcel_hdl: The handle associated with the memparcel that represents the
 * memory buffer.
 * @trans_type: The type of memory transfer associated with the memory (donation,
 * share, lend).
 * @src_mem_type: The type of memory that was allocated on the remote VM
 * @src_data: Memory type specific data used by the remote VM when performing
 * the allocation.
 * @dst_mem_type: The memory type of the memory buffer on the native VM
 * @dst_data: Memory type specific data used by the native VM when adding the
 * memory to the system.
 * @filp: Pointer to the file structure for the membuf
 * @entry: List head for maintaining a list of memory buffers that have been
 * provided by remote VMs.
 * @obj_id: Uniquely identifies this object.
 */
struct mem_buf_desc {
	size_t size;
	struct gh_acl_desc *acl_desc;
	struct gh_sgl_desc *sgl_desc;
	gh_memparcel_handle_t memparcel_hdl;
	u32 trans_type;
	enum mem_buf_mem_type src_mem_type;
	void *src_data;
	enum mem_buf_mem_type dst_mem_type;
	void *dst_data;
	struct file *filp;
	struct list_head entry;
	u32 obj_id;
};

static DEFINE_IDR(mem_buf_obj_idr);
static DEFINE_MUTEX(mem_buf_idr_mutex);

struct mem_buf_xfer_dmaheap_mem {
	char name[MEM_BUF_MAX_DMAHEAP_NAME_LEN];
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
};
static int mem_buf_alloc_obj_id(void)
{
	int ret;

	mutex_lock(&mem_buf_idr_mutex);
	ret = idr_alloc_cyclic(&mem_buf_obj_idr, NULL, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&mem_buf_idr_mutex);
	if (ret < 0) {
		pr_err("%s: failed to allocate obj id rc: %d\n",
		       __func__, ret);
		return ret;
	}
	return ret;
}

static void mem_buf_destroy_obj_id(u32 obj_id)
{
	mutex_lock(&mem_buf_idr_mutex);
	idr_remove(&mem_buf_obj_idr, obj_id);
	mutex_unlock(&mem_buf_idr_mutex);
}

/* Functions invoked when handling allocation requests from other VMs. */
static int mem_buf_rmt_alloc_dmaheap_mem(struct mem_buf_xfer_mem *xfer_mem)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *mem_sgt;
	struct mem_buf_xfer_dmaheap_mem *dmaheap_mem_data = xfer_mem->mem_type_data;
	int flags = O_RDWR | O_CLOEXEC;
	struct dma_heap *heap;
	char *name = dmaheap_mem_data->name;

	pr_debug("%s: Starting DMAHEAP allocation\n", __func__);
	heap = dma_heap_find(name);
	if (!heap) {
		pr_err("%s no such heap %s\n", __func__, name);
		return -EINVAL;
	}

	dmabuf = dma_heap_buffer_alloc(heap, xfer_mem->size, flags, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("%s dmaheap_alloc failure sz: 0x%zx heap: %s flags: 0x%x rc: %ld\n",
		       __func__, xfer_mem->size, name, flags,
		       PTR_ERR(dmabuf));
		return PTR_ERR(dmabuf);
	}

	attachment = dma_buf_attach(dmabuf, mem_buf_dev);
	if (IS_ERR(attachment)) {
		pr_err("%s dma_buf_attach failure rc: %ld\n", __func__,
		       PTR_ERR(attachment));
		dma_buf_put(dmabuf);
		return PTR_ERR(attachment);
	}

	mem_sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
	if (IS_ERR(mem_sgt)) {
		pr_err("%s dma_buf_map_attachment failure rc: %ld\n", __func__,
		       PTR_ERR(mem_sgt));
		dma_buf_detach(dmabuf, attachment);
		dma_buf_put(dmabuf);
		return PTR_ERR(mem_sgt);
	}

	dmaheap_mem_data->dmabuf = dmabuf;
	dmaheap_mem_data->attachment = attachment;
	xfer_mem->mem_sgt = mem_sgt;
	xfer_mem->secure_alloc = false;

	pr_debug("%s: DMAHEAP allocation complete\n", __func__);
	return 0;
}

/*
 * See __iommu_dma_alloc_pages() @ dma-iommu.c
 * __GFP_NORETRY allows some direct reclaim for large order pages.
 * __GFP_ZERO to avoid leaking prior contents to another VM.
 */
static int mem_buf_rmt_alloc_pages(struct sg_table *sgt, unsigned int count)
{
	int ret, i = 0;
	struct page **pages;
	size_t size = count << PAGE_SHIFT;
	unsigned long order_mask = (1U << MAX_ORDER) - 1;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		for (order_mask &= (2 << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = GFP_KERNEL | __GFP_ZERO;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY | __GFP_NOWARN;
			page = alloc_pages(alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			ret = -ENOMEM;
			goto err_alloc_pages;
		}

		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}

	count = i;
	ret = sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL);
	if (ret)
		goto err_alloc_table;

	kvfree(pages);
	return 0;

err_alloc_table:
err_alloc_pages:
	while (i--)
		__free_page(pages[i]);
	kvfree(pages);
	return ret;
}
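
/*
 * Worked example of the order-mask loop above (illustrative note, not from the
 * original source; assumes a kernel where MAX_ORDER is 11): for count = 1035
 * pages, order_mask starts as 0x7ff and is first clamped to
 * (2 << __fls(1035)) - 1 = 0x7ff, so an order-10 (1024-page) allocation is
 * attempted with __GFP_NORETRY | __GFP_NOWARN. If it succeeds, 11 pages
 * remain; the mask is then clamped to 0xf, so order-3, order-1 and order-0
 * allocations follow. Only the lowest candidate order in each pass is tried
 * without __GFP_NORETRY, so heavy reclaim is never triggered for the large
 * chunks.
 */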
static int mem_buf_rmt_alloc_cma(struct sg_table *sgt, unsigned int count)
{
	struct cma *cma;
	struct page *page;
	int ret;
	u32 align;

	/*
	 * For the common case of a 4MB transfer, we want it to be nicely
	 * aligned to allow for 2MB block mappings in the S2 pagetable.
	 */
	align = min(get_order(count << PAGE_SHIFT), get_order(SZ_2M));

	/*
	 * Don't use dev_get_cma_area() as we don't want to fall back to
	 * dma_contiguous_default_area.
	 */
	cma = mem_buf_dev->cma_area;
	if (!cma)
		return -ENOMEM;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		return ret;

	page = cma_alloc(cma, count, align, false);
	if (!page) {
		ret = -ENOMEM;
		goto err_cma_alloc;
	}

	sg_set_page(sgt->sgl, page, count << PAGE_SHIFT, 0);

	/* Zero memory before transferring to Guest VM */
	memset(page_address(page), 0, count << PAGE_SHIFT);
	return 0;

err_cma_alloc:
	sg_free_table(sgt);
	return ret;
}
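
/*
 * Illustrative arithmetic for the alignment above (not from the original
 * source; assumes 4KB pages): a 4MB transfer is count = 1024 pages, so
 * get_order(4MB) = 10 while get_order(SZ_2M) = 9; align is therefore capped at
 * 9, i.e. the CMA allocation is 2MB-aligned, which is what a stage-2 block
 * mapping needs. Smaller transfers simply use their natural order.
 */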
static int mem_buf_rmt_alloc_buddy_mem(struct mem_buf_xfer_mem *xfer_mem)
{
	struct sg_table *sgt;
	int ret;
	unsigned int count = PAGE_ALIGN(xfer_mem->size) >> PAGE_SHIFT;

	pr_debug("%s: Starting DMAHEAP-BUDDY allocation\n", __func__);
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return -ENOMEM;

	if (mem_buf_dev->cma_area)
		ret = mem_buf_rmt_alloc_cma(sgt, count);
	else
		ret = mem_buf_rmt_alloc_pages(sgt, count);
	if (ret)
		goto err_alloc_pages;

	xfer_mem->mem_sgt = sgt;
	xfer_mem->secure_alloc = false;

	pr_debug("%s: DMAHEAP-BUDDY allocation complete\n", __func__);
	return 0;

err_alloc_pages:
	kfree(sgt);
	return ret;
}

static int mem_buf_rmt_alloc_mem(struct mem_buf_xfer_mem *xfer_mem)
{
	int ret = -EINVAL;

	if (xfer_mem->mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
		ret = mem_buf_rmt_alloc_dmaheap_mem(xfer_mem);
	else if (xfer_mem->mem_type == MEM_BUF_BUDDY_MEM_TYPE)
		ret = mem_buf_rmt_alloc_buddy_mem(xfer_mem);

	return ret;
}

static void mem_buf_rmt_free_dmaheap_mem(struct mem_buf_xfer_mem *xfer_mem)
{
	struct mem_buf_xfer_dmaheap_mem *dmaheap_mem_data = xfer_mem->mem_type_data;
	struct dma_buf *dmabuf = dmaheap_mem_data->dmabuf;
	struct dma_buf_attachment *attachment = dmaheap_mem_data->attachment;
	struct sg_table *mem_sgt = xfer_mem->mem_sgt;

	pr_debug("%s: Freeing DMAHEAP memory\n", __func__);
	dma_buf_unmap_attachment(attachment, mem_sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attachment);
	dma_buf_put(dmaheap_mem_data->dmabuf);

	/*
	 * No locks should be held at this point, as flush_delayed_fput may call
	 * the release callbacks of arbitrary files. It should be safe for us
	 * since we know this function is called only from our recv kthread, so
	 * we have control over what locks are currently held.
	 */
	flush_delayed_fput();
	pr_debug("%s: DMAHEAP memory freed\n", __func__);
}

static void mem_buf_rmt_free_buddy_mem(struct mem_buf_xfer_mem *xfer_mem)
{
	struct sg_table *table = xfer_mem->mem_sgt;
	struct sg_page_iter sgiter;
	bool is_cma;

	pr_debug("%s: Freeing DMAHEAP-BUDDY memory\n", __func__);

	/* Returns false when called on !cma memory */
	is_cma = cma_release(mem_buf_dev->cma_area, sg_page(table->sgl),
			     table->sgl->length >> PAGE_SHIFT);
	if (!is_cma)
		for_each_sg_page(table->sgl, &sgiter, table->nents, 0)
			__free_page(sg_page_iter_page(&sgiter));

	sg_free_table(table);
	kfree(table);
	pr_debug("%s: DMAHEAP-BUDDY memory freed\n", __func__);
}

static void mem_buf_rmt_free_mem(struct mem_buf_xfer_mem *xfer_mem)
{
	if (xfer_mem->mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
		mem_buf_rmt_free_dmaheap_mem(xfer_mem);
	else if (xfer_mem->mem_type == MEM_BUF_BUDDY_MEM_TYPE)
		mem_buf_rmt_free_buddy_mem(xfer_mem);
}
static
struct mem_buf_xfer_dmaheap_mem *mem_buf_alloc_dmaheap_xfer_mem_type_data(
								void *rmt_data)
{
	struct mem_buf_xfer_dmaheap_mem *dmaheap_mem_data;

	dmaheap_mem_data = kzalloc(sizeof(*dmaheap_mem_data), GFP_KERNEL);
	if (!dmaheap_mem_data)
		return ERR_PTR(-ENOMEM);

	strscpy(dmaheap_mem_data->name, (char *)rmt_data,
		MEM_BUF_MAX_DMAHEAP_NAME_LEN);
	pr_debug("%s: DMAHEAP source heap: %s\n", __func__,
		 dmaheap_mem_data->name);
	return dmaheap_mem_data;
}

static void *mem_buf_alloc_xfer_mem_type_data(enum mem_buf_mem_type type,
					      void *rmt_data)
{
	void *data = ERR_PTR(-EINVAL);

	if (type == MEM_BUF_DMAHEAP_MEM_TYPE)
		data = mem_buf_alloc_dmaheap_xfer_mem_type_data(rmt_data);
	else if (type == MEM_BUF_BUDDY_MEM_TYPE)
		data = NULL;

	return data;
}

static
void mem_buf_free_dmaheap_xfer_mem_type_data(struct mem_buf_xfer_dmaheap_mem *mem)
{
	kfree(mem);
}

static void mem_buf_free_xfer_mem_type_data(enum mem_buf_mem_type type,
					    void *data)
{
	if (type == MEM_BUF_DMAHEAP_MEM_TYPE)
		mem_buf_free_dmaheap_xfer_mem_type_data(data);
	/* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */
}

static
struct mem_buf_xfer_mem *mem_buf_prep_xfer_mem(void *req_msg)
{
	int ret;
	struct mem_buf_xfer_mem *xfer_mem;
	u32 nr_acl_entries;
	void *arb_payload;
	enum mem_buf_mem_type mem_type;
	void *mem_type_data;

	nr_acl_entries = get_alloc_req_nr_acl_entries(req_msg);
	if (nr_acl_entries != 1)
		return ERR_PTR(-EINVAL);

	arb_payload = get_alloc_req_arb_payload(req_msg);
	if (!arb_payload)
		return ERR_PTR(-EINVAL);

	mem_type = get_alloc_req_src_mem_type(req_msg);

	xfer_mem = kzalloc(sizeof(*xfer_mem), GFP_KERNEL);
	if (!xfer_mem)
		return ERR_PTR(-ENOMEM);

	ret = mem_buf_alloc_obj_id();
	if (ret < 0) {
		pr_err("%s failed to allocate obj_id: %d\n", __func__, ret);
		goto err_idr_alloc;
	}
	xfer_mem->obj_id = ret;

	xfer_mem->size = get_alloc_req_size(req_msg);
	xfer_mem->mem_type = mem_type;
	xfer_mem->nr_acl_entries = nr_acl_entries;
	ret = mem_buf_gh_acl_desc_to_vmid_perm_list(get_alloc_req_gh_acl_desc(req_msg),
						    &xfer_mem->dst_vmids,
						    &xfer_mem->dst_perms);
	if (ret) {
		pr_err("%s failed to create VMID and permissions list: %d\n",
		       __func__, ret);
		goto err_alloc_vmid_perm_list;
	}

	mem_type_data = mem_buf_alloc_xfer_mem_type_data(mem_type, arb_payload);
	if (IS_ERR(mem_type_data)) {
		pr_err("%s: failed to allocate mem type specific data: %ld\n",
		       __func__, PTR_ERR(mem_type_data));
		ret = PTR_ERR(mem_type_data);
		goto err_alloc_xfer_mem_type_data;
	}
	xfer_mem->mem_type_data = mem_type_data;

	INIT_LIST_HEAD(&xfer_mem->entry);
	return xfer_mem;

err_alloc_xfer_mem_type_data:
	kfree(xfer_mem->dst_vmids);
	kfree(xfer_mem->dst_perms);
err_alloc_vmid_perm_list:
	mem_buf_destroy_obj_id(xfer_mem->obj_id);
err_idr_alloc:
	kfree(xfer_mem);
	return ERR_PTR(ret);
}
static void mem_buf_free_xfer_mem(struct mem_buf_xfer_mem *xfer_mem)
{
	mem_buf_free_xfer_mem_type_data(xfer_mem->mem_type,
					xfer_mem->mem_type_data);
	kfree(xfer_mem->dst_vmids);
	kfree(xfer_mem->dst_perms);
	mem_buf_destroy_obj_id(xfer_mem->obj_id);
	kfree(xfer_mem);
}

/*
 * @owner_vmid: Owner of the memparcel handle which has @vmids and @perms
 */
static int __maybe_unused mem_buf_get_mem_xfer_type(int *vmids, int *perms,
						    unsigned int nr_acl_entries,
						    int owner_vmid)
{
	u32 i;

	for (i = 0; i < nr_acl_entries; i++)
		if (vmids[i] == owner_vmid &&
		    perms[i] != 0)
			return GH_RM_TRANS_TYPE_SHARE;

	return GH_RM_TRANS_TYPE_LEND;
}

/*
 * @owner_vmid: Owner of the memparcel handle which has @acl_desc
 */
static int mem_buf_get_mem_xfer_type_gh(struct gh_acl_desc *acl_desc, int owner_vmid)
{
	u32 i, nr_acl_entries = acl_desc->n_acl_entries;

	for (i = 0; i < nr_acl_entries; i++)
		if (acl_desc->acl_entries[i].vmid == owner_vmid &&
		    acl_desc->acl_entries[i].perms != 0)
			return GH_RM_TRANS_TYPE_SHARE;

	return GH_RM_TRANS_TYPE_LEND;
}
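
/*
 * In other words (descriptive note, not from the original source): the two
 * helpers above infer the transfer type from the ACL alone. If the owner of
 * the memparcel keeps any non-zero permissions in the ACL, e.g.
 * {owner: RWX, peer: RWX}, the transfer is a SHARE; if the owner is absent or
 * has no permissions, e.g. {peer: RWX} only, the memory is handed over
 * entirely and the transfer is a LEND.
 */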
static struct mem_buf_xfer_mem *mem_buf_process_alloc_req(void *req)
{
	int ret;
	u32 xfer_type;
	struct mem_buf_xfer_mem *xfer_mem;
	struct mem_buf_lend_kernel_arg arg = {0};

	xfer_mem = mem_buf_prep_xfer_mem(req);
	if (IS_ERR(xfer_mem))
		return xfer_mem;

	ret = mem_buf_rmt_alloc_mem(xfer_mem);
	if (ret < 0)
		goto err_rmt_alloc;

	if (!xfer_mem->secure_alloc) {
		xfer_type = get_alloc_req_xfer_type(req);

		arg.nr_acl_entries = xfer_mem->nr_acl_entries;
		arg.vmids = xfer_mem->dst_vmids;
		arg.perms = xfer_mem->dst_perms;
		ret = mem_buf_assign_mem(xfer_type, xfer_mem->mem_sgt, &arg);
		if (ret < 0)
			goto err_assign_mem;

		xfer_mem->hdl = arg.memparcel_hdl;
		xfer_mem->trans_type = xfer_type;
	}

	mutex_lock(&mem_buf_xfer_mem_list_lock);
	list_add(&xfer_mem->entry, &mem_buf_xfer_mem_list);
	mutex_unlock(&mem_buf_xfer_mem_list_lock);

	return xfer_mem;

err_assign_mem:
	if (ret != -EADDRNOTAVAIL)
		mem_buf_rmt_free_mem(xfer_mem);
err_rmt_alloc:
	mem_buf_free_xfer_mem(xfer_mem);
	return ERR_PTR(ret);
}

static void mem_buf_cleanup_alloc_req(struct mem_buf_xfer_mem *xfer_mem,
				      gh_memparcel_handle_t memparcel_hdl)
{
	int ret;

	if (!xfer_mem->secure_alloc) {
		if (memparcel_hdl == xfer_mem->hdl) {
			ret = mem_buf_unassign_mem(xfer_mem->mem_sgt,
						   xfer_mem->dst_vmids,
						   xfer_mem->nr_acl_entries,
						   xfer_mem->hdl);
			if (ret < 0)
				return;
		} else {
			struct gh_sgl_desc *sgl_desc = NULL;
			struct gh_acl_desc *acl_desc;
			size_t size;

			size = struct_size(acl_desc, acl_entries, 1);
			acl_desc = kzalloc(size, GFP_KERNEL);
			if (!acl_desc)
				return;

			acl_desc->n_acl_entries = 1;
			acl_desc->acl_entries[0].vmid = VMID_HLOS;
			acl_desc->acl_entries[0].perms = GH_RM_ACL_X | GH_RM_ACL_W | GH_RM_ACL_R;
			ret = mem_buf_map_mem_s2(GH_RM_TRANS_TYPE_DONATE, &memparcel_hdl,
						 acl_desc, &sgl_desc, VMID_TVM);
			if (ret) {
				kfree(acl_desc);
				return;
			}

			kvfree(sgl_desc);
			kfree(acl_desc);
		}
	}
	mem_buf_rmt_free_mem(xfer_mem);
	mem_buf_free_xfer_mem(xfer_mem);
}

static void mem_buf_alloc_req_work(struct work_struct *work)
{
	struct mem_buf_rmt_msg *rmt_msg = to_rmt_msg(work);
	void *req_msg = rmt_msg->msg;
	void *resp_msg;
	struct mem_buf_xfer_mem *xfer_mem;
	gh_memparcel_handle_t hdl = 0;
	u32 obj_id = 0;
	int ret;

	trace_receive_alloc_req(req_msg);
	xfer_mem = mem_buf_process_alloc_req(req_msg);
	if (IS_ERR(xfer_mem)) {
		ret = PTR_ERR(xfer_mem);
		pr_err("%s: failed to process rmt memory alloc request: %d\n",
		       __func__, ret);
		xfer_mem = NULL;
	} else {
		ret = 0;
		hdl = xfer_mem->hdl;
		obj_id = xfer_mem->obj_id;
	}

	resp_msg = mem_buf_construct_alloc_resp(req_msg, ret, hdl, obj_id);
	kfree(rmt_msg->msg);
	kfree(rmt_msg);
	if (IS_ERR(resp_msg))
		goto out_err;

	trace_send_alloc_resp_msg(resp_msg);
	ret = mem_buf_msgq_send(mem_buf_msgq_hdl, resp_msg);
	/*
	 * Free the buffer regardless of the return value as the hypervisor
	 * would have consumed the data in the case of a success.
	 */
	kfree(resp_msg);
	if (ret < 0) {
		pr_err("%s: failed to send memory allocation response rc: %d\n",
		       __func__, ret);
		goto out_err;
	}

	pr_debug("%s: Allocation response sent\n", __func__);
	return;

out_err:
	if (xfer_mem) {
		mutex_lock(&mem_buf_xfer_mem_list_lock);
		list_del(&xfer_mem->entry);
		mutex_unlock(&mem_buf_xfer_mem_list_lock);
		mem_buf_cleanup_alloc_req(xfer_mem, xfer_mem->hdl);
	}
}
static void mem_buf_relinquish_work(struct work_struct *work)
{
	struct mem_buf_xfer_mem *xfer_mem_iter, *tmp, *xfer_mem = NULL;
	struct mem_buf_rmt_msg *rmt_msg = to_rmt_msg(work);
	struct mem_buf_alloc_relinquish *relinquish_msg = rmt_msg->msg;
	u32 obj_id = get_relinquish_req_obj_id(relinquish_msg);
	void *resp_msg;

	trace_receive_relinquish_msg(relinquish_msg);
	mutex_lock(&mem_buf_xfer_mem_list_lock);
	list_for_each_entry_safe(xfer_mem_iter, tmp, &mem_buf_xfer_mem_list,
				 entry)
		if (xfer_mem_iter->obj_id == obj_id) {
			xfer_mem = xfer_mem_iter;
			list_del(&xfer_mem->entry);
			break;
		}
	mutex_unlock(&mem_buf_xfer_mem_list_lock);

	if (xfer_mem)
		mem_buf_cleanup_alloc_req(xfer_mem, relinquish_msg->hdl);
	else
		pr_err("%s: transferred memory with obj_id 0x%x not found\n",
		       __func__, obj_id);

	resp_msg = mem_buf_construct_relinquish_resp(relinquish_msg);
	if (!IS_ERR(resp_msg)) {
		trace_send_relinquish_resp_msg(resp_msg);
		mem_buf_msgq_send(mem_buf_msgq_hdl, resp_msg);
		kfree(resp_msg);
	}

	kfree(rmt_msg->msg);
	kfree(rmt_msg);
}

static int mem_buf_alloc_resp_hdlr(void *hdlr_data, void *msg_buf, size_t size, void *out_buf)
{
	struct mem_buf_alloc_resp *alloc_resp = msg_buf;
	struct mem_buf_desc *membuf = out_buf;
	int ret;

	trace_receive_alloc_resp_msg(alloc_resp);
	if (!(mem_buf_capability & MEM_BUF_CAP_CONSUMER))
		return -EPERM;

	ret = get_alloc_resp_retval(alloc_resp);
	if (ret < 0) {
		pr_err("%s remote allocation failed rc: %d\n", __func__, ret);
	} else {
		membuf->memparcel_hdl = get_alloc_resp_hdl(alloc_resp);
		membuf->obj_id = get_alloc_resp_obj_id(alloc_resp);
	}

	return ret;
}

/* Functions invoked when handling allocation requests to other VMs. */
static void mem_buf_alloc_req_hdlr(void *hdlr_data, void *_buf, size_t size)
{
	struct mem_buf_rmt_msg *rmt_msg;
	void *buf;

	if (!(mem_buf_capability & MEM_BUF_CAP_SUPPLIER))
		return;

	rmt_msg = kmalloc(sizeof(*rmt_msg), GFP_KERNEL);
	if (!rmt_msg)
		return;

	buf = kmemdup(_buf, size, GFP_KERNEL);
	if (!buf) {
		kfree(rmt_msg);
		return;
	}

	rmt_msg->msg = buf;
	rmt_msg->msg_size = size;
	INIT_WORK(&rmt_msg->work, mem_buf_alloc_req_work);
	queue_work(mem_buf_wq, &rmt_msg->work);
}

static void mem_buf_relinquish_hdlr(void *hdlr_data, void *_buf, size_t size)
{
	struct mem_buf_rmt_msg *rmt_msg;
	void *buf;

	if (!(mem_buf_capability & MEM_BUF_CAP_SUPPLIER))
		return;

	rmt_msg = kmalloc(sizeof(*rmt_msg), GFP_KERNEL);
	if (!rmt_msg)
		return;

	buf = kmemdup(_buf, size, GFP_KERNEL);
	if (!buf) {
		kfree(rmt_msg);
		return;
	}

	rmt_msg->msg = buf;
	rmt_msg->msg_size = size;
	INIT_WORK(&rmt_msg->work, mem_buf_relinquish_work);
	queue_work(mem_buf_wq, &rmt_msg->work);
}
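
/*
 * Summary of the message flow between the handlers above and the request path
 * below (descriptive note, not from the original source): a consumer VM builds
 * an allocation request in mem_buf_request_mem() and waits for the supplier's
 * response, which mem_buf_alloc_resp_hdlr() copies into the pending
 * mem_buf_desc. On the supplier side, mem_buf_alloc_req_hdlr() queues
 * mem_buf_alloc_req_work(), which allocates and assigns the memory and sends
 * back an allocation response. When the consumer is done,
 * __mem_buf_relinquish_mem() sends a relinquish message; the supplier's
 * mem_buf_relinquish_work() reclaims and frees the memory, then acknowledges
 * with a relinquish response.
 */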
static int mem_buf_request_mem(struct mem_buf_desc *membuf)
{
	struct mem_buf_txn *txn;
	void *alloc_req_msg;
	int ret;

	txn = mem_buf_init_txn(mem_buf_msgq_hdl, membuf);
	if (IS_ERR(txn))
		return PTR_ERR(txn);

	alloc_req_msg = mem_buf_construct_alloc_req(txn, membuf->size, membuf->acl_desc,
						    membuf->src_mem_type, membuf->src_data,
						    membuf->trans_type);
	if (IS_ERR(alloc_req_msg)) {
		ret = PTR_ERR(alloc_req_msg);
		goto out;
	}

	ret = mem_buf_msgq_send(mem_buf_msgq_hdl, alloc_req_msg);
	/*
	 * Free the buffer regardless of the return value as the hypervisor
	 * would have consumed the data in the case of a success.
	 */
	kfree(alloc_req_msg);
	if (ret < 0)
		goto out;

	ret = mem_buf_txn_wait(mem_buf_msgq_hdl, txn);
	if (ret < 0)
		goto out;

out:
	mem_buf_destroy_txn(mem_buf_msgq_hdl, txn);
	return ret;
}

static void __mem_buf_relinquish_mem(u32 obj_id, u32 memparcel_hdl)
{
	void *relinquish_msg, *txn;
	int ret;

	txn = mem_buf_init_txn(mem_buf_msgq_hdl, NULL);
	if (IS_ERR(txn))
		return;

	relinquish_msg = mem_buf_construct_relinquish_msg(txn, obj_id, memparcel_hdl);
	if (IS_ERR(relinquish_msg))
		goto err_construct_relinquish_msg;

	trace_send_relinquish_msg(relinquish_msg);
	ret = mem_buf_msgq_send(mem_buf_msgq_hdl, relinquish_msg);
	/*
	 * Free the buffer regardless of the return value as the hypervisor
	 * would have consumed the data in the case of a success.
	 */
	kfree(relinquish_msg);
	if (ret < 0)
		pr_err("%s failed to send memory relinquish message rc: %d\n",
		       __func__, ret);
	else
		pr_debug("%s: allocation relinquish message sent\n", __func__);

	/* Wait for response */
	mem_buf_txn_wait(mem_buf_msgq_hdl, txn);

err_construct_relinquish_msg:
	mem_buf_destroy_txn(mem_buf_msgq_hdl, txn);
}

/*
 * Check if membuf already has a valid handle. If it doesn't, then create one.
 */
static void mem_buf_relinquish_mem(struct mem_buf_desc *membuf)
{
	int ret;
	int vmids[] = {VMID_HLOS};
	int perms[] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	struct sg_table *sgt;
	struct mem_buf_lend_kernel_arg arg;

	if (membuf->memparcel_hdl != MEM_BUF_MEMPARCEL_INVALID) {
		if (membuf->trans_type != GH_RM_TRANS_TYPE_DONATE) {
			ret = mem_buf_unmap_mem_s2(membuf->memparcel_hdl);
			if (ret)
				return;
		}

		return __mem_buf_relinquish_mem(membuf->obj_id, membuf->memparcel_hdl);
	}

	sgt = dup_gh_sgl_desc_to_sgt(membuf->sgl_desc);
	if (IS_ERR(sgt))
		return;

	arg.nr_acl_entries = 1;
	arg.vmids = vmids;
	arg.perms = perms;
	arg.flags = GH_RM_MEM_DONATE_SANITIZE;
	arg.label = 0;
	ret = mem_buf_assign_mem(GH_RM_TRANS_TYPE_DONATE, sgt, &arg);
	if (ret)
		goto err_free_sgt;

	membuf->memparcel_hdl = arg.memparcel_hdl;
	__mem_buf_relinquish_mem(membuf->obj_id, membuf->memparcel_hdl);

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
}
static void mem_buf_relinquish_memparcel_hdl(void *hdlr_data, u32 obj_id, gh_memparcel_handle_t hdl)
{
	__mem_buf_relinquish_mem(obj_id, hdl);
}

static void *mem_buf_retrieve_dmaheap_mem_type_data_user(
					struct mem_buf_dmaheap_data __user *udata)
{
	char *buf;
	int ret;
	struct mem_buf_dmaheap_data data;

	ret = copy_struct_from_user(&data, sizeof(data),
				    udata,
				    sizeof(data));
	if (ret)
		return ERR_PTR(-EINVAL);

	buf = kcalloc(MEM_BUF_MAX_DMAHEAP_NAME_LEN, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	ret = strncpy_from_user(buf, (const void __user *)data.heap_name,
				MEM_BUF_MAX_DMAHEAP_NAME_LEN);
	if (ret < 0 || ret == MEM_BUF_MAX_DMAHEAP_NAME_LEN) {
		kfree(buf);
		return ERR_PTR(-EINVAL);
	}

	return buf;
}

static void *mem_buf_retrieve_mem_type_data_user(enum mem_buf_mem_type mem_type,
						 void __user *mem_type_data)
{
	void *data = ERR_PTR(-EINVAL);

	if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
		data = mem_buf_retrieve_dmaheap_mem_type_data_user(mem_type_data);
	else if (mem_type == MEM_BUF_BUDDY_MEM_TYPE)
		data = NULL;

	return data;
}

static void *mem_buf_retrieve_dmaheap_mem_type_data(char *dmaheap_name)
{
	return kstrdup(dmaheap_name, GFP_KERNEL);
}

static void *mem_buf_retrieve_mem_type_data(enum mem_buf_mem_type mem_type,
					    void *mem_type_data)
{
	void *data = ERR_PTR(-EINVAL);

	if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
		data = mem_buf_retrieve_dmaheap_mem_type_data(mem_type_data);
	else if (mem_type == MEM_BUF_BUDDY_MEM_TYPE)
		data = NULL;

	return data;
}

static void mem_buf_free_dmaheap_mem_type_data(char *dmaheap_name)
{
	kfree(dmaheap_name);
}

static void mem_buf_free_mem_type_data(enum mem_buf_mem_type mem_type,
				       void *mem_type_data)
{
	if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
		mem_buf_free_dmaheap_mem_type_data(mem_type_data);
	/* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */
}

static bool is_valid_mem_type(enum mem_buf_mem_type mem_type)
{
	return (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE) ||
	       (mem_type == MEM_BUF_BUDDY_MEM_TYPE);
}

static bool is_valid_ioctl_mem_type(enum mem_buf_mem_type mem_type)
{
	return (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE);
}
void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data)
{
	int ret;
	struct mem_buf_desc *membuf;
	int perms = PERM_READ | PERM_WRITE | PERM_EXEC;

	if (!(mem_buf_capability & MEM_BUF_CAP_CONSUMER))
		return ERR_PTR(-EOPNOTSUPP);

	if (!alloc_data || !alloc_data->size || alloc_data->nr_acl_entries != 1 ||
	    !alloc_data->vmids || !alloc_data->perms ||
	    !is_valid_mem_type(alloc_data->src_mem_type) ||
	    !is_valid_mem_type(alloc_data->dst_mem_type))
		return ERR_PTR(-EINVAL);

	membuf = kzalloc(sizeof(*membuf), GFP_KERNEL);
	if (!membuf)
		return ERR_PTR(-ENOMEM);

	pr_debug("%s: mem buf alloc begin\n", __func__);
	membuf->size = alloc_data->size;

	/* Create copies of data structures from alloc_data as they may be on-stack */
	membuf->acl_desc = mem_buf_vmid_perm_list_to_gh_acl(
				alloc_data->vmids, &perms,
				alloc_data->nr_acl_entries);
	if (IS_ERR(membuf->acl_desc)) {
		ret = PTR_ERR(membuf->acl_desc);
		goto err_alloc_acl_list;
	}

	if (alloc_data->sgl_desc) {
		membuf->sgl_desc = dup_gh_sgl_desc(alloc_data->sgl_desc);
		if (IS_ERR(membuf->sgl_desc)) {
			ret = PTR_ERR(membuf->sgl_desc);
			goto err_alloc_sgl_desc;
		}
	}

	membuf->trans_type = alloc_data->trans_type;
	membuf->src_mem_type = alloc_data->src_mem_type;
	membuf->dst_mem_type = alloc_data->dst_mem_type;

	membuf->src_data =
		mem_buf_retrieve_mem_type_data(alloc_data->src_mem_type,
					       alloc_data->src_data);
	if (IS_ERR(membuf->src_data)) {
		ret = PTR_ERR(membuf->src_data);
		goto err_alloc_src_data;
	}

	membuf->dst_data =
		mem_buf_retrieve_mem_type_data(alloc_data->dst_mem_type,
					       alloc_data->dst_data);
	if (IS_ERR(membuf->dst_data)) {
		ret = PTR_ERR(membuf->dst_data);
		goto err_alloc_dst_data;
	}

	trace_mem_buf_alloc_info(membuf->size, membuf->src_mem_type,
				 membuf->dst_mem_type, membuf->acl_desc);
	ret = mem_buf_request_mem(membuf);
	if (ret)
		goto err_mem_req;

	ret = mem_buf_map_mem_s2(membuf->trans_type, &membuf->memparcel_hdl,
				 membuf->acl_desc, &membuf->sgl_desc, VMID_HLOS);
	if (ret)
		goto err_map_mem_s2;

	mutex_lock(&mem_buf_list_lock);
	list_add_tail(&membuf->entry, &mem_buf_list);
	mutex_unlock(&mem_buf_list_lock);

	pr_debug("%s: mem buf alloc success\n", __func__);
	return membuf;

err_map_mem_s2:
	mem_buf_relinquish_mem(membuf);
err_mem_req:
	mem_buf_free_mem_type_data(membuf->dst_mem_type, membuf->dst_data);
err_alloc_dst_data:
	mem_buf_free_mem_type_data(membuf->src_mem_type, membuf->src_data);
err_alloc_src_data:
	if (membuf->sgl_desc)
		kvfree(membuf->sgl_desc);
err_alloc_sgl_desc:
	kfree(membuf->acl_desc);
err_alloc_acl_list:
	kfree(membuf);
	return ERR_PTR(ret);
}

void mem_buf_free(void *__membuf)
{
	struct mem_buf_desc *membuf = __membuf;

	mutex_lock(&mem_buf_list_lock);
	list_del(&membuf->entry);
	mutex_unlock(&mem_buf_list_lock);

	mem_buf_relinquish_mem(membuf);
	kvfree(membuf->sgl_desc);

	mem_buf_free_mem_type_data(membuf->dst_mem_type, membuf->dst_data);
	mem_buf_free_mem_type_data(membuf->src_mem_type, membuf->src_data);
	kfree(membuf->acl_desc);
	kfree(membuf);
}
EXPORT_SYMBOL(mem_buf_free);

struct gh_sgl_desc *mem_buf_get_sgl(void *__membuf)
{
	struct mem_buf_desc *membuf = __membuf;

	return membuf->sgl_desc;
}
EXPORT_SYMBOL(mem_buf_get_sgl);
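
/*
 * Minimal usage sketch for the kernel API above (illustrative only; the VMID,
 * permission, size and transfer-type values are placeholders and error
 * handling is abbreviated):
 *
 *	int vmids[] = { VMID_TVM };
 *	int perms[] = { PERM_READ | PERM_WRITE };
 *	struct mem_buf_allocation_data data = {
 *		.size = SZ_4M,
 *		.nr_acl_entries = 1,
 *		.vmids = vmids,
 *		.perms = perms,
 *		.trans_type = GH_RM_TRANS_TYPE_LEND,
 *		.src_mem_type = MEM_BUF_BUDDY_MEM_TYPE,
 *		.src_data = NULL,
 *		.dst_mem_type = MEM_BUF_BUDDY_MEM_TYPE,
 *		.dst_data = NULL,
 *	};
 *	void *membuf = mem_buf_alloc(&data);
 *
 *	if (!IS_ERR(membuf)) {
 *		struct gh_sgl_desc *sgl = mem_buf_get_sgl(membuf);
 *		// ... map and use the IPAs described by sgl ...
 *		mem_buf_free(membuf);
 *	}
 */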
static void mem_buf_retrieve_release(struct qcom_sg_buffer *buffer)
{
	sg_free_table(&buffer->sg_table);
	kfree(buffer);
}

struct dma_buf *mem_buf_retrieve(struct mem_buf_retrieve_kernel_arg *arg)
{
	int ret, op;
	struct qcom_sg_buffer *buffer;
	struct gh_acl_desc *acl_desc;
	/* Hypervisor picks the IPA address */
	struct gh_sgl_desc *sgl_desc = NULL;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	struct sg_table *sgt;

	if (arg->fd_flags & ~MEM_BUF_VALID_FD_FLAGS)
		return ERR_PTR(-EINVAL);

	if (!arg->nr_acl_entries || !arg->vmids || !arg->perms)
		return ERR_PTR(-EINVAL);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	acl_desc = mem_buf_vmid_perm_list_to_gh_acl(arg->vmids, arg->perms,
						    arg->nr_acl_entries);
	if (IS_ERR(acl_desc)) {
		ret = PTR_ERR(acl_desc);
		goto err_gh_acl;
	}

	op = mem_buf_get_mem_xfer_type_gh(acl_desc, arg->sender_vmid);
	ret = mem_buf_map_mem_s2(op, &arg->memparcel_hdl, acl_desc, &sgl_desc,
				 arg->sender_vmid);
	if (ret)
		goto err_map_s2;

	ret = mem_buf_map_mem_s1(sgl_desc);
	if (ret < 0)
		goto err_map_mem_s1;

	sgt = dup_gh_sgl_desc_to_sgt(sgl_desc);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_dup_sgt;
	}
	buffer->sg_table = *sgt;
	kfree(sgt);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = NULL;
	buffer->len = mem_buf_get_sgl_buf_size(sgl_desc);
	buffer->uncached = false;
	buffer->free = mem_buf_retrieve_release;
	buffer->vmperm = mem_buf_vmperm_alloc_accept(&buffer->sg_table,
						     arg->memparcel_hdl,
						     arg->vmids, arg->perms,
						     arg->nr_acl_entries);

	exp_info.size = buffer->len;
	exp_info.flags = arg->fd_flags;
	exp_info.priv = buffer;

	dmabuf = qcom_dma_buf_export(&exp_info, &qcom_sg_buf_ops);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto err_export_dma_buf;
	}

	/* sgt & qcom_sg_buffer will be freed by mem_buf_retrieve_release */
	kvfree(sgl_desc);
	kfree(acl_desc);
	return dmabuf;

err_export_dma_buf:
	sg_free_table(&buffer->sg_table);
err_dup_sgt:
	mem_buf_unmap_mem_s1(sgl_desc);
err_map_mem_s1:
	kvfree(sgl_desc);
	mem_buf_unmap_mem_s2(arg->memparcel_hdl);
err_map_s2:
	kfree(acl_desc);
err_gh_acl:
	kfree(buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(mem_buf_retrieve);
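
/*
 * Minimal usage sketch for mem_buf_retrieve() (illustrative only; every field
 * value below is a placeholder, and hdl_from_sender stands for the memparcel
 * handle communicated by the sending VM):
 *
 *	int vmids[] = { VMID_TVM };
 *	int perms[] = { PERM_READ | PERM_WRITE };
 *	struct mem_buf_retrieve_kernel_arg karg = {
 *		.sender_vmid = VMID_HLOS,
 *		.nr_acl_entries = 1,
 *		.vmids = vmids,
 *		.perms = perms,
 *		.memparcel_hdl = hdl_from_sender,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	struct dma_buf *dmabuf = mem_buf_retrieve(&karg);
 *
 * On success the returned dma-buf wraps the accepted memparcel; per the
 * comment in mem_buf_retrieve(), the backing qcom_sg_buffer is released via
 * mem_buf_retrieve_release() when the buffer is freed.
 */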
static int mem_buf_prep_alloc_data(struct mem_buf_allocation_data *alloc_data,
				   struct mem_buf_alloc_ioctl_arg *allocation_args)
{
	unsigned int nr_acl_entries = allocation_args->nr_acl_entries;
	int ret;

	alloc_data->size = allocation_args->size;
	alloc_data->nr_acl_entries = nr_acl_entries;

	ret = mem_buf_acl_to_vmid_perms_list(nr_acl_entries,
					     (const void __user *)allocation_args->acl_list,
					     &alloc_data->vmids, &alloc_data->perms);
	if (ret)
		goto err_acl;

	/* alloc_data->trans_type is set later according to src and dst mem_type */
	alloc_data->sgl_desc = NULL;
	alloc_data->src_mem_type = allocation_args->src_mem_type;
	alloc_data->dst_mem_type = allocation_args->dst_mem_type;

	alloc_data->src_data =
		mem_buf_retrieve_mem_type_data_user(
					allocation_args->src_mem_type,
					(void __user *)allocation_args->src_data);
	if (IS_ERR(alloc_data->src_data)) {
		ret = PTR_ERR(alloc_data->src_data);
		goto err_alloc_src_data;
	}

	alloc_data->dst_data =
		mem_buf_retrieve_mem_type_data_user(
					allocation_args->dst_mem_type,
					(void __user *)allocation_args->dst_data);
	if (IS_ERR(alloc_data->dst_data)) {
		ret = PTR_ERR(alloc_data->dst_data);
		goto err_alloc_dst_data;
	}

	return 0;

err_alloc_dst_data:
	mem_buf_free_mem_type_data(alloc_data->src_mem_type,
				   alloc_data->src_data);
err_alloc_src_data:
	kfree(alloc_data->vmids);
	kfree(alloc_data->perms);
err_acl:
	return ret;
}

static void mem_buf_free_alloc_data(struct mem_buf_allocation_data *alloc_data)
{
	mem_buf_free_mem_type_data(alloc_data->dst_mem_type,
				   alloc_data->dst_data);
	mem_buf_free_mem_type_data(alloc_data->src_mem_type,
				   alloc_data->src_data);
	kfree(alloc_data->vmids);
	kfree(alloc_data->perms);
}

/* FIXME - remove is_valid_ioctl_mem_type. It's already handled. */
int mem_buf_alloc_fd(struct mem_buf_alloc_ioctl_arg *allocation_args)
{
	struct mem_buf_allocation_data alloc_data;
	int ret;

	if (!allocation_args->size || !allocation_args->nr_acl_entries ||
	    !allocation_args->acl_list ||
	    (allocation_args->nr_acl_entries > MEM_BUF_MAX_NR_ACL_ENTS) ||
	    !is_valid_ioctl_mem_type(allocation_args->src_mem_type) ||
	    !is_valid_ioctl_mem_type(allocation_args->dst_mem_type) ||
	    allocation_args->reserved0 || allocation_args->reserved1 ||
	    allocation_args->reserved2)
		return -EINVAL;

	ret = mem_buf_prep_alloc_data(&alloc_data, allocation_args);
	if (ret < 0)
		return ret;

	if (alloc_data.dst_mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
		ret = qcom_tui_heap_add_pool_fd(&alloc_data);
	else
		ret = -EINVAL;

	mem_buf_free_alloc_data(&alloc_data);
	return ret;
}
int mem_buf_retrieve_user(struct mem_buf_retrieve_ioctl_arg *uarg)
{
	int ret, fd;
	int *vmids, *perms;
	struct dma_buf *dmabuf;
	struct mem_buf_retrieve_kernel_arg karg = {0};

	if (!uarg->nr_acl_entries || !uarg->acl_list ||
	    uarg->nr_acl_entries > MEM_BUF_MAX_NR_ACL_ENTS ||
	    uarg->reserved0 || uarg->reserved1 ||
	    uarg->reserved2 ||
	    uarg->fd_flags & ~MEM_BUF_VALID_FD_FLAGS)
		return -EINVAL;

	ret = mem_buf_acl_to_vmid_perms_list(uarg->nr_acl_entries,
					     (void *)uarg->acl_list, &vmids, &perms);
	if (ret)
		return ret;

	karg.sender_vmid = mem_buf_fd_to_vmid(uarg->sender_vm_fd);
	if (karg.sender_vmid < 0) {
		pr_err_ratelimited("%s: Invalid sender_vm_fd %d\n", __func__, uarg->sender_vm_fd);
		ret = karg.sender_vmid;
		goto err_sender_vmid;
	}

	karg.nr_acl_entries = uarg->nr_acl_entries;
	karg.vmids = vmids;
	karg.perms = perms;
	karg.memparcel_hdl = uarg->memparcel_hdl;
	karg.fd_flags = uarg->fd_flags;
	dmabuf = mem_buf_retrieve(&karg);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto err_retrieve;
	}

	fd = dma_buf_fd(dmabuf, karg.fd_flags);
	if (fd < 0) {
		ret = fd;
		goto err_fd;
	}

	uarg->dma_buf_import_fd = fd;
	kfree(vmids);
	kfree(perms);
	return 0;

err_fd:
	dma_buf_put(dmabuf);
err_sender_vmid:
err_retrieve:
	kfree(vmids);
	kfree(perms);
	return ret;
}
static const struct mem_buf_msgq_ops msgq_ops = {
	.alloc_req_hdlr = mem_buf_alloc_req_hdlr,
	.alloc_resp_hdlr = mem_buf_alloc_resp_hdlr,
	.relinquish_hdlr = mem_buf_relinquish_hdlr,
	.relinquish_memparcel_hdl = mem_buf_relinquish_memparcel_hdl,
};

int mem_buf_msgq_alloc(struct device *dev)
{
	struct mem_buf_msgq_hdlr_info info = {
		.msgq_ops = &msgq_ops,
	};
	int ret;

	/* No msgq if neither a consumer nor a supplier */
	if (!(mem_buf_capability & MEM_BUF_CAP_DUAL))
		return 0;

	mem_buf_wq = alloc_workqueue("mem_buf_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!mem_buf_wq) {
		dev_err(dev, "Unable to initialize workqueue\n");
		return -EINVAL;
	}

	mem_buf_msgq_hdl = mem_buf_msgq_register("trusted_vm", &info);
	if (IS_ERR(mem_buf_msgq_hdl)) {
		ret = PTR_ERR(mem_buf_msgq_hdl);
		dev_err(dev, "Unable to register for mem-buf message queue\n");
		goto err_msgq_register;
	}

	return 0;

err_msgq_register:
	destroy_workqueue(mem_buf_wq);
	mem_buf_wq = NULL;
	return ret;
}

void mem_buf_msgq_free(struct device *dev)
{
	if (!(mem_buf_capability & MEM_BUF_CAP_DUAL))
		return;

	mutex_lock(&mem_buf_list_lock);
	if (!list_empty(&mem_buf_list))
		dev_err(mem_buf_dev,
			"Removing mem-buf driver while there are membufs\n");
	mutex_unlock(&mem_buf_list_lock);

	mutex_lock(&mem_buf_xfer_mem_list_lock);
	if (!list_empty(&mem_buf_xfer_mem_list))
		dev_err(mem_buf_dev,
			"Removing mem-buf driver while memory is still lent\n");
	mutex_unlock(&mem_buf_xfer_mem_list_lock);

	mem_buf_msgq_unregister(mem_buf_msgq_hdl);
	mem_buf_msgq_hdl = NULL;
	destroy_workqueue(mem_buf_wq);
	mem_buf_wq = NULL;
}