qcom_carveout_heap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DMA-BUF heap carveout heap allocator. Copied from
 * drivers/staging/android/ion/heaps/ion_carveout_heap.c as of commit
 * aeb022cc01ecc ("dma-heap: qcom: Change symbol names to let module be built
 * in")
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/qcom_dma_heap.h>
#include <linux/types.h>

#include "qcom_dma_heap_secure_utils.h"
#include "qcom_sg_ops.h"
#include "qcom_carveout_heap.h"

#define CARVEOUT_ALLOCATE_FAIL (-1)

static LIST_HEAD(secure_carveout_heaps);

/*
 * @pool_refcount_priv -
 *        Cookie set by carveout_heap_add_memory() for use with its callbacks.
 *        The cookie provider will call carveout_heap_remove_memory() once the
 *        refcount reaches zero.
 * @pool_refcount_get -
 *        Callback to increase the refcount. Returns 0 on success and fails if
 *        the refcount is already zero.
 * @pool_refcount_put - Callback to decrease the refcount.
 */
struct carveout_heap {
        struct dma_heap *heap;
        struct rw_semaphore mem_sem;
        struct gen_pool *pool;
        struct device *dev;
        bool is_secure;
        phys_addr_t base;
        ssize_t size;
};

struct secure_carveout_heap {
        u32 token;
        struct carveout_heap carveout_heap;
        struct list_head list;
        atomic_long_t total_allocated;
};

static void sc_heap_free(struct qcom_sg_buffer *buffer);

void __maybe_unused pages_sync_for_device(struct device *dev, struct page *page,
                                          size_t size, enum dma_data_direction dir)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        /*
         * This is not correct - sg_dma_address needs a dma_addr_t that is valid
         * for the targeted device, but this works on the currently targeted
         * hardware.
         */
        sg_dma_address(&sg) = page_to_phys(page);
        dma_sync_sg_for_device(dev, &sg, 1, dir);
}
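
/*
 * Illustrative sketch only (not part of the original driver): the comment in
 * pages_sync_for_device() notes that page_to_phys() is not a valid DMA
 * address in general. A more conventional flow would let the DMA API map the
 * page for the device and perform the cache maintenance as part of that
 * mapping. The helper below is hypothetical and compiled out.
 */
#if 0
static void example_sync_via_dma_map(struct device *dev, struct page *page,
                                     size_t size, enum dma_data_direction dir)
{
        /* dma_map_page() returns a dma_addr_t that is valid for @dev */
        dma_addr_t dma = dma_map_page(dev, page, 0, size, dir);

        if (dma_mapping_error(dev, dma))
                return;

        /* The map/unmap pair performs the required cache maintenance */
        dma_unmap_page(dev, dma, size, dir);
}
#endif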

static phys_addr_t carveout_allocate(struct carveout_heap *carveout_heap,
                                     unsigned long size)
{
        unsigned long offset = CARVEOUT_ALLOCATE_FAIL;

        down_read(&carveout_heap->mem_sem);
        if (carveout_heap->pool) {
                /* gen_pool_alloc() returns 0 when the request cannot be satisfied */
                offset = gen_pool_alloc(carveout_heap->pool, size);
                if (!offset) {
                        offset = CARVEOUT_ALLOCATE_FAIL;
                        goto unlock;
                }
        }
unlock:
        up_read(&carveout_heap->mem_sem);
        return offset;
}

static void carveout_free(struct carveout_heap *carveout_heap,
                          phys_addr_t addr, unsigned long size)
{
        if (addr == CARVEOUT_ALLOCATE_FAIL)
                return;

        down_read(&carveout_heap->mem_sem);
        if (carveout_heap->pool)
                gen_pool_free(carveout_heap->pool, addr, size);
        up_read(&carveout_heap->mem_sem);
}
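
/*
 * Build the mem-buf VM permissions for a freshly carved-out buffer. Buffers
 * from non-secure heaps get the default permissions, while secure heaps
 * translate their token into the static VMID/permission lists that mem-buf
 * expects.
 */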
struct mem_buf_vmperm *
carveout_setup_vmperm(struct carveout_heap *carveout_heap,
                      struct sg_table *sgt)
{
        struct secure_carveout_heap *sc_heap;
        struct mem_buf_vmperm *vmperm;
        int *vmids, *perms;
        u32 nr;
        int ret;

        if (!carveout_heap->is_secure) {
                vmperm = mem_buf_vmperm_alloc(sgt);
                return vmperm;
        }

        sc_heap = container_of(carveout_heap,
                               struct secure_carveout_heap, carveout_heap);
        ret = get_vmperm_from_ion_flags(sc_heap->token,
                                        &vmids, &perms, &nr);
        if (ret)
                return ERR_PTR(ret);

        vmperm = mem_buf_vmperm_alloc_staticvm(sgt, vmids, perms, nr);
        kfree(vmids);
        kfree(perms);
        return vmperm;
}
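
/*
 * Common allocation path shared by the plain and secure carveout heaps:
 * carve a physically contiguous range out of the gen_pool, describe it with
 * a single-entry sg_table, attach VM permissions and export it as a DMA-BUF.
 * @buffer_free is the heap-specific callback used to release the buffer.
 */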
static struct dma_buf *__carveout_heap_allocate(struct carveout_heap *carveout_heap,
                                                unsigned long len,
                                                unsigned long fd_flags,
                                                unsigned long heap_flags,
                                                void (*buffer_free)(struct qcom_sg_buffer *))
{
        struct sg_table *table;
        struct qcom_sg_buffer *buffer;
        phys_addr_t paddr;
        int ret;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *dmabuf;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        /* Initialize the buffer */
        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->heap = carveout_heap->heap;
        buffer->len = len;
        buffer->free = buffer_free;
        buffer->uncached = true;

        table = &buffer->sg_table;
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto err_free;

        paddr = carveout_allocate(carveout_heap, len);
        if (paddr == CARVEOUT_ALLOCATE_FAIL) {
                ret = -ENOMEM;
                goto err_free_table;
        }
        sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), len, 0);

        buffer->vmperm = carveout_setup_vmperm(carveout_heap, &buffer->sg_table);
        if (IS_ERR(buffer->vmperm)) {
                ret = PTR_ERR(buffer->vmperm);
                goto err_free_carveout;
        }

        /* Instantiate our dma_buf */
        exp_info.exp_name = dma_heap_get_name(carveout_heap->heap);
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = qcom_dma_buf_export(&exp_info, &qcom_sg_buf_ops);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto err_free_vmperm;
        }

        return dmabuf;

err_free_vmperm:
        mem_buf_vmperm_release(buffer->vmperm);
err_free_carveout:
        carveout_free(carveout_heap, paddr, len);
err_free_table:
        sg_free_table(table);
err_free:
        kfree(buffer);
        return ERR_PTR(ret);
}
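
/*
 * Illustrative userspace sketch (not part of this file): clients allocate
 * from these heaps through the standard DMA-BUF heap ioctl interface. The
 * heap node name "qcom,example-carveout" below is an assumption; the real
 * name is whatever heap name was passed to dma_heap_add(). The snippet is
 * compiled out and only shows the expected calling sequence.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dma-heap.h>

static int example_alloc_from_carveout(size_t len)
{
        struct dma_heap_allocation_data data = {
                .len = len,
                .fd_flags = O_RDWR | O_CLOEXEC,
        };
        int heap_fd, ret;

        heap_fd = open("/dev/dma_heap/qcom,example-carveout",
                       O_RDONLY | O_CLOEXEC);
        if (heap_fd < 0)
                return -1;

        ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
        close(heap_fd);

        /* On success, data.fd is the exported dma-buf file descriptor */
        return ret < 0 ? -1 : (int)data.fd;
}
#endif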

static int carveout_pages_zero(struct page *page, size_t size);

static void carveout_heap_free(struct qcom_sg_buffer *buffer)
{
        struct carveout_heap *carveout_heap;
        struct sg_table *table = &buffer->sg_table;
        struct page *page = sg_page(table->sgl);
        phys_addr_t paddr = page_to_phys(page);

        carveout_heap = dma_heap_get_drvdata(buffer->heap);
        carveout_pages_zero(page, buffer->len);
        carveout_free(carveout_heap, paddr, buffer->len);
        sg_free_table(table);
        kfree(buffer);
}

static struct dma_buf *carveout_heap_allocate(struct dma_heap *heap,
                                              unsigned long len,
                                              unsigned long fd_flags,
                                              unsigned long heap_flags)
{
        struct carveout_heap *carveout_heap = dma_heap_get_drvdata(heap);

        return __carveout_heap_allocate(carveout_heap, len, fd_flags,
                                        heap_flags, carveout_heap_free);
}

static int carveout_pages_zero(struct page *page, size_t size)
{
        void __iomem *addr;

        addr = ioremap_wc(page_to_phys(page), size);
        if (!addr)
                return -ENOMEM;
        memset(addr, 0, size);
        iounmap(addr);

        return 0;
}
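
/*
 * Scrub the carveout region and seed a gen_pool that covers it. The pool is
 * created with PAGE_SHIFT as its minimum allocation order, so allocations
 * are handed out with page granularity.
 */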
static int carveout_init_heap_memory(struct carveout_heap *co_heap,
                                     phys_addr_t base, ssize_t size)
{
        struct page *page = pfn_to_page(PFN_DOWN(base));
        int ret;

        ret = carveout_pages_zero(page, size);
        if (ret)
                return ret;

        co_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!co_heap->pool)
                return -ENOMEM;

        co_heap->base = base;
        co_heap->size = size;
        gen_pool_add(co_heap->pool, co_heap->base, size, -1);

        return 0;
}

static int __carveout_heap_init(struct platform_heap *heap_data,
                                struct carveout_heap *carveout_heap)
{
        struct device *dev = heap_data->dev;
        int ret;

        carveout_heap->dev = dev;
        ret = carveout_init_heap_memory(carveout_heap, heap_data->base,
                                        heap_data->size);
        init_rwsem(&carveout_heap->mem_sem);

        return ret;
}

static const struct dma_heap_ops carveout_heap_ops = {
        .allocate = carveout_heap_allocate,
};

static void carveout_heap_destroy(struct carveout_heap *heap);

int qcom_carveout_heap_create(struct platform_heap *heap_data)
{
        struct dma_heap_export_info exp_info;
        struct carveout_heap *carveout_heap;
        int ret;

        if (!heap_data->is_nomap) {
                pr_err("carveout heap memory regions need to be created with no-map\n");
                return -EINVAL;
        }

        carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
        if (!carveout_heap)
                return -ENOMEM;

        ret = __carveout_heap_init(heap_data, carveout_heap);
        if (ret)
                goto err;

        carveout_heap->is_secure = false;

        exp_info.name = heap_data->name;
        exp_info.ops = &carveout_heap_ops;
        exp_info.priv = carveout_heap;

        carveout_heap->heap = dma_heap_add(&exp_info);
        if (IS_ERR(carveout_heap->heap)) {
                ret = PTR_ERR(carveout_heap->heap);
                goto destroy_heap;
        }

        return 0;

destroy_heap:
        carveout_heap_destroy(carveout_heap);
err:
        kfree(carveout_heap);
        return ret;
}

static void carveout_heap_destroy(struct carveout_heap *carveout_heap)
{
        down_write(&carveout_heap->mem_sem);
        if (carveout_heap->pool)
                gen_pool_destroy(carveout_heap->pool);
        carveout_heap->pool = NULL;
        up_write(&carveout_heap->mem_sem);
}

static struct dma_buf *sc_heap_allocate(struct dma_heap *heap,
                                        unsigned long len,
                                        unsigned long fd_flags,
                                        unsigned long heap_flags)
{
        struct secure_carveout_heap *sc_heap;
        struct dma_buf *dbuf;

        sc_heap = dma_heap_get_drvdata(heap);
        dbuf = __carveout_heap_allocate(&sc_heap->carveout_heap, len,
                                        fd_flags, heap_flags, sc_heap_free);
        if (IS_ERR(dbuf))
                return dbuf;

        atomic_long_add(len, &sc_heap->total_allocated);
        return dbuf;
}

static void sc_heap_free(struct qcom_sg_buffer *buffer)
{
        struct secure_carveout_heap *sc_heap;
        struct sg_table *table = &buffer->sg_table;
        struct page *page = sg_page(table->sgl);
        phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

        sc_heap = dma_heap_get_drvdata(buffer->heap);
        /* Only zero the memory if it is still accessible to HLOS */
        if (qcom_is_buffer_hlos_accessible(sc_heap->token))
                carveout_pages_zero(page, buffer->len);
        carveout_free(&sc_heap->carveout_heap, paddr, buffer->len);
        sg_free_table(table);
        atomic_long_sub(buffer->len, &sc_heap->total_allocated);
        kfree(buffer);
}
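
/*
 * Freeze/restore hooks for the secure carveout heaps, presumably wired into
 * the driver's hibernation path: freeze refuses to proceed while any secure
 * allocation is still outstanding, and restore re-applies the hypervisor
 * assignment for every registered heap.
 */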
int qcom_secure_carveout_heap_freeze(void)
{
        long sz;
        struct secure_carveout_heap *sc_heap;

        /*
         * Clients are expected to have freed all of their buffers before the
         * freeze. Any memory still outstanding is tracked per heap in the
         * total_allocated member.
         */
        list_for_each_entry(sc_heap, &secure_carveout_heaps, list) {
                sz = atomic_long_read(&sc_heap->total_allocated);
                if (sz) {
                        pr_err("%s: %s: %lx bytes of allocations not freed. Aborting freeze\n",
                               __func__,
                               dma_heap_get_name(sc_heap->carveout_heap.heap),
                               sz);
                        return -EBUSY;
                }
        }
        return 0;
}

int qcom_secure_carveout_heap_restore(void)
{
        struct secure_carveout_heap *sc_heap;
        int ret;

        list_for_each_entry(sc_heap, &secure_carveout_heaps, list) {
                ret = hyp_assign_from_flags(sc_heap->carveout_heap.base,
                                            sc_heap->carveout_heap.size,
                                            sc_heap->token);
                BUG_ON(ret);
        }
        return 0;
}

static const struct dma_heap_ops sc_heap_ops = {
        .allocate = sc_heap_allocate,
};

int qcom_secure_carveout_heap_create(struct platform_heap *heap_data)
{
        struct dma_heap_export_info exp_info;
        struct secure_carveout_heap *sc_heap;
        int ret;

        if (!heap_data->is_nomap) {
                pr_err("secure carveout heap memory regions need to be created with no-map\n");
                return -EINVAL;
        }

        sc_heap = kzalloc(sizeof(*sc_heap), GFP_KERNEL);
        if (!sc_heap)
                return -ENOMEM;

        ret = __carveout_heap_init(heap_data, &sc_heap->carveout_heap);
        if (ret)
                goto err;

        ret = hyp_assign_from_flags(heap_data->base, heap_data->size,
                                    heap_data->token);
        if (ret) {
                pr_err("secure_carveout_heap: Assign token 0x%x failed\n",
                       heap_data->token);
                goto destroy_heap;
        }
        sc_heap->token = heap_data->token;
        sc_heap->carveout_heap.is_secure = true;

        exp_info.name = heap_data->name;
        exp_info.ops = &sc_heap_ops;
        exp_info.priv = sc_heap;

        sc_heap->carveout_heap.heap = dma_heap_add(&exp_info);
        if (IS_ERR(sc_heap->carveout_heap.heap)) {
                ret = PTR_ERR(sc_heap->carveout_heap.heap);
                goto destroy_heap;
        }
        list_add(&sc_heap->list, &secure_carveout_heaps);

        return 0;

destroy_heap:
        carveout_heap_destroy(&sc_heap->carveout_heap);
err:
        kfree(sc_heap);
        return ret;
}