qcom_system_heap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 * Originally copied from: drivers/dma-buf/heaps/system_heap.c as of commit
 * 263e38f82cbb ("dma-buf: heaps: Remove redundant heap identifier from system
 * heap name")
 *
 * Additions taken from modifications to drivers/dma-buf/heaps/system-heap.c,
 * from patches submitted, are listed below:
 *
 * Addition that modifies dma_buf ops to use SG tables taken from
 * drivers/dma-buf/heaps/system-heap.c in:
 * https://lore.kernel.org/lkml/[email protected]/
 *
 * Addition that skips unneeded syncs in the dma_buf ops taken from
 * https://lore.kernel.org/lkml/[email protected]/
 *
 * Addition that tries to allocate higher order pages taken from
 * https://lore.kernel.org/lkml/[email protected]/
 *
 * Addition that implements an uncached heap taken from
 * https://lore.kernel.org/lkml/[email protected]/,
 * with our own modifications made to account for core kernel changes that are
 * a part of the patch series.
 *
 * Pooling functionality taken from:
 * Git-repo: https://git.linaro.org/people/john.stultz/android-dev.git
 * Branch: dma-buf-heap-perf
 * Git-commit: 6f080eb67dce63c6efa57ef564ca4cd762ccebb0
 * Git-commit: 6fb9593b928c4cb485bef4e88c59c6b9fdf11352
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 * Andrew F. Davis <[email protected]>
 *
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/qcom_dma_heap.h>
#include <uapi/linux/sched/types.h>
#include <trace/hooks/mm.h>

#include "qcom_dma_heap_secure_utils.h"
#include "qcom_dynamic_page_pool.h"
#include "qcom_sg_ops.h"
#include "qcom_system_heap.h"
#include "qcom_system_movable_heap.h"

#if IS_ENABLED(CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL)

#define DYNAMIC_POOL_FILL_MARK (100 * SZ_1M)
#define DYNAMIC_POOL_LOW_MARK_PERCENT 40UL
#define DYNAMIC_POOL_LOW_MARK ((DYNAMIC_POOL_FILL_MARK * DYNAMIC_POOL_LOW_MARK_PERCENT) / 100)

#define DYNAMIC_POOL_REFILL_DEFER_WINDOW_MS 10
#define DYNAMIC_POOL_KTHREAD_NICE_VAL 10
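
/*
 * The marks above are expressed in bytes; the helpers below convert them to
 * a per-pool page count based on the pool's order, so that the 100M fill mark
 * and the 40M low mark describe the same amount of memory for every order.
 */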
static int get_dynamic_pool_fillmark(struct dynamic_page_pool *pool)
{
        return DYNAMIC_POOL_FILL_MARK / (PAGE_SIZE << pool->order);
}

static bool dynamic_pool_fillmark_reached(struct dynamic_page_pool *pool)
{
        return atomic_read(&pool->count) >= get_dynamic_pool_fillmark(pool);
}

static int get_dynamic_pool_lowmark(struct dynamic_page_pool *pool)
{
        return DYNAMIC_POOL_LOW_MARK / (PAGE_SIZE << pool->order);
}

static bool dynamic_pool_count_below_lowmark(struct dynamic_page_pool *pool)
{
        return atomic_read(&pool->count) < get_dynamic_pool_lowmark(pool);
}

/* Based on gfp_zone() in mm/mmzone.c since it is not exported. */
enum zone_type dynamic_pool_gfp_zone(gfp_t flags)
{
        enum zone_type z;
        gfp_t local_flags = flags;
        int bit;

        bit = (__force int) ((local_flags) & GFP_ZONEMASK);

        z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
                ((1 << GFP_ZONES_SHIFT) - 1);
        VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
        return z;
}

/*
 * Based on __zone_watermark_ok() in mm/page_alloc.c since it is not exported.
 *
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
static bool __dynamic_pool_zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
                                             int highest_zoneidx, long free_pages)
{
        long min = mark;
        long unusable_free;
        int o;

        /*
         * Access to high atomic reserves is not required, and CMA should not be
         * used, since these allocations are non-movable.
         */
        unusable_free = ((1 << order) - 1) + z->nr_reserved_highatomic;
#ifdef CONFIG_CMA
        unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

        /* free_pages may go negative - that's OK */
        free_pages -= unusable_free;

        /*
         * Check watermarks for an order-0 allocation request. If these
         * are not met, then a high-order request also cannot go ahead
         * even if a suitable page happened to be free.
         *
         * 'min' can be taken as 'mark' since we do not expect these allocations
         * to require disruptive actions (such as running the OOM killer) or
         * a lot of effort.
         */
        if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
                return false;

        /* If this is an order-0 request then the watermark is fine */
        if (!order)
                return true;

        /* For a high-order request, check at least one suitable page is free */
        for (o = order; o < MAX_ORDER; o++) {
                struct free_area *area = &z->free_area[o];
                int mt;

                if (!area->nr_free)
                        continue;

                for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
#ifdef CONFIG_CMA
                        /*
                         * Note that this check is needed only
                         * when MIGRATE_CMA < MIGRATE_PCPTYPES.
                         */
                        if (mt == MIGRATE_CMA)
                                continue;
#endif
                        if (!free_area_empty(area, mt))
                                return true;
                }
        }

        return false;
}

/* Based on zone_watermark_ok_safe from mm/page_alloc.c since it is not exported. */
bool dynamic_pool_zone_watermark_ok_safe(struct zone *z, unsigned int order,
                                         unsigned long mark, int highest_zoneidx)
{
        long free_pages = zone_page_state(z, NR_FREE_PAGES);

        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

        return __dynamic_pool_zone_watermark_ok(z, order, mark, highest_zoneidx, free_pages);
}

/* do a simple check to see if we are in any low memory situation */
static bool dynamic_pool_refill_ok(struct dynamic_page_pool *pool)
{
        struct zone *zone;
        int i, mark;
        enum zone_type classzone_idx = dynamic_pool_gfp_zone(pool->gfp_mask);
        s64 delta;

        /* check if we are within the refill defer window */
        delta = ktime_ms_delta(ktime_get(), pool->last_low_watermark_ktime);
        if (delta < DYNAMIC_POOL_REFILL_DEFER_WINDOW_MS)
                return false;

        /*
         * make sure that if we allocate a pool->order page from buddy,
         * we don't put the zone watermarks below the high threshold.
         * This makes sure there's no unwanted repetitive refilling and
         * reclaiming of buddy pages on the pool.
         */
        for (i = classzone_idx; i >= 0; i--) {
                zone = &NODE_DATA(numa_node_id())->node_zones[i];
                if (!strcmp(zone->name, "DMA32"))
                        continue;

                mark = high_wmark_pages(zone);
                mark += 1 << pool->order;
                if (!dynamic_pool_zone_watermark_ok_safe(zone, pool->order, mark, classzone_idx)) {
                        pool->last_low_watermark_ktime = ktime_get();
                        return false;
                }
        }

        return true;
}
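
/*
 * Top up a pool from the buddy allocator until its fill mark is reached.
 * Order-0 pools are never refilled here, and the loop backs off as soon as
 * dynamic_pool_refill_ok() reports memory pressure. Reclaim is allowed
 * (__GFP_RECLAIM added, __GFP_NORETRY cleared), which is acceptable here
 * since the refill runs from the dedicated low-priority kthread rather than
 * an allocation path.
 */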
static void dynamic_page_pool_refill(struct dynamic_page_pool *pool)
{
        struct page *page;
        gfp_t gfp_refill = (pool->gfp_mask | __GFP_RECLAIM) & ~__GFP_NORETRY;

        /* skip refilling order 0 pools */
        if (!pool->order)
                return;

        while (!dynamic_pool_fillmark_reached(pool) && dynamic_pool_refill_ok(pool)) {
                page = alloc_pages(gfp_refill, pool->order);
                if (!page)
                        break;

                dynamic_page_pool_add(pool, page);
        }
}

static bool dynamic_pool_needs_refill(struct dynamic_page_pool *pool)
{
        return pool->order && dynamic_pool_count_below_lowmark(pool);
}
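
/*
 * Refill kthread: loops over every order's pool, refilling any pool that has
 * dropped below its low mark, then sleeps until the allocation path wakes it
 * via wake_up_process() (see qcom_sys_heap_alloc_largest_available()), or
 * until it is asked to stop.
 */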
static int system_heap_refill_worker(void *data)
{
        struct dynamic_page_pool **pool_list = data;
        int i;

        for (;;) {
                for (i = 0; i < NUM_ORDERS; i++) {
                        if (dynamic_pool_count_below_lowmark(pool_list[i]))
                                dynamic_page_pool_refill(pool_list[i]);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (unlikely(kthread_should_stop())) {
                        set_current_state(TASK_RUNNING);
                        break;
                }

                schedule();

                set_current_state(TASK_RUNNING);
        }

        return 0;
}
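
/*
 * Create the per-heap refill kthread, lower its priority to
 * DYNAMIC_POOL_KTHREAD_NICE_VAL, and point every pool in pool_list at it so
 * the allocation path can wake it when a pool runs low.
 */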
static int system_heap_create_refill_worker(struct qcom_system_heap *sys_heap, const char *name)
{
        struct task_struct *refill_worker;
        struct sched_attr attr = { .sched_nice = DYNAMIC_POOL_KTHREAD_NICE_VAL };
        int ret;
        int i;

        refill_worker = kthread_run(system_heap_refill_worker, sys_heap->pool_list,
                                    "%s-pool-refill-thread", name);
        if (IS_ERR(refill_worker)) {
                pr_err("%s: failed to create %s-pool-refill-thread: %ld\n",
                       __func__, name, PTR_ERR(refill_worker));
                return PTR_ERR(refill_worker);
        }

        ret = sched_setattr(refill_worker, &attr);
        if (ret) {
                pr_warn("%s: failed to set task priority for %s-pool-refill-thread: ret = %d\n",
                        __func__, name, ret);
                kthread_stop(refill_worker);
                return ret;
        }

        for (i = 0; i < NUM_ORDERS; i++)
                sys_heap->pool_list[i]->refill_worker = refill_worker;

        return ret;
}

static void system_heap_destroy_refill_worker(struct qcom_system_heap *sys_heap)
{
        kthread_stop(sys_heap->pool_list[0]->refill_worker);
}

#else

static bool dynamic_pool_needs_refill(struct dynamic_page_pool *pool)
{
        return false;
}

static int system_heap_create_refill_worker(struct qcom_system_heap *sys_heap, const char *name)
{
        return 0;
}

static void system_heap_destroy_refill_worker(struct qcom_system_heap *sys_heap)
{
}

#endif
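
/*
 * Zero a batch of pages by mapping them into a temporary kernel virtual
 * mapping with vmap(), memset()ing the whole range, and unmapping again.
 */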
static int system_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vmap(pages, num, VM_MAP, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vunmap(addr);

        return 0;
}
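
/*
 * Zero every page of a buffer before it is recycled into the page pools,
 * batching up to 32 pages per vmap() to limit mapping overhead.
 */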
static int system_heap_zero_buffer(struct qcom_sg_buffer *buffer)
{
        struct sg_table *sgt = &buffer->sg_table;
        struct sg_page_iter piter;
        struct page *pages[32];
        int p = 0;
        int ret = 0;

        for_each_sgtable_page(sgt, &piter, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = system_heap_clear_pages(pages, p, PAGE_KERNEL);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = system_heap_clear_pages(pages, p, PAGE_KERNEL);

        return ret;
}
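
/*
 * Deferred-free callback for a buffer. Under normal conditions the pages are
 * zeroed and returned to the matching-order pool; under memory pressure (or
 * if zeroing fails) they are released straight back to the buddy allocator.
 * Pages borrowed from a movable zone are never pooled: they are unpinned and
 * freed immediately.
 */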
static void system_heap_buf_free(struct deferred_freelist_item *item,
                                 enum df_reason reason)
{
        struct qcom_system_heap *sys_heap;
        struct qcom_sg_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, j;

        buffer = container_of(item, struct qcom_sg_buffer, deferred_free);
        sys_heap = dma_heap_get_drvdata(buffer->heap);

        /* Zero the buffer pages before adding back to the pool */
        if (reason == DF_NORMAL)
                if (system_heap_zero_buffer(buffer))
                        reason = DF_UNDER_PRESSURE; // On failure, just free

        table = &buffer->sg_table;
        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);

                if (reason == DF_UNDER_PRESSURE) {
                        __free_pages(page, compound_order(page));
                } else {
                        for (j = 0; j < NUM_ORDERS; j++) {
                                if (compound_order(page) == orders[j])
                                        break;
                        }
                        /* Do not keep page in the pool if it is a zone movable page */
                        if (is_zone_movable_page(page)) {
                                /* Unpin the page before freeing page back to buddy */
                                put_page(page);
                                __free_pages(page, compound_order(page));
                        } else {
                                dynamic_page_pool_free(sys_heap->pool_list[j], page);
                        }
                }
        }

        atomic_long_sub(buffer->len, &sys_heap->total_bytes);
        sg_free_table(table);
        kfree(buffer);
}
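
/*
 * Free path for a buffer: queue it on the deferred freelist and let
 * system_heap_buf_free() do the work later, passing the buffer's size in
 * pages as the item's page count.
 */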
void qcom_system_heap_free(struct qcom_sg_buffer *buffer)
{
        deferred_free(&buffer->deferred_free, system_heap_buf_free,
                      PAGE_ALIGN(buffer->len) / PAGE_SIZE);
}
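
/*
 * Walk the orders[] table and allocate the largest order that still fits in
 * 'size' and does not exceed 'max_order'. Each candidate order is tried from
 * the per-order pool first, then (for movable heaps) the movable-zone helper,
 * and finally the buddy allocator. If the pool that served the request has
 * dropped below its low mark, its refill kthread is woken.
 */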
struct page *qcom_sys_heap_alloc_largest_available(struct dynamic_page_pool **pools,
                                                   unsigned long size,
                                                   unsigned int max_order,
                                                   bool movable)
{
        struct page *page = NULL;
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                unsigned long flags;

                if (size < (PAGE_SIZE << orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                spin_lock_irqsave(&pools[i]->lock, flags);
                if (pools[i]->high_count)
                        page = dynamic_page_pool_remove(pools[i], true);
                else if (pools[i]->low_count)
                        page = dynamic_page_pool_remove(pools[i], false);
                spin_unlock_irqrestore(&pools[i]->lock, flags);

                if (!page && movable)
                        page = qcom_movable_heap_alloc_pages(pools[i]);

                if (!page)
                        page = alloc_pages(pools[i]->gfp_mask, pools[i]->order);

                if (!page)
                        continue;

                if (dynamic_pool_needs_refill(pools[i]))
                        wake_up_process(pools[i]->refill_worker);

                return page;
        }

        return NULL;
}
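
/*
 * Allocate the backing pages for a buffer and build its sg_table. Pages are
 * gathered largest-order first, and max_order is lowered to the order of each
 * allocation so the orders only ever decrease; the loop aborts early if the
 * caller has a fatal signal pending. For uncached buffers, the freshly zeroed
 * (and therefore CPU-cached) pages are flushed via a dma_map/dma_unmap cycle
 * before the buffer is handed out.
 */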
int system_qcom_sg_buffer_alloc(struct dma_heap *heap,
                                struct qcom_sg_buffer *buffer,
                                unsigned long len,
                                bool movable)
{
        struct qcom_system_heap *sys_heap;
        unsigned long size_remaining = len;
        unsigned int max_order = orders[0];
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i, ret = -ENOMEM;

        sys_heap = dma_heap_get_drvdata(heap);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->heap = heap;
        buffer->len = len;
        buffer->uncached = sys_heap->uncached;
        buffer->free = qcom_system_heap_free;

        INIT_LIST_HEAD(&pages);
        i = 0;
        while (size_remaining > 0) {
                /*
                 * Avoid trying to allocate memory if the process
                 * has been killed by SIGKILL
                 */
                if (fatal_signal_pending(current))
                        goto free_mem;

                page = qcom_sys_heap_alloc_largest_available(sys_heap->pool_list,
                                                             size_remaining,
                                                             max_order,
                                                             movable);
                if (!page)
                        goto free_mem;

                list_add_tail(&page->lru, &pages);
                size_remaining -= page_size(page);
                max_order = compound_order(page);
                i++;
        }

        table = &buffer->sg_table;
        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_mem;

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, page_size(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        /*
         * For uncached buffers, we need to initially flush cpu cache, since
         * the __GFP_ZERO on the allocation means the zeroing was done by the
         * cpu and thus it is likely cached. Map (and implicitly flush) and
         * unmap it now so we don't get corruption later on.
         */
        if (buffer->uncached) {
                dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
                dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
        }

        return 0;

free_mem:
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                /* Unpin the memory first if it was borrowed from movable zone */
                if (is_zone_movable_page(page))
                        put_page(page);
                __free_pages(page, compound_order(page));
        }

        return ret;
}
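
/*
 * dma_heap_ops.allocate implementation: allocate the sg buffer, set up
 * vmperm tracking via mem_buf_vmperm_alloc(), export the dma-buf with
 * qcom_sg_buf_ops, and account the buffer against the heap's total_bytes
 * counter.
 */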
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
                                            unsigned long len,
                                            unsigned long fd_flags,
                                            unsigned long heap_flags)
{
        struct qcom_system_heap *sys_heap;
        struct qcom_sg_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *dmabuf;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        sys_heap = dma_heap_get_drvdata(heap);
        ret = system_qcom_sg_buffer_alloc(heap, buffer, len, false);
        if (ret)
                goto free_buf_struct;

        buffer->vmperm = mem_buf_vmperm_alloc(&buffer->sg_table);
        if (IS_ERR(buffer->vmperm)) {
                ret = PTR_ERR(buffer->vmperm);
                goto free_sys_heap_mem;
        }

        /* create the dmabuf */
        exp_info.exp_name = dma_heap_get_name(heap);
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = qcom_dma_buf_export(&exp_info, &qcom_sg_buf_ops);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto free_vmperm;
        }

        atomic_long_add(buffer->len, &sys_heap->total_bytes);

        return dmabuf;

free_vmperm:
        mem_buf_vmperm_release(buffer->vmperm);
free_sys_heap_mem:
        qcom_system_heap_free(buffer);
        return ERR_PTR(ret);

free_buf_struct:
        kfree(buffer);

        return ERR_PTR(ret);
}
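
/*
 * Report how many bytes the heap currently holds in its page pools. Heaps
 * whose name begins with "system" report 0 (presumably so the pools they
 * share with an aliased qcom heap are not counted twice); all others sum
 * dynamic_page_pool_total() across every order.
 */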
static long get_pool_size_bytes(struct dma_heap *heap)
{
        long total_size = 0;
        int i;
        struct qcom_system_heap *sys_heap = dma_heap_get_drvdata(heap);

        if (!strncmp(dma_heap_get_name(heap), "system", 6))
                return 0;

        for (i = 0; i < NUM_ORDERS; i++)
                total_size += dynamic_page_pool_total(sys_heap->pool_list[i], true);

        return total_size << PAGE_SHIFT;
}

static const struct dma_heap_ops system_heap_ops = {
        .allocate = system_heap_allocate,
        .get_pool_size = get_pool_size_bytes,
};
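
/*
 * Hooks for the Android show_mem and /proc/meminfo vendor trace points:
 * report the heap's outstanding allocations (total_bytes, converted to kB)
 * whenever the total is non-zero.
 */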
static long get_system_heap_total_kbytes(struct dma_heap *heap)
{
        struct qcom_system_heap *sys_heap;

        if (!heap)
                return 0;

        sys_heap = dma_heap_get_drvdata(heap);
        if (!sys_heap)
                return 0;

        return atomic_long_read(&sys_heap->total_bytes) >> 10;
}

static void qcom_system_heap_show_mem(void *data, unsigned int filter, nodemask_t *nodemask)
{
        struct dma_heap *heap = (struct dma_heap *)data;
        long total_kbytes = get_system_heap_total_kbytes(heap);

        if (total_kbytes == 0)
                return;

        pr_info("%s: %ld kB\n", dma_heap_get_name(heap), total_kbytes);
}

static void qcom_system_heap_meminfo(void *data, struct seq_file *m)
{
        struct dma_heap *heap = (struct dma_heap *)data;
        long total_kbytes = get_system_heap_total_kbytes(heap);

        if (total_kbytes == 0)
                return;

        show_val_meminfo(m, dma_heap_get_name(heap), total_kbytes);
}
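
/*
 * Create and register a system heap: set up the pool shrinker and per-order
 * page pools, start the refill worker, add the heap (plus an optional alias
 * name), set a 64-bit DMA mask on the heap device where needed, and register
 * the show_mem / meminfo vendor hooks. Errors are logged but not propagated
 * to the caller.
 */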
void qcom_system_heap_create(const char *name, const char *system_alias, bool uncached)
{
        struct dma_heap_export_info exp_info;
        struct dma_heap *heap;
        struct qcom_system_heap *sys_heap;
        int ret;

        ret = dynamic_page_pool_init_shrinker();
        if (ret)
                goto out;

        sys_heap = kzalloc(sizeof(*sys_heap), GFP_KERNEL);
        if (!sys_heap) {
                ret = -ENOMEM;
                goto out;
        }

        exp_info.name = name;
        exp_info.ops = &system_heap_ops;
        exp_info.priv = sys_heap;

        sys_heap->uncached = uncached;

        sys_heap->pool_list = dynamic_page_pool_create_pools(0, NULL);
        if (IS_ERR(sys_heap->pool_list)) {
                ret = PTR_ERR(sys_heap->pool_list);
                goto free_heap;
        }

        ret = system_heap_create_refill_worker(sys_heap, name);
        if (ret)
                goto free_pools;

        heap = dma_heap_add(&exp_info);
        if (IS_ERR(heap)) {
                ret = PTR_ERR(heap);
                goto stop_worker;
        }

        if (uncached)
                dma_coerce_mask_and_coherent(dma_heap_get_dev(heap),
                                             DMA_BIT_MASK(64));

        pr_info("%s: DMA-BUF Heap: Created '%s'\n", __func__, name);

        if (system_alias != NULL) {
                exp_info.name = system_alias;
                heap = dma_heap_add(&exp_info);
                if (IS_ERR(heap)) {
                        pr_err("%s: Failed to create '%s', error is %ld\n", __func__,
                               system_alias, PTR_ERR(heap));
                        return;
                }
                dma_coerce_mask_and_coherent(dma_heap_get_dev(heap), DMA_BIT_MASK(64));
                pr_info("%s: DMA-BUF Heap: Created '%s'\n", __func__, system_alias);
        }

        register_trace_android_vh_show_mem(qcom_system_heap_show_mem, (void *)heap);
        register_trace_android_vh_meminfo_proc_show(qcom_system_heap_meminfo, (void *)heap);

        return;

stop_worker:
        system_heap_destroy_refill_worker(sys_heap);
free_pools:
        dynamic_page_pool_release_pools(sys_heap->pool_list);
free_heap:
        kfree(sys_heap);
out:
        pr_err("%s: Failed to create '%s', error is %d\n", __func__, name, ret);
}