rbin_heap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF Rbin heap exporter for Samsung
 *
 * Copyright (c) 2021 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/cpuhotplug.h>
#include <linux/mm_types.h>
#include <linux/types.h>
#include <trace/hooks/mm.h>

#include "rbinregion.h"
#include "deferred-free-helper.h"
#include "qcom_dt_parser.h"
#include "qcom_sg_ops.h"

/* Page types we track in the pools */
enum {
        POOL_LOWPAGE,   /* Clean lowmem pages */
        POOL_HIGHPAGE,  /* Clean highmem pages */
        POOL_TYPE_SIZE,
};

/**
 * struct rbin_dmabuf_page_pool - pagepool struct
 * @count[]: array of number of pages of that type in the pool
 * @items[]: array of list of pages of the specific type
 * @lock: lock protecting this struct and especially the count item list
 * @gfp_mask: gfp_mask to use from alloc
 * @order: order of pages in the pool
 * @list: list node for list of pools
 *
 * Allows you to keep a pool of pre-allocated pages to use.
 */
struct rbin_dmabuf_page_pool {
        int count[POOL_TYPE_SIZE];
        struct list_head items[POOL_TYPE_SIZE];
        spinlock_t lock;
        gfp_t gfp_mask;
        unsigned int order;
        struct list_head list;
};

#define RBINHEAP_PREFIX "[RBIN-HEAP] "
#define perrfn(format, arg...) \
        pr_err(RBINHEAP_PREFIX "%s: " format "\n", __func__, ##arg)
#define perrdev(dev, format, arg...) \
        dev_err(dev, RBINHEAP_PREFIX format "\n", ##arg)

static struct dma_heap *rbin_cached_dma_heap;
static struct dma_heap *rbin_uncached_dma_heap;

static const unsigned int orders[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

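/*
 * Map a page order to its slot in orders[]/pools[]. orders[] runs from the
 * largest supported order (10, i.e. 4 MB with 4 KB pages) down to order 0,
 * so lower indices mean bigger blocks. Asking for an order that is not in
 * the table is a programming error and triggers BUG().
 */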
static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

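/*
 * Per-heap state shared by the cached and uncached exporters:
 * @task:        kthread that pre-reclaims RBIN memory into the page pools
 * @task_shrink: kthread that drains the page pools back to the RBIN region
 * @task_run, @shrink_run: wakeup conditions checked by the two kthreads
 * @waitqueue:   waitqueue both kthreads sleep on
 * @count:       total size of the backing memory-region, in pages
 * @pools:       one page pool per entry in orders[]
 */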
struct rbin_heap {
        struct task_struct *task;
        struct task_struct *task_shrink;
        bool task_run;
        bool shrink_run;
        wait_queue_head_t waitqueue;
        unsigned long count;
        struct rbin_dmabuf_page_pool *pools[NUM_ORDERS];
};

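/*
 * Pool helpers: pages sit on per-type lists (highmem vs. lowmem) under
 * pool->lock. Fetching tries highmem first, presumably so that lowmem is
 * preserved for callers that actually need a kernel mapping.
 */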
static void rbin_page_pool_add(struct rbin_dmabuf_page_pool *pool, struct page *page)
{
        int index;

        if (PageHighMem(page))
                index = POOL_HIGHPAGE;
        else
                index = POOL_LOWPAGE;

        spin_lock(&pool->lock);
        list_add_tail(&page->lru, &pool->items[index]);
        pool->count[index]++;
        spin_unlock(&pool->lock);
}

static struct page *rbin_page_pool_remove(struct rbin_dmabuf_page_pool *pool, int index)
{
        struct page *page;

        spin_lock(&pool->lock);
        page = list_first_entry_or_null(&pool->items[index], struct page, lru);
        if (page) {
                pool->count[index]--;
                list_del(&page->lru);
        }
        spin_unlock(&pool->lock);

        return page;
}

static struct page *rbin_page_pool_fetch(struct rbin_dmabuf_page_pool *pool)
{
        struct page *page = NULL;

        page = rbin_page_pool_remove(pool, POOL_HIGHPAGE);
        if (!page)
                page = rbin_page_pool_remove(pool, POOL_LOWPAGE);

        return page;
}

static struct rbin_dmabuf_page_pool *rbin_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
        struct rbin_dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
        int i;

        if (!pool)
                return NULL;

        for (i = 0; i < POOL_TYPE_SIZE; i++) {
                pool->count[i] = 0;
                INIT_LIST_HEAD(&pool->items[i]);
        }
        pool->gfp_mask = gfp_mask | __GFP_COMP;
        pool->order = order;
        spin_lock_init(&pool->lock);

        return pool;
}

static void rbin_page_pool_free(struct rbin_dmabuf_page_pool *pool, struct page *page)
{
        rbin_page_pool_add(pool, page);
}

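/*
 * Carve a physically contiguous chunk out of the RBIN region. Start at the
 * smaller of get_order(size) and get_order(last_size) (last_size is the size
 * of the previous successful allocation, used as a hint to avoid retrying
 * orders that already failed) and walk down until the region can satisfy the
 * request. dmabuf_rbin_allocate() returns -ENOMEM when no chunk of that size
 * is free and -EBUSY when the pages are temporarily unavailable (presumably
 * still held by the RBIN cache); -EBUSY is passed back so the caller can
 * decide whether to retry. On success the chunk is zeroed and its byte size
 * is stashed in page_private().
 */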
static struct page *alloc_rbin_page(unsigned long size, unsigned long last_size)
{
        struct page *page = ERR_PTR(-ENOMEM);
        phys_addr_t paddr = -ENOMEM;
        void *addr;
        int order;

        order = min(get_order(last_size), get_order(size));
        for (; order >= 0; order--) {
                size = min_t(unsigned long, size, PAGE_SIZE << order);
                paddr = dmabuf_rbin_allocate(size);
                if (paddr == -ENOMEM)
                        continue;
                if (paddr == -EBUSY)
                        page = ERR_PTR(-EBUSY);
                break;
        }

        if (!IS_ERR_VALUE(paddr)) {
                page = phys_to_page(paddr);
                INIT_LIST_HEAD(&page->lru);
                addr = page_address(page);
                memset(addr, 0, size);
                set_page_private(page, size);
        }

        return page;
}

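/*
 * A pool can hand back a block larger than what was asked for. Keep the
 * first @nr_pages pages for the caller and break the remainder, from the
 * tail, into the largest possible power-of-two chunks, returning each chunk
 * to the pool of the matching order. page_private() always carries the byte
 * size of the block it tags.
 */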
static inline void do_expand(struct rbin_heap *rbin_heap,
                             struct page *page, unsigned int nr_pages)
{
        unsigned int rem_nr_pages;
        unsigned int order;
        unsigned int total_nr_pages;
        unsigned int free_nr_page;
        struct page *free_page;
        struct rbin_dmabuf_page_pool *pool;

        total_nr_pages = page_private(page) >> PAGE_SHIFT;
        rem_nr_pages = total_nr_pages - nr_pages;
        free_page = page + total_nr_pages;

        while (rem_nr_pages) {
                order = ilog2(rem_nr_pages);
                free_nr_page = 1 << order;
                free_page -= free_nr_page;
                set_page_private(free_page, free_nr_page << PAGE_SHIFT);
                pool = rbin_heap->pools[order_to_index(order)];
                rbin_page_pool_free(pool, free_page);
                rem_nr_pages -= free_nr_page;
        }
        set_page_private(page, nr_pages << PAGE_SHIFT);
}

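/*
 * Try to satisfy a request from the pre-reclaimed pools before touching the
 * RBIN region directly. First look for a block of the same or a higher order
 * (splitting any excess back into the pools via do_expand()), then fall back
 * to any smaller block that is available; the caller loops until the whole
 * request is covered. rbin_pool_pages tracks how many pages currently sit in
 * the pools.
 */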
static struct page *alloc_rbin_page_from_pool(struct rbin_heap *rbin_heap,
                                              unsigned long size)
{
        struct page *page = NULL;
        unsigned int size_order = get_order(size);
        unsigned int nr_pages = size >> PAGE_SHIFT;
        int i;

        /* try the same or higher order */
        for (i = NUM_ORDERS - 1; i >= 0; i--) {
                if (orders[i] < size_order)
                        continue;
                page = rbin_page_pool_fetch(rbin_heap->pools[i]);
                if (!page)
                        continue;
                if (nr_pages < (1 << orders[i]))
                        do_expand(rbin_heap, page, nr_pages);
                goto done;
        }

        /* try lower order */
        for (i = 0; i < NUM_ORDERS; i++) {
                if (orders[i] >= size_order)
                        continue;
                page = rbin_page_pool_fetch(rbin_heap->pools[i]);
                if (!page)
                        continue;
                goto done;
        }
done:
        if (page)
                atomic_sub(page_private(page) >> PAGE_SHIFT, &rbin_pool_pages);

        return page;
}

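/*
 * Buffer destructor installed in qcom_sg_buffer->free: return every chunk of
 * the scatterlist to the RBIN region and drop the buffer from the
 * rbin_allocated_pages accounting.
 */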
static void rbin_heap_free(struct qcom_sg_buffer *buffer)
{
        struct sg_table *table = &buffer->sg_table;
        struct scatterlist *sg;
        struct page *page;
        int i;

        for_each_sg(table->sgl, sg, table->nents, i) {
                page = sg_page(sg);
                dmabuf_rbin_free(page_to_phys(page), page_private(page));
        }
        atomic_sub(buffer->len >> PAGE_SHIFT, &rbin_allocated_pages);
        sg_free_table(table);
        kfree(buffer);
}

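/*
 * Common allocation path for both the cached and uncached heaps. The request
 * is filled chunk by chunk, preferring the pre-reclaimed pools and falling
 * back to carving directly out of the RBIN region, then the chunks are
 * rolled into an sg_table and exported through mem_buf_dma_buf_export()
 * with the standard qcom_sg_buf_ops.
 */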
static struct dma_buf *rbin_heap_allocate(struct dma_heap *heap, unsigned long len,
                                          unsigned long fd_flags, unsigned long heap_flags,
                                          bool uncached)
{
        struct rbin_heap *rbin_heap = dma_heap_get_drvdata(heap);
        struct qcom_sg_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        unsigned long size_remain;
        unsigned long last_size;
        unsigned long nr_free;
        struct dma_buf *dmabuf;
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i = 0;
        int ret = -ENOMEM;

        size_remain = last_size = PAGE_ALIGN(len);
        nr_free = rbin_heap->count - atomic_read(&rbin_allocated_pages);
        if (size_remain > nr_free << PAGE_SHIFT)
                return ERR_PTR(ret);

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->heap = heap;
        buffer->len = len;
        buffer->uncached = uncached;
        buffer->free = rbin_heap_free;

        INIT_LIST_HEAD(&pages);
        while (size_remain > 0) {
                /*
                 * Avoid trying to allocate memory if the process
                 * has been killed by SIGKILL.
                 */
                if (fatal_signal_pending(current)) {
                        perrfn("Fatal signal pending pid #%d", current->pid);
                        ret = -EINTR;
                        goto free_buffer;
                }

                if (atomic_read(&rbin_pool_pages)) {
                        page = alloc_rbin_page_from_pool(rbin_heap, size_remain);
                        if (page)
                                goto got_pg;
                }

                page = alloc_rbin_page(size_remain, last_size);
                if (IS_ERR(page))
                        goto free_buffer;
                last_size = page_private(page);
got_pg:
                list_add_tail(&page->lru, &pages);
                size_remain -= page_private(page);
                i++;
        }

        table = &buffer->sg_table;
        if (sg_alloc_table(table, i, GFP_KERNEL)) {
                ret = -ENOMEM;
                perrfn("sg_alloc_table failed %d", ret);
                goto free_buffer;
        }

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, page_private(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        /*
         * For uncached buffers, we need to initially flush cpu cache, since
         * the zeroing of the allocation was done by the cpu and the data is
         * thus likely cached. Map (and implicitly flush) and unmap it now so
         * we don't get corruption later on.
         */
        if (buffer->uncached) {
                dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
                dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
        }

        buffer->vmperm = mem_buf_vmperm_alloc(table);
        if (IS_ERR(buffer->vmperm)) {
                ret = PTR_ERR(buffer->vmperm);
                perrfn("vmperm error %d", ret);
                goto free_sg;
        }

        /* create the dmabuf */
        exp_info.exp_name = dma_heap_get_name(heap);
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = mem_buf_dma_buf_export(&exp_info, &qcom_sg_buf_ops);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto vmperm_release;
        }

        atomic_add(len >> PAGE_SHIFT, &rbin_allocated_pages);
        return dmabuf;

vmperm_release:
        mem_buf_vmperm_release(buffer->vmperm);
free_sg:
        sg_free_table(table);
free_buffer:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                dmabuf_rbin_free(page_to_phys(page), page_private(page));
        kfree(buffer);

        return ERR_PTR(ret);
}

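/*
 * g_rbin_heap gives code outside the allocation path a handle on the heap.
 * The two wake_* helpers below are deliberately non-static: they are the
 * entry points by which other kernel code kicks the worker threads, where
 * pre-reclaim fills the page pools and shrink drains them back into the
 * RBIN region.
 */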
static struct rbin_heap *g_rbin_heap;

void wake_dmabuf_rbin_heap_prereclaim(void)
{
        if (g_rbin_heap) {
                g_rbin_heap->task_run = 1;
                wake_up(&g_rbin_heap->waitqueue);
        }
}

void wake_dmabuf_rbin_heap_shrink(void)
{
        if (g_rbin_heap) {
                g_rbin_heap->shrink_run = 1;
                wake_up(&g_rbin_heap->waitqueue);
        }
}

static void dmabuf_rbin_heap_destroy_pools(struct rbin_dmabuf_page_pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                kfree(pools[i]);
}

static int dmabuf_rbin_heap_create_pools(struct rbin_dmabuf_page_pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                pools[i] = rbin_page_pool_create(GFP_KERNEL, orders[i]);
                if (!pools[i])
                        goto err_create_pool;
        }
        return 0;

err_create_pool:
        dmabuf_rbin_heap_destroy_pools(pools);
        return -ENOMEM;
}

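/*
 * The worker kthreads are pinned to CPUs 0-3 (presumably the little cluster
 * on the target SoC) so that background pool maintenance does not compete
 * with foreground work on the big cores.
 */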
#define RBIN_CORE_NUM_FIRST 0
#define RBIN_CORE_NUM_LAST  3

static struct cpumask rbin_cpumask;

static void init_rbin_cpumask(void)
{
        int i;

        cpumask_clear(&rbin_cpumask);
        for (i = RBIN_CORE_NUM_FIRST; i <= RBIN_CORE_NUM_LAST; i++)
                cpumask_set_cpu(i, &rbin_cpumask);
}

static int rbin_cpu_online(unsigned int cpu)
{
        if (cpumask_any_and(cpu_online_mask, &rbin_cpumask) < nr_cpu_ids) {
                /* One of our CPUs online: restore mask */
                set_cpus_allowed_ptr(g_rbin_heap->task, &rbin_cpumask);
                set_cpus_allowed_ptr(g_rbin_heap->task_shrink, &rbin_cpumask);
        }
        return 0;
}

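/*
 * Pre-reclaim worker: when woken, repeatedly carve the largest available
 * chunks out of the RBIN region and stash them in the per-order pools so
 * that later dma-buf allocations can be served without touching the region.
 * -ENOMEM means the region is exhausted and the pass stops; -EBUSY means
 * pages are temporarily unavailable, so we keep retrying for roughly 100 ms
 * (HZ / 10) before giving up.
 */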
static int dmabuf_rbin_heap_prereclaim(void *data)
{
        struct rbin_heap *rbin_heap = data;
        unsigned int order;
        unsigned long size = PAGE_SIZE << orders[0];
        unsigned long last_size;
        struct rbin_dmabuf_page_pool *pool;
        struct page *page;
        unsigned long jiffies_bstop;

        set_cpus_allowed_ptr(current, &rbin_cpumask);
        while (true) {
                wait_event_freezable(rbin_heap->waitqueue, rbin_heap->task_run);
                jiffies_bstop = jiffies + (HZ / 10);
                last_size = size;
                while (true) {
                        page = alloc_rbin_page(size, last_size);
                        if (PTR_ERR(page) == -ENOMEM)
                                break;
                        if (PTR_ERR(page) == -EBUSY) {
                                if (time_is_after_jiffies(jiffies_bstop))
                                        continue;
                                else
                                        break;
                        }
                        last_size = page_private(page);
                        order = get_order(page_private(page));
                        pool = rbin_heap->pools[order_to_index(order)];
                        rbin_page_pool_free(pool, page);
                        atomic_add(1 << order, &rbin_pool_pages);
                }
                rbin_heap->task_run = 0;
        }
        return 0;
}

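/*
 * Shrink worker: when woken, pull everything back out of the page pools and
 * hand it to dmabuf_rbin_free(), returning the memory to the RBIN region.
 */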
static int dmabuf_rbin_heap_shrink(void *data)
{
        struct rbin_heap *rbin_heap = data;
        unsigned long size = PAGE_SIZE << orders[0];
        struct page *page;

        set_cpus_allowed_ptr(current, &rbin_cpumask);
        while (true) {
                wait_event_freezable(rbin_heap->waitqueue, rbin_heap->shrink_run);
                while (true) {
                        page = alloc_rbin_page_from_pool(rbin_heap, size);
                        if (!page)
                                break;
                        dmabuf_rbin_free(page_to_phys(page), page_private(page));
                }
                rbin_heap->shrink_run = 0;
        }
        return 0;
}

/* Dummy allocate op, used until dma_coerce_mask_and_coherent() has been called */
static struct dma_buf *rbin_heap_allocate_not_initialized(struct dma_heap *heap,
                                                          unsigned long len,
                                                          unsigned long fd_flags,
                                                          unsigned long heap_flags)
{
        return ERR_PTR(-EBUSY);
}

static struct dma_buf *rbin_cached_heap_allocate(struct dma_heap *heap,
                                                 unsigned long len,
                                                 unsigned long fd_flags,
                                                 unsigned long heap_flags)
{
        return rbin_heap_allocate(heap, len, fd_flags, heap_flags, false);
}

static struct dma_heap_ops rbin_cached_heap_ops = {
        .allocate = rbin_heap_allocate_not_initialized,
};

static struct dma_buf *rbin_uncached_heap_allocate(struct dma_heap *heap,
                                                   unsigned long len,
                                                   unsigned long fd_flags,
                                                   unsigned long heap_flags)
{
        return rbin_heap_allocate(heap, len, fd_flags, heap_flags, true);
}

static struct dma_heap_ops rbin_uncached_heap_ops = {
        /* The real allocate op is swapped in at the end of add_rbin_heap() */
        .allocate = rbin_heap_allocate_not_initialized,
};

struct kobject *rbin_kobject;

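/*
 * Android vendor-hook callbacks: fold the RBIN accounting into show_mem(),
 * /proc/meminfo and the si_mem_available()/si_meminfo() adjustments so the
 * carved-out region stays visible in userspace memory statistics.
 */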
static void rbin_heap_show_mem(void *data, unsigned int filter, nodemask_t *nodemask)
{
        struct dma_heap *heap = (struct dma_heap *)data;
        struct rbin_heap *rbin_heap;

        if (!heap)
                return;
        rbin_heap = dma_heap_get_drvdata(heap);
        if (!rbin_heap)
                return;

        pr_info("rbintotal: %lu kB rbinpool: %u kB rbinfree: %u kB rbincache: %u kB\n",
                rbin_heap->count << (PAGE_SHIFT - 10),
                atomic_read(&rbin_pool_pages) << (PAGE_SHIFT - 10),
                atomic_read(&rbin_free_pages) << (PAGE_SHIFT - 10),
                atomic_read(&rbin_cached_pages) << (PAGE_SHIFT - 10));
}

static void show_rbin_meminfo(void *data, struct seq_file *m)
{
        struct dma_heap *heap = (struct dma_heap *)data;
        struct rbin_heap *rbin_heap;
        u64 rbin_allocated_kb, rbin_pool_kb;

        if (!heap)
                return;
        rbin_heap = dma_heap_get_drvdata(heap);
        if (!rbin_heap)
                return;

        rbin_allocated_kb = (u64)(atomic_read(&rbin_allocated_pages) << (PAGE_SHIFT - 10));
        rbin_pool_kb = (u64)(atomic_read(&rbin_pool_pages) << (PAGE_SHIFT - 10));

        show_val_meminfo(m, "RbinTotal", rbin_heap->count << (PAGE_SHIFT - 10));
        show_val_meminfo(m, "RbinAlloced", rbin_allocated_kb + rbin_pool_kb);
        show_val_meminfo(m, "RbinPool", rbin_pool_kb);
        show_val_meminfo(m, "RbinFree", (u64)(atomic_read(&rbin_free_pages) << (PAGE_SHIFT - 10)));
        show_val_meminfo(m, "RbinCached", (u64)(atomic_read(&rbin_cached_pages) << (PAGE_SHIFT - 10)));
}

static void rbin_cache_adjust(void *data, unsigned long *cached)
{
        *cached += (unsigned long)atomic_read(&rbin_cached_pages);
}

static void rbin_available_adjust(void *data, unsigned long *available)
{
        *available += (unsigned long)atomic_read(&rbin_cached_pages);
        *available += (unsigned long)atomic_read(&rbin_free_pages);
}

static void rbin_meminfo_adjust(void *data, unsigned long *totalram,
                                unsigned long *freeram)
{
        struct dma_heap *heap = (struct dma_heap *)data;
        struct rbin_heap *rbin_heap;

        if (!heap)
                return;
        rbin_heap = dma_heap_get_drvdata(heap);
        if (!rbin_heap)
                return;

        *totalram += rbin_heap->count;
        *freeram += (unsigned long)atomic_read(&rbin_free_pages);
}

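/*
 * Heap registration, presumably called from the qcom DT heap setup code with
 * the parsed memory-region (see qcom_dt_parser.h). Sets up the page pools and
 * the RBIN region, starts the two worker kthreads, exports the "qcom,camera"
 * (cached) and "qcom,camera-uncached" heaps, and only then swaps in the real
 * allocate ops so that no allocation can race with DMA-mask setup. Finally
 * the vendor hooks and the CPU-hotplug callback are registered.
 */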
int add_rbin_heap(struct platform_heap *heap_data)
{
        struct dma_heap_export_info exp_info;
        struct rbin_heap *rbin_heap;
        int ret = 0;

        if (!heap_data->base) {
                perrdev(heap_data->dev, "memory-region has no base");
                ret = -ENODEV;
                goto out;
        }
        if (!heap_data->size) {
                perrdev(heap_data->dev, "memory-region has no size");
                ret = -ENOMEM;
                goto out;
        }

        rbin_heap = kzalloc(sizeof(struct rbin_heap), GFP_KERNEL);
        if (!rbin_heap) {
                perrdev(heap_data->dev, "failed to alloc rbin_heap");
                ret = -ENOMEM;
                goto out;
        }

        rbin_kobject = kobject_create_and_add("rbin", kernel_kobj);
        if (!rbin_kobject) {
                perrdev(heap_data->dev, "failed to create rbin_kobject");
                ret = -ENOMEM;
                goto free_rbin_heap;
        }

        if (dmabuf_rbin_heap_create_pools(rbin_heap->pools)) {
                perrdev(heap_data->dev, "failed to create dma-buf page pool");
                ret = -ENOMEM;
                goto free_rbin_kobject;
        }

        ret = init_rbinregion(heap_data->base, heap_data->size);
        if (ret) {
                perrdev(heap_data->dev, "failed to init rbinregion");
                goto destroy_pools;
        }

        init_rbin_cpumask();
        init_waitqueue_head(&rbin_heap->waitqueue);
        rbin_heap->count = heap_data->size >> PAGE_SHIFT;
        rbin_heap->task = kthread_run(dmabuf_rbin_heap_prereclaim, rbin_heap, "rbin");
        rbin_heap->task_shrink = kthread_run(dmabuf_rbin_heap_shrink, rbin_heap, "rbin_shrink");
        g_rbin_heap = rbin_heap;
        pr_info("%s created %s\n", __func__, heap_data->name);

        exp_info.name = "qcom,camera";
        exp_info.ops = &rbin_cached_heap_ops;
        exp_info.priv = rbin_heap;

        rbin_cached_dma_heap = dma_heap_add(&exp_info);
        if (IS_ERR(rbin_cached_dma_heap)) {
                perrdev(heap_data->dev, "failed to dma_heap_add camera");
                ret = PTR_ERR(rbin_cached_dma_heap);
                goto destroy_pools;
        }

        exp_info.name = "qcom,camera-uncached";
        exp_info.ops = &rbin_uncached_heap_ops;
        exp_info.priv = rbin_heap; /* rbin_heap_allocate() reads this via dma_heap_get_drvdata() */

        rbin_uncached_dma_heap = dma_heap_add(&exp_info);
        if (IS_ERR(rbin_uncached_dma_heap)) {
                perrdev(heap_data->dev, "failed to dma_heap_add camera-uncached");
                ret = PTR_ERR(rbin_uncached_dma_heap);
                goto destroy_pools;
        }

        dma_coerce_mask_and_coherent(dma_heap_get_dev(rbin_uncached_dma_heap), DMA_BIT_MASK(64));
        mb(); /* make sure we only set allocate after dma_mask is set */
        rbin_cached_heap_ops.allocate = rbin_cached_heap_allocate;
        rbin_uncached_heap_ops.allocate = rbin_uncached_heap_allocate;

        register_trace_android_vh_show_mem(rbin_heap_show_mem, (void *)rbin_cached_dma_heap);
        register_trace_android_vh_meminfo_proc_show(show_rbin_meminfo, (void *)rbin_cached_dma_heap);
        register_trace_android_vh_meminfo_cache_adjust(rbin_cache_adjust, NULL);
        register_trace_android_vh_si_mem_available_adjust(rbin_available_adjust, NULL);
        register_trace_android_vh_si_meminfo_adjust(rbin_meminfo_adjust, (void *)rbin_cached_dma_heap);

        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "ion/rbin:online", rbin_cpu_online,
                                        NULL);
        if (ret < 0)
                pr_err("rbin: failed to register 'online' hotplug state\n");

        pr_info("%s done\n", __func__);
        return 0;

destroy_pools:
        dmabuf_rbin_heap_destroy_pools(rbin_heap->pools);
free_rbin_kobject:
        kobject_put(rbin_kobject);
free_rbin_heap:
        kfree(rbin_heap);
out:
        return ret;
}