// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF RBIN heap exporter for Samsung
 *
 * Copyright (c) 2021 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/scatterlist.h>
#include <linux/sched/signal.h>	/* fatal_signal_pending() */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/cpuhotplug.h>
#include <linux/mm_types.h>
#include <linux/types.h>
#include <trace/hooks/mm.h>

#include "rbinregion.h"
#include "deferred-free-helper.h"
#include "qcom_dt_parser.h"
#include "qcom_sg_ops.h"
/* Page types we track in the pool */
enum {
	POOL_LOWPAGE,	/* Clean lowmem pages */
	POOL_HIGHPAGE,	/* Clean highmem pages */
	POOL_TYPE_SIZE,
};

/**
 * struct rbin_dmabuf_page_pool - page pool struct
 * @count: number of pages of each type in the pool
 * @items: list of pages of each type
 * @lock: lock protecting this struct, in particular the counts
 *        and item lists
 * @gfp_mask: gfp mask to use for allocations
 * @order: order of the pages in the pool
 * @list: list node for the list of pools
 *
 * Keeps a pool of pre-allocated pages for later reuse.
 */
struct rbin_dmabuf_page_pool {
	int count[POOL_TYPE_SIZE];
	struct list_head items[POOL_TYPE_SIZE];
	spinlock_t lock;
	gfp_t gfp_mask;
	unsigned int order;
	struct list_head list;
};
#define RBINHEAP_PREFIX "[RBIN-HEAP] "
#define perrfn(format, arg...) \
	pr_err(RBINHEAP_PREFIX "%s: " format "\n", __func__, ##arg)

#define perrdev(dev, format, arg...) \
	dev_err(dev, RBINHEAP_PREFIX format "\n", ##arg)

static struct dma_heap *rbin_cached_dma_heap;
static struct dma_heap *rbin_uncached_dma_heap;

static const unsigned int orders[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}
struct rbin_heap {
	struct task_struct *task;
	struct task_struct *task_shrink;
	bool task_run;
	bool shrink_run;
	wait_queue_head_t waitqueue;
	unsigned long count;
	struct rbin_dmabuf_page_pool *pools[NUM_ORDERS];
};
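
/*
 * Pool helpers: pages sit on per-type (highmem/lowmem) lists under the
 * pool spinlock. Fetching prefers highmem pages so that scarce lowmem
 * is consumed last.
 */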
static void rbin_page_pool_add(struct rbin_dmabuf_page_pool *pool, struct page *page)
{
	int index;

	if (PageHighMem(page))
		index = POOL_HIGHPAGE;
	else
		index = POOL_LOWPAGE;

	spin_lock(&pool->lock);
	list_add_tail(&page->lru, &pool->items[index]);
	pool->count[index]++;
	spin_unlock(&pool->lock);
}

static struct page *rbin_page_pool_remove(struct rbin_dmabuf_page_pool *pool, int index)
{
	struct page *page;

	spin_lock(&pool->lock);
	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
	if (page) {
		pool->count[index]--;
		list_del(&page->lru);
	}
	spin_unlock(&pool->lock);

	return page;
}

static struct page *rbin_page_pool_fetch(struct rbin_dmabuf_page_pool *pool)
{
	struct page *page = NULL;

	page = rbin_page_pool_remove(pool, POOL_HIGHPAGE);
	if (!page)
		page = rbin_page_pool_remove(pool, POOL_LOWPAGE);

	return page;
}
static struct rbin_dmabuf_page_pool *rbin_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
	struct rbin_dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	int i;

	if (!pool)
		return NULL;

	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		pool->count[i] = 0;
		INIT_LIST_HEAD(&pool->items[i]);
	}
	pool->gfp_mask = gfp_mask | __GFP_COMP;
	pool->order = order;
	spin_lock_init(&pool->lock);

	return pool;
}

static void rbin_page_pool_free(struct rbin_dmabuf_page_pool *pool, struct page *page)
{
	rbin_page_pool_add(pool, page);
}
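
/*
 * Allocate a physically contiguous chunk from the RBIN region, trying
 * progressively smaller orders until the region can satisfy the request.
 * The chunk is zeroed and its size is stashed in page_private() so the
 * free/split paths know how much was handed out. Returns an ERR_PTR on
 * failure; -EBUSY means the region is temporarily busy and worth retrying.
 */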
static struct page *alloc_rbin_page(unsigned long size, unsigned long last_size)
{
	struct page *page = ERR_PTR(-ENOMEM);
	phys_addr_t paddr = -ENOMEM;
	void *addr;
	int order;

	order = min(get_order(last_size), get_order(size));
	for (; order >= 0; order--) {
		size = min_t(unsigned long, size, PAGE_SIZE << order);
		paddr = dmabuf_rbin_allocate(size);
		if (paddr == -ENOMEM)
			continue;
		if (paddr == -EBUSY)
			page = ERR_PTR(-EBUSY);
		break;
	}

	if (!IS_ERR_VALUE(paddr)) {
		page = phys_to_page(paddr);
		INIT_LIST_HEAD(&page->lru);
		addr = page_address(page);
		memset(addr, 0, size);
		set_page_private(page, size);
	}

	return page;
}
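
/*
 * Trim a chunk down to @nr_pages pages. The tail that is no longer
 * needed is split into power-of-two pieces, each tagged with its size
 * via page_private() and returned to the matching order pool.
 */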
static inline void do_expand(struct rbin_heap *rbin_heap,
			     struct page *page, unsigned int nr_pages)
{
	unsigned int rem_nr_pages;
	unsigned int order;
	unsigned int total_nr_pages;
	unsigned int free_nr_page;
	struct page *free_page;
	struct rbin_dmabuf_page_pool *pool;

	total_nr_pages = page_private(page) >> PAGE_SHIFT;
	rem_nr_pages = total_nr_pages - nr_pages;
	free_page = page + total_nr_pages;

	while (rem_nr_pages) {
		order = ilog2(rem_nr_pages);
		free_nr_page = 1 << order;
		free_page -= free_nr_page;
		set_page_private(free_page, free_nr_page << PAGE_SHIFT);
		pool = rbin_heap->pools[order_to_index(order)];
		rbin_page_pool_free(pool, free_page);
		rem_nr_pages -= free_nr_page;
	}
	set_page_private(page, nr_pages << PAGE_SHIFT);
}
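
/*
 * Satisfy an allocation from the pools if possible. A chunk of the
 * requested order (or higher, trimmed via do_expand()) is preferred;
 * failing that, any smaller chunk is returned and the caller loops to
 * collect the remainder.
 */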
static struct page *alloc_rbin_page_from_pool(struct rbin_heap *rbin_heap,
					      unsigned long size)
{
	struct page *page = NULL;
	unsigned int size_order = get_order(size);
	unsigned int nr_pages = size >> PAGE_SHIFT;
	int i;

	/* Try the same or a higher order first */
	for (i = NUM_ORDERS - 1; i >= 0; i--) {
		if (orders[i] < size_order)
			continue;
		page = rbin_page_pool_fetch(rbin_heap->pools[i]);
		if (!page)
			continue;
		if (nr_pages < (1 << orders[i]))
			do_expand(rbin_heap, page, nr_pages);
		goto done;
	}

	/* Fall back to a lower order */
	for (i = 0; i < NUM_ORDERS; i++) {
		if (orders[i] >= size_order)
			continue;
		page = rbin_page_pool_fetch(rbin_heap->pools[i]);
		if (!page)
			continue;
		goto done;
	}
done:
	if (page)
		atomic_sub(page_private(page) >> PAGE_SHIFT, &rbin_pool_pages);

	return page;
}
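
/*
 * Release every chunk of a buffer back to the RBIN region. Each
 * scatterlist entry covers one contiguous chunk whose size was recorded
 * in page_private() at allocation time.
 */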
static void rbin_heap_free(struct qcom_sg_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		page = sg_page(sg);
		dmabuf_rbin_free(page_to_phys(page), page_private(page));
	}
	atomic_sub(buffer->len >> PAGE_SHIFT, &rbin_allocated_pages);
	sg_free_table(table);
	kfree(buffer);
}
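
/*
 * Allocate a dma-buf backed by RBIN memory. The buffer is assembled
 * chunk by chunk, preferring pre-reclaimed chunks from the pools and
 * falling back to carving directly out of the region, then exported
 * through the qcom sg-table ops.
 */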
static struct dma_buf *rbin_heap_allocate(struct dma_heap *heap, unsigned long len,
					  unsigned long fd_flags, unsigned long heap_flags,
					  bool uncached)
{
	struct rbin_heap *rbin_heap = dma_heap_get_drvdata(heap);
	struct qcom_sg_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remain;
	unsigned long last_size;
	unsigned long nr_free;
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i = 0;
	int ret = -ENOMEM;

	size_remain = last_size = PAGE_ALIGN(len);
	nr_free = rbin_heap->count - atomic_read(&rbin_allocated_pages);
	if (size_remain > nr_free << PAGE_SHIFT)
		return ERR_PTR(ret);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;
	buffer->uncached = uncached;
	buffer->free = rbin_heap_free;

	INIT_LIST_HEAD(&pages);
	while (size_remain > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL.
		 */
		if (fatal_signal_pending(current)) {
			perrfn("Fatal signal pending pid #%d", current->pid);
			ret = -EINTR;
			goto free_buffer;
		}

		if (atomic_read(&rbin_pool_pages)) {
			page = alloc_rbin_page_from_pool(rbin_heap, size_remain);
			if (page)
				goto got_pg;
		}

		page = alloc_rbin_page(size_remain, last_size);
		if (IS_ERR(page))
			goto free_buffer;
		last_size = page_private(page);
got_pg:
		list_add_tail(&page->lru, &pages);
		size_remain -= page_private(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL)) {
		/* sg_alloc_table() failing means we ran out of memory */
		ret = -ENOMEM;
		perrfn("sg_alloc_table failed %d", ret);
		goto free_buffer;
	}

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_private(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/*
	 * For uncached buffers, we need to initially flush the cpu cache:
	 * the zeroing at allocation time was done by the cpu, so the data
	 * is likely still cached. Map (and implicitly flush) and unmap the
	 * buffer now so we don't get corruption later on.
	 */
	if (buffer->uncached) {
		dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
		dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
	}

	buffer->vmperm = mem_buf_vmperm_alloc(table);
	if (IS_ERR(buffer->vmperm)) {
		ret = PTR_ERR(buffer->vmperm);
		perrfn("vmperm error %d", ret);
		goto free_sg;
	}

	/* Create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = mem_buf_dma_buf_export(&exp_info, &qcom_sg_buf_ops);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto vmperm_release;
	}

	atomic_add(len >> PAGE_SHIFT, &rbin_allocated_pages);

	return dmabuf;

vmperm_release:
	mem_buf_vmperm_release(buffer->vmperm);
free_sg:
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		dmabuf_rbin_free(page_to_phys(page), page_private(page));
	kfree(buffer);

	return ERR_PTR(ret);
}
static struct rbin_heap *g_rbin_heap;
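
/*
 * Wake the background threads: one pre-reclaims region memory into the
 * pools, the other drains the pools back to the region.
 */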
void wake_dmabuf_rbin_heap_prereclaim(void)
{
	if (g_rbin_heap) {
		g_rbin_heap->task_run = 1;
		wake_up(&g_rbin_heap->waitqueue);
	}
}

void wake_dmabuf_rbin_heap_shrink(void)
{
	if (g_rbin_heap) {
		g_rbin_heap->shrink_run = 1;
		wake_up(&g_rbin_heap->waitqueue);
	}
}
static void dmabuf_rbin_heap_destroy_pools(struct rbin_dmabuf_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		kfree(pools[i]);
}

static int dmabuf_rbin_heap_create_pools(struct rbin_dmabuf_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		pools[i] = rbin_page_pool_create(GFP_KERNEL, orders[i]);
		if (!pools[i])
			goto err_create_pool;
	}
	return 0;

err_create_pool:
	dmabuf_rbin_heap_destroy_pools(pools);
	return -ENOMEM;
}
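
/*
 * The reclaim threads are pinned to CPUs 0-3, presumably the little
 * cluster on this SoC, so background reclaim does not disturb the big
 * cores. The hotplug callback restores the affinity mask whenever one
 * of those CPUs comes back online.
 */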
#define RBIN_CORE_NUM_FIRST 0
#define RBIN_CORE_NUM_LAST 3

static struct cpumask rbin_cpumask;

static void init_rbin_cpumask(void)
{
	int i;

	cpumask_clear(&rbin_cpumask);

	for (i = RBIN_CORE_NUM_FIRST; i <= RBIN_CORE_NUM_LAST; i++)
		cpumask_set_cpu(i, &rbin_cpumask);
}

static int rbin_cpu_online(unsigned int cpu)
{
	if (cpumask_any_and(cpu_online_mask, &rbin_cpumask) < nr_cpu_ids) {
		/* One of our CPUs came online: restore the affinity mask */
		set_cpus_allowed_ptr(g_rbin_heap->task, &rbin_cpumask);
		set_cpus_allowed_ptr(g_rbin_heap->task_shrink, &rbin_cpumask);
	}
	return 0;
}
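
/*
 * Background pre-reclaim: when woken, keep carving the largest possible
 * chunks out of the RBIN region and stash them in the order pools, so
 * later allocations can be satisfied without touching the region. On
 * -EBUSY the region is retried for up to 100 ms before giving up.
 */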
static int dmabuf_rbin_heap_prereclaim(void *data)
{
	struct rbin_heap *rbin_heap = data;
	unsigned int order;
	unsigned long size = PAGE_SIZE << orders[0];
	unsigned long last_size;
	struct rbin_dmabuf_page_pool *pool;
	struct page *page;
	unsigned long jiffies_bstop;

	set_cpus_allowed_ptr(current, &rbin_cpumask);
	while (true) {
		wait_event_freezable(rbin_heap->waitqueue, rbin_heap->task_run);
		jiffies_bstop = jiffies + (HZ / 10);
		last_size = size;
		while (true) {
			page = alloc_rbin_page(size, last_size);
			if (PTR_ERR(page) == -ENOMEM)
				break;
			if (PTR_ERR(page) == -EBUSY) {
				if (time_is_after_jiffies(jiffies_bstop))
					continue;
				break;
			}
			last_size = page_private(page);
			order = get_order(page_private(page));
			pool = rbin_heap->pools[order_to_index(order)];
			rbin_page_pool_free(pool, page);
			atomic_add(1 << order, &rbin_pool_pages);
		}
		rbin_heap->task_run = 0;
	}
	return 0;
}
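
/*
 * Background shrink: when woken, drain the pools and hand the chunks
 * back to the RBIN region so the memory can be reclaimed for other users.
 */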
static int dmabuf_rbin_heap_shrink(void *data)
{
	struct rbin_heap *rbin_heap = data;
	unsigned long size = PAGE_SIZE << orders[0];
	struct page *page;

	set_cpus_allowed_ptr(current, &rbin_cpumask);
	while (true) {
		wait_event_freezable(rbin_heap->waitqueue, rbin_heap->shrink_run);
		while (true) {
			page = alloc_rbin_page_from_pool(rbin_heap, size);
			if (!page)
				break;
			dmabuf_rbin_free(page_to_phys(page), page_private(page));
		}
		rbin_heap->shrink_run = 0;
	}
	return 0;
}
/*
 * Dummy .allocate callback, used until dma_coerce_mask_and_coherent() has
 * been called on the heap device; swapped out at the end of add_rbin_heap().
 */
static struct dma_buf *rbin_heap_allocate_not_initialized(struct dma_heap *heap,
							  unsigned long len,
							  unsigned long fd_flags,
							  unsigned long heap_flags)
{
	return ERR_PTR(-EBUSY);
}

static struct dma_buf *rbin_cached_heap_allocate(struct dma_heap *heap,
						 unsigned long len,
						 unsigned long fd_flags,
						 unsigned long heap_flags)
{
	return rbin_heap_allocate(heap, len, fd_flags, heap_flags, false);
}

static struct dma_heap_ops rbin_cached_heap_ops = {
	.allocate = rbin_heap_allocate_not_initialized,
};

static struct dma_buf *rbin_uncached_heap_allocate(struct dma_heap *heap,
						   unsigned long len,
						   unsigned long fd_flags,
						   unsigned long heap_flags)
{
	return rbin_heap_allocate(heap, len, fd_flags, heap_flags, true);
}

static struct dma_heap_ops rbin_uncached_heap_ops = {
	/* .allocate is swapped in once rbin_heap creation is complete */
	.allocate = rbin_heap_allocate_not_initialized,
};
struct kobject *rbin_kobject;
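
/*
 * Vendor hook callbacks: fold the RBIN counters into show_mem(),
 * /proc/meminfo and the si_meminfo()/si_mem_available() figures so the
 * reserved memory stays visible to userspace accounting.
 */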
static void rbin_heap_show_mem(void *data, unsigned int filter, nodemask_t *nodemask)
{
	struct dma_heap *heap = (struct dma_heap *)data;
	struct rbin_heap *rbin_heap;

	if (!heap)
		return;
	rbin_heap = dma_heap_get_drvdata(heap);
	if (!rbin_heap)
		return;

	pr_info("rbintotal: %lu kB rbinpool: %d kB rbinfree: %d kB rbincache: %d kB\n",
		rbin_heap->count << (PAGE_SHIFT - 10),
		atomic_read(&rbin_pool_pages) << (PAGE_SHIFT - 10),
		atomic_read(&rbin_free_pages) << (PAGE_SHIFT - 10),
		atomic_read(&rbin_cached_pages) << (PAGE_SHIFT - 10));
}
static void show_rbin_meminfo(void *data, struct seq_file *m)
{
	struct dma_heap *heap = (struct dma_heap *)data;
	struct rbin_heap *rbin_heap;
	u64 rbin_allocated_kb, rbin_pool_kb;

	if (!heap)
		return;
	rbin_heap = dma_heap_get_drvdata(heap);
	if (!rbin_heap)
		return;

	rbin_allocated_kb = (u64)(atomic_read(&rbin_allocated_pages) << (PAGE_SHIFT - 10));
	rbin_pool_kb = (u64)(atomic_read(&rbin_pool_pages) << (PAGE_SHIFT - 10));

	show_val_meminfo(m, "RbinTotal", rbin_heap->count << (PAGE_SHIFT - 10));
	show_val_meminfo(m, "RbinAlloced", rbin_allocated_kb + rbin_pool_kb);
	show_val_meminfo(m, "RbinPool", rbin_pool_kb);
	show_val_meminfo(m, "RbinFree", (u64)(atomic_read(&rbin_free_pages) << (PAGE_SHIFT - 10)));
	show_val_meminfo(m, "RbinCached", (u64)(atomic_read(&rbin_cached_pages) << (PAGE_SHIFT - 10)));
}

static void rbin_cache_adjust(void *data, unsigned long *cached)
{
	*cached += (unsigned long)atomic_read(&rbin_cached_pages);
}

static void rbin_available_adjust(void *data, unsigned long *available)
{
	*available += (unsigned long)atomic_read(&rbin_cached_pages);
	*available += (unsigned long)atomic_read(&rbin_free_pages);
}
static void rbin_meminfo_adjust(void *data, unsigned long *totalram,
				unsigned long *freeram)
{
	struct dma_heap *heap = (struct dma_heap *)data;
	struct rbin_heap *rbin_heap;

	if (!heap)
		return;
	rbin_heap = dma_heap_get_drvdata(heap);
	if (!rbin_heap)
		return;

	*totalram += rbin_heap->count;
	*freeram += (unsigned long)atomic_read(&rbin_free_pages);
}
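
/*
 * Heap registration: validate the reserved memory-region, set up the
 * pools, the region and the two kthreads, then export a cached and an
 * uncached dma-heap. The real .allocate callbacks are only installed
 * once the DMA mask of the heap device has been configured.
 */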
int add_rbin_heap(struct platform_heap *heap_data)
{
	struct dma_heap_export_info exp_info;
	struct rbin_heap *rbin_heap;
	int ret = 0;

	if (!heap_data->base) {
		perrdev(heap_data->dev, "memory-region has no base");
		ret = -ENODEV;
		goto out;
	}
	if (!heap_data->size) {
		perrdev(heap_data->dev, "memory-region has no size");
		ret = -ENOMEM;
		goto out;
	}

	rbin_heap = kzalloc(sizeof(struct rbin_heap), GFP_KERNEL);
	if (!rbin_heap) {
		perrdev(heap_data->dev, "failed to alloc rbin_heap");
		ret = -ENOMEM;
		goto out;
	}

	rbin_kobject = kobject_create_and_add("rbin", kernel_kobj);
	if (!rbin_kobject) {
		perrdev(heap_data->dev, "failed to create rbin_kobject");
		ret = -ENOMEM;
		goto free_rbin_heap;
	}

	if (dmabuf_rbin_heap_create_pools(rbin_heap->pools)) {
		perrdev(heap_data->dev, "failed to create dma-buf page pool");
		ret = -ENOMEM;
		goto free_rbin_kobject;
	}

	ret = init_rbinregion(heap_data->base, heap_data->size);
	if (ret) {
		perrdev(heap_data->dev, "failed to init rbinregion");
		goto destroy_pools;
	}

	init_rbin_cpumask();
	init_waitqueue_head(&rbin_heap->waitqueue);
	rbin_heap->count = heap_data->size >> PAGE_SHIFT;
	rbin_heap->task = kthread_run(dmabuf_rbin_heap_prereclaim, rbin_heap, "rbin");
	rbin_heap->task_shrink = kthread_run(dmabuf_rbin_heap_shrink, rbin_heap, "rbin_shrink");
	g_rbin_heap = rbin_heap;
	pr_info("%s created %s\n", __func__, heap_data->name);

	exp_info.name = "qcom,camera";
	exp_info.ops = &rbin_cached_heap_ops;
	exp_info.priv = rbin_heap;
	rbin_cached_dma_heap = dma_heap_add(&exp_info);
	if (IS_ERR(rbin_cached_dma_heap)) {
		perrdev(heap_data->dev, "failed to dma_heap_add camera");
		ret = PTR_ERR(rbin_cached_dma_heap);
		goto destroy_pools;
	}

	exp_info.name = "qcom,camera-uncached";
	exp_info.ops = &rbin_uncached_heap_ops;
	/* Both heaps need the same drvdata: rbin_heap_allocate() dereferences it */
	exp_info.priv = rbin_heap;
	rbin_uncached_dma_heap = dma_heap_add(&exp_info);
	if (IS_ERR(rbin_uncached_dma_heap)) {
		perrdev(heap_data->dev, "failed to dma_heap_add camera-uncached");
		ret = PTR_ERR(rbin_uncached_dma_heap);
		goto destroy_pools;
	}

	dma_coerce_mask_and_coherent(dma_heap_get_dev(rbin_uncached_dma_heap), DMA_BIT_MASK(64));
	mb(); /* make sure we only set allocate after dma_mask is set */
	rbin_cached_heap_ops.allocate = rbin_cached_heap_allocate;
	rbin_uncached_heap_ops.allocate = rbin_uncached_heap_allocate;

	register_trace_android_vh_show_mem(rbin_heap_show_mem, (void *)rbin_cached_dma_heap);
	register_trace_android_vh_meminfo_proc_show(show_rbin_meminfo, (void *)rbin_cached_dma_heap);
	register_trace_android_vh_meminfo_cache_adjust(rbin_cache_adjust, NULL);
	register_trace_android_vh_si_mem_available_adjust(rbin_available_adjust, NULL);
	register_trace_android_vh_si_meminfo_adjust(rbin_meminfo_adjust, (void *)rbin_cached_dma_heap);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"ion/rbin:online", rbin_cpu_online,
					NULL);
	if (ret < 0)
		pr_err("rbin: failed to register 'online' hotplug state\n");

	pr_info("%s done\n", __func__);
	return 0;

destroy_pools:
	dmabuf_rbin_heap_destroy_pools(rbin_heap->pools);
free_rbin_kobject:
	kobject_put(rbin_kobject);
free_rbin_heap:
	kfree(rbin_heap);
out:
	return ret;
}