qcom_cma_heap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 * Copied from drivers/dma-buf/heaps/cma_heap.c as of commit b61614ec318a
 * ("dma-buf: heaps: Add CMA heap to dmabuf heaps")
 *
 * Copyright (C) 2012, 2019 Linaro Ltd.
 * Author: <[email protected]> for ST-Ericsson.
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/cma.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sched/signal.h>
#include <linux/list.h>
#include <linux/of.h>

#include "qcom_cma_heap.h"
#include "qcom_sg_ops.h"
struct cma_heap {
        struct cma *cma;
        /* max_align is in units of page_order, similar to CONFIG_CMA_ALIGNMENT */
        u32 max_align;
        bool uncached;
        bool is_nomap;
};

struct dmabuf_cma_info {
        void *cpu_addr;
        dma_addr_t handle;
        struct qcom_sg_buffer buf;
};
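
/*
 * Release callback, invoked when the last reference to the dma-buf is
 * dropped. Buffers that came from dma_alloc_wc() (the no-map case) are
 * returned with dma_free_attrs(); buffers that came from cma_alloc() are
 * handed back to the CMA area with cma_release(). The sg_table and the
 * bookkeeping structure are freed in both cases.
 */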
static void cma_heap_free(struct qcom_sg_buffer *buffer)
{
        struct cma_heap *cma_heap;
        struct dmabuf_cma_info *info;

        info = container_of(buffer, struct dmabuf_cma_info, buf);
        cma_heap = dma_heap_get_drvdata(buffer->heap);

        if (info->cpu_addr) {
                struct device *dev = dma_heap_get_dev(buffer->heap);

                dma_free_attrs(dev, PAGE_ALIGN(buffer->len), info->cpu_addr,
                               info->handle, 0);
        } else {
                struct page *cma_pages = sg_page(buffer->sg_table.sgl);
                unsigned long nr_pages = buffer->len >> PAGE_SHIFT;

                /* release memory */
                cma_release(cma_heap->cma, cma_pages, nr_pages);
        }

        /* free page list */
        sg_free_table(&buffer->sg_table);
        kfree(info);
}
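
/*
 * Returns true when the heap device's "memory-region" phandle points at a
 * reserved-memory node carrying the "no-map" property, i.e. the region is
 * excluded from the kernel's linear mapping.
 */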
static bool dmabuf_cma_is_nomap(struct device *dev)
{
        struct device_node *mem_region;
        bool val = false;

        mem_region = of_parse_phandle(dev->of_node, "memory-region", 0);
        if (!mem_region)
                goto err;

        val = of_property_read_bool(mem_region, "no-map");
err:
        of_node_put(mem_region);
        return val;
}

/* dmabuf heap CMA operations functions */
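/*
 * Allocation callback for the heap. Backing memory comes either from
 * dma_alloc_wc() (when the reserved-memory region is marked no-map) or from
 * cma_alloc(), in which case the pages are zeroed explicitly. The result is
 * wrapped in a single-entry sg_table and exported as a dma-buf via
 * qcom_dma_buf_export().
 */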
struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
                                  unsigned long len,
                                  unsigned long fd_flags,
                                  unsigned long heap_flags)
{
        struct cma_heap *cma_heap;
        struct qcom_sg_buffer *helper_buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct page *cma_pages;
        size_t size = PAGE_ALIGN(len);
        unsigned long nr_pages = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        struct dma_buf *dmabuf;
        struct dmabuf_cma_info *info;
        int ret = -ENOMEM;

        cma_heap = dma_heap_get_drvdata(heap);

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);

        if (align > cma_heap->max_align)
                align = cma_heap->max_align;

        helper_buffer = &info->buf;
        helper_buffer->heap = heap;
        INIT_LIST_HEAD(&helper_buffer->attachments);
        mutex_init(&helper_buffer->lock);
        helper_buffer->len = size;
        helper_buffer->uncached = cma_heap->uncached;
        helper_buffer->free = cma_heap_free;

        if (cma_heap->is_nomap) {
                struct device *dev = dma_heap_get_dev(heap);

                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
                info->cpu_addr = dma_alloc_wc(dev, size, &info->handle,
                                              GFP_KERNEL);
                if (!info->cpu_addr) {
                        dev_err(dev, "failed to allocate buffer\n");
                        goto free_info;
                }
                cma_pages = pfn_to_page(PFN_DOWN(info->handle));
        } else {
                cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
                if (!cma_pages)
                        goto free_info;

                if (PageHighMem(cma_pages)) {
                        unsigned long nr_clear_pages = nr_pages;
                        struct page *page = cma_pages;

                        while (nr_clear_pages > 0) {
                                void *vaddr = kmap_local_page(page);

                                memset(vaddr, 0, PAGE_SIZE);
                                kunmap_local(vaddr);
                                /*
                                 * Avoid wasting time zeroing memory if the process
                                 * has been killed by SIGKILL
                                 */
                                if (fatal_signal_pending(current))
                                        goto free_cma;
                                page++;
                                nr_clear_pages--;
                        }
                } else {
                        memset(page_address(cma_pages), 0, size);
                }
        }

        ret = sg_alloc_table(&helper_buffer->sg_table, 1, GFP_KERNEL);
        if (ret)
                goto free_cma;

        sg_set_page(helper_buffer->sg_table.sgl, cma_pages, size, 0);

        helper_buffer->vmperm = mem_buf_vmperm_alloc(&helper_buffer->sg_table);
        if (IS_ERR(helper_buffer->vmperm))
                goto free_sgtable;

        /*
         * For uncached buffers, do a one-off map/unmap so that any lines the
         * CPU zeroing left in the cache are cleaned before the buffer is
         * handed out.
         */
        if (helper_buffer->uncached && !cma_heap->is_nomap) {
                dma_map_sgtable(dma_heap_get_dev(heap), &helper_buffer->sg_table,
                                DMA_BIDIRECTIONAL, 0);
                dma_unmap_sgtable(dma_heap_get_dev(heap), &helper_buffer->sg_table,
                                  DMA_BIDIRECTIONAL, 0);
        }

        /* create the dmabuf */
        exp_info.exp_name = dma_heap_get_name(heap);
        exp_info.size = helper_buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = helper_buffer;
        dmabuf = qcom_dma_buf_export(&exp_info, &qcom_sg_buf_ops);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto vmperm_release;
        }

        return dmabuf;

vmperm_release:
        mem_buf_vmperm_release(helper_buffer->vmperm);
free_sgtable:
        sg_free_table(&helper_buffer->sg_table);
free_cma:
        if (info->cpu_addr)
                dma_free_attrs(dma_heap_get_dev(heap), size, info->cpu_addr,
                               info->handle, 0);
        else
                cma_release(cma_heap->cma, cma_pages, nr_pages);
free_info:
        kfree(info);
        return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
        .allocate = cma_heap_allocate,
};
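
/*
 * Build the per-heap context from the parsed platform description and
 * register it with the dma-heap framework. The heap uses the device's
 * default CMA area and, for no-map regions, inherits its per-device
 * coherent memory (dma_mem).
 */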
static int __add_cma_heap(struct platform_heap *heap_data, void *data)
{
        struct cma_heap *cma_heap;
        struct dma_heap_export_info exp_info;
        struct dma_heap *heap;

        cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
        if (!cma_heap)
                return -ENOMEM;
        cma_heap->cma = heap_data->dev->cma_area;

        cma_heap->max_align = CONFIG_CMA_ALIGNMENT;
        if (heap_data->max_align)
                cma_heap->max_align = heap_data->max_align;

        cma_heap->uncached = heap_data->is_uncached;

        exp_info.name = heap_data->name;
        exp_info.ops = &cma_heap_ops;
        exp_info.priv = cma_heap;

        heap = dma_heap_add(&exp_info);
        if (IS_ERR(heap)) {
                int ret = PTR_ERR(heap);

                kfree(cma_heap);
                return ret;
        }

        if (cma_heap->uncached)
                dma_coerce_mask_and_coherent(dma_heap_get_dev(heap),
                                             DMA_BIT_MASK(64));

        cma_heap->is_nomap = dmabuf_cma_is_nomap(heap_data->dev);

#ifdef CONFIG_DMA_DECLARE_COHERENT
        if (cma_heap->is_nomap && !dma_heap_get_dev(heap)->dma_mem)
                dma_heap_get_dev(heap)->dma_mem = heap_data->dev->dma_mem;
#endif

        return 0;
}
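
/* Register a CMA dma-buf heap described by @heap_data. */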
int qcom_add_cma_heap(struct platform_heap *heap_data)
{
        return __add_cma_heap(heap_data, NULL);
}