// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/ubwcp_dma_heap.h>
#include <trace/hooks/dmabuf.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/qcom-dma-mapping.h>

#include "qcom_system_heap.h"

static struct dma_heap *sys_heap;
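
/*
 * Callbacks registered by the UBWC-P driver through msm_ubwcp_set_ops();
 * they initialize, free, lock and unlock UBWC-P state for a dma-buf.
 */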
struct ubwcp_driver_ops {
	init_buffer init_buffer;
	free_buffer free_buffer;
	lock_buffer lock_buffer;
	unlock_buffer unlock_buffer;
} ubwcp_driver_ops;
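
/*
 * Per-buffer state: the underlying system-heap allocation plus the current
 * linear/non-linear mode, the CPU mapping count, and the ULA PA range used
 * when the buffer is mmap'd in non-linear mode.
 */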
struct ubwcp_buffer {
	struct qcom_sg_buffer qcom_sg_buf;
	bool ubwcp_init_complete;
	struct rw_semaphore linear_mode_sem;
	bool is_linear;
	atomic_t cpu_map_count;
	phys_addr_t ula_pa_addr;
	size_t ula_pa_size;
};

struct qcom_ubwcp_heap {
	bool movable;
};
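
/*
 * CPU access in non-linear mode goes through the UBWC-P driver's lock and
 * unlock callbacks; in linear mode the buffer behaves like a plain
 * system-heap buffer and the qcom_sg_* helpers are used instead.
 */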
static int ubwcp_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					  enum dma_data_direction direction)
{
	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
	int ret;

	down_read(&buffer->linear_mode_sem);
	if (!buffer->is_linear)
		ret = ubwcp_driver_ops.lock_buffer(dmabuf, direction);
	else
		ret = qcom_sg_dma_buf_begin_cpu_access(dmabuf, direction);
	up_read(&buffer->linear_mode_sem);

	return ret;
}

static int ubwcp_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
	int ret;

	down_read(&buffer->linear_mode_sem);
	if (!buffer->is_linear)
		ret = ubwcp_driver_ops.unlock_buffer(dmabuf, direction);
	else
		ret = qcom_sg_dma_buf_end_cpu_access(dmabuf, direction);
	up_read(&buffer->linear_mode_sem);

	return ret;
}
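
/* Partial CPU access is only supported while the buffer is in linear mode. */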
static int ubwcp_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
						  enum dma_data_direction direction,
						  unsigned int offset,
						  unsigned int len)
{
	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
	int ret;

	down_read(&buffer->linear_mode_sem);
	if (!buffer->is_linear) {
		pr_err("%s: isn't in linear mode, bailing\n", __func__);
		ret = -EINVAL;
	} else {
		ret = qcom_sg_dma_buf_begin_cpu_access_partial(dmabuf, direction, offset,
							       len);
	}
	up_read(&buffer->linear_mode_sem);

	return ret;
}

static int ubwcp_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
						enum dma_data_direction direction,
						unsigned int offset,
						unsigned int len)
{
	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
	int ret;

	down_read(&buffer->linear_mode_sem);
	if (!buffer->is_linear) {
		pr_err("%s: isn't in linear mode, bailing\n", __func__);
		ret = -EINVAL;
	} else {
		ret = qcom_sg_dma_buf_end_cpu_access_partial(dmabuf, direction, offset,
							     len);
	}
	up_read(&buffer->linear_mode_sem);

	return ret;
}
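
/*
 * vm_operations for non-linear mmaps: keep cpu_map_count and the vmperm pin
 * count in step as VMAs are opened and closed.
 */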
static void qcom_sg_vm_ops_open(struct vm_area_struct *vma)
{
	struct ubwcp_buffer *buffer = vma->vm_private_data;

	atomic_inc(&buffer->cpu_map_count);
	mem_buf_vmperm_pin(buffer->qcom_sg_buf.vmperm);
}

static void qcom_sg_vm_ops_close(struct vm_area_struct *vma)
{
	struct ubwcp_buffer *buffer = vma->vm_private_data;

	atomic_dec(&buffer->cpu_map_count);
	mem_buf_vmperm_unpin(buffer->qcom_sg_buf.vmperm);
}

static const struct vm_operations_struct qcom_sg_vm_ops = {
	.open = qcom_sg_vm_ops_open,
	.close = qcom_sg_vm_ops_close,
};
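
/*
 * In linear mode, mmap falls through to the normal system-heap path.  In
 * non-linear mode the requested range is mapped onto the buffer's ULA PA
 * region with remap_pfn_range() after checking the size and the vmperm.
 */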
static int ubwcp_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
	unsigned long vaddr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	unsigned long map_len = vma->vm_end - vma->vm_start;
	int ret = 0;

	down_read(&buffer->linear_mode_sem);
	if (buffer->is_linear) {
		ret = qcom_sg_mmap(dmabuf, vma);
		goto unlock;
	}

	if (map_len + offset > buffer->ula_pa_size) {
		pr_err("mmap is too large!\n");
		ret = -EINVAL;
		goto unlock;
	}

	mem_buf_vmperm_pin(buffer->qcom_sg_buf.vmperm);
	if (!mem_buf_vmperm_can_mmap(buffer->qcom_sg_buf.vmperm, vma)) {
		mem_buf_vmperm_unpin(buffer->qcom_sg_buf.vmperm);
		ret = -EPERM;
		goto unlock;
	}

	vma->vm_ops = &qcom_sg_vm_ops;
	vma->vm_private_data = buffer;
	ret = remap_pfn_range(vma, vaddr,
			      (buffer->ula_pa_addr >> PAGE_SHIFT) + offset,
			      map_len, vma->vm_page_prot);
	if (ret) {
		mem_buf_vmperm_unpin(buffer->qcom_sg_buf.vmperm);
		goto unlock;
	}
	atomic_inc(&buffer->cpu_map_count);

unlock:
	up_read(&buffer->linear_mode_sem);
	return ret;
}
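
/* vmap and vunmap are only allowed while the buffer is in linear mode. */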
static int ubwcp_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
	int ret;

	down_read(&buffer->linear_mode_sem);
	if (!buffer->is_linear) {
		pr_err("%s: isn't in linear mode, bailing\n", __func__);
		ret = -EINVAL;
		goto unlock;
	}

	ret = qcom_sg_vmap(dmabuf, map);
	if (ret)
		goto unlock;
	atomic_inc(&buffer->cpu_map_count);

unlock:
	up_read(&buffer->linear_mode_sem);
	return ret;
}

static void ubwcp_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);

	down_read(&buffer->linear_mode_sem);
	if (!buffer->is_linear)
		pr_err("%s: isn't in linear mode, bailing\n", __func__);
	else
		qcom_sg_vunmap(dmabuf, map);

	WARN_ON(atomic_read(&buffer->cpu_map_count) <= 0);
	atomic_dec(&buffer->cpu_map_count);
	up_read(&buffer->linear_mode_sem);
}
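
/*
 * Release path: free the UBWC-P driver state (if it was initialized),
 * release the vmperm, then return the pages to the system heap.
 */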
static void ubwcp_release(struct dma_buf *dmabuf)
{
	int ret;
	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);

	if (buffer->ubwcp_init_complete) {
		ret = ubwcp_driver_ops.free_buffer(dmabuf);
		if (ret) {
			pr_err("%s: UBWC-P buffer not freed, err: %d\n", __func__, ret);
			return;
		}
	}

	ret = mem_buf_vmperm_release(buffer->qcom_sg_buf.vmperm);
	if (ret) {
		pr_err("%s: Failed to release vmperm, err: %d\n", __func__, ret);
		return;
	}

	msm_dma_buf_freed(dmabuf->priv);
	qcom_system_heap_free(&buffer->qcom_sg_buf);
}

struct mem_buf_dma_buf_ops ubwcp_ops = {
	.attach = qcom_sg_attach,
	.lookup = qcom_sg_lookup_vmperm,
	.dma_ops = {
		.attach = NULL, /* Will be set by mem_buf_dma_buf_export */
		.detach = qcom_sg_detach,
		.map_dma_buf = qcom_sg_map_dma_buf,
		.unmap_dma_buf = qcom_sg_unmap_dma_buf,
		.begin_cpu_access = ubwcp_dma_buf_begin_cpu_access,
		.end_cpu_access = ubwcp_dma_buf_end_cpu_access,
		.begin_cpu_access_partial = ubwcp_dma_buf_begin_cpu_access_partial,
		.end_cpu_access_partial = ubwcp_dma_buf_end_cpu_access_partial,
		.mmap = ubwcp_mmap,
		.vmap = ubwcp_vmap,
		.vunmap = ubwcp_vunmap,
		.release = ubwcp_release,
	}
};
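
/*
 * Called by the UBWC-P driver to switch a buffer between linear and
 * non-linear mmap modes and to describe the ULA PA range used for
 * non-linear mappings.  The switch is refused while the buffer has any
 * live CPU mapping.
 */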
int msm_ubwcp_dma_buf_configure_mmap(struct dma_buf *dmabuf, bool linear,
				     phys_addr_t ula_pa_addr,
				     size_t ula_pa_size)
{
	struct ubwcp_buffer *buffer;
	int ret = 0;

	if (dmabuf->ops != &ubwcp_ops.dma_ops) {
		pr_err("%s: User didn't pass in DMA-BUF!\n", __func__);
		return -EINVAL;
	}

	if (ula_pa_addr % PAGE_SIZE || ula_pa_size % PAGE_SIZE) {
		pr_err("%s: ULA PA addr and ULA PA map size must be page_aligned!\n",
		       __func__);
		return -EINVAL;
	}

	buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);

	down_write(&buffer->linear_mode_sem);
	if (atomic_read(&buffer->cpu_map_count)) {
		pr_err("%s: Buffer already mapped!\n", __func__);
		ret = -EINVAL;
		goto unlock;
	}

	buffer->is_linear = linear;
	buffer->ula_pa_addr = ula_pa_addr;
	buffer->ula_pa_size = ula_pa_size;

unlock:
	up_write(&buffer->linear_mode_sem);
	return ret;
}
EXPORT_SYMBOL(msm_ubwcp_dma_buf_configure_mmap);
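
/*
 * Heap allocate: carve the backing pages out of the "qcom,system" heap,
 * wrap them in a dma-buf exported with ubwcp_ops, and hand the new buffer
 * to the UBWC-P driver so it can set up its per-buffer state.
 */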
static struct dma_buf *ubwcp_allocate(struct dma_heap *heap,
				      unsigned long len,
				      unsigned long fd_flags,
				      unsigned long heap_flags)
{
	struct ubwcp_buffer *buffer;
	struct qcom_ubwcp_heap *ubwcp_heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;

	ubwcp_heap = dma_heap_get_drvdata(heap);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	init_rwsem(&buffer->linear_mode_sem);

	ret = system_qcom_sg_buffer_alloc(sys_heap, &buffer->qcom_sg_buf, len, ubwcp_heap->movable);
	if (ret)
		goto free_buf_struct;

	buffer->qcom_sg_buf.vmperm = mem_buf_vmperm_alloc(&buffer->qcom_sg_buf.sg_table);
	if (IS_ERR(buffer->qcom_sg_buf.vmperm)) {
		ret = PTR_ERR(buffer->qcom_sg_buf.vmperm);
		goto free_sys_heap_mem;
	}

	/* Make the buffer linear by default */
	buffer->is_linear = true;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.size = buffer->qcom_sg_buf.len;
	exp_info.flags = fd_flags;
	exp_info.priv = &buffer->qcom_sg_buf;
	dmabuf = qcom_dma_buf_export(&exp_info, &ubwcp_ops);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_vmperm;
	}

	ret = ubwcp_driver_ops.init_buffer(dmabuf);
	if (ret)
		goto buf_release;

	buffer->ubwcp_init_complete = true;
	return dmabuf;

buf_release:
	dma_buf_put(dmabuf);
	return ERR_PTR(ret);
free_vmperm:
	mem_buf_vmperm_release(buffer->qcom_sg_buf.vmperm);
free_sys_heap_mem:
	qcom_system_heap_free(&buffer->qcom_sg_buf);
	return ERR_PTR(ret);
free_buf_struct:
	kfree(buffer);
	return ERR_PTR(ret);
}

static const struct dma_heap_ops ubwcp_heap_ops = {
	.allocate = ubwcp_allocate,
};
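
/*
 * Android vendor hook: a non-linear UBWC-P buffer is mapped from its ULA PA
 * range, which need not match dmabuf->size, so the dma-buf core's bounds
 * check is skipped for those buffers and enforced for everything else.
 */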
static void ignore_vmap_bounds_check(void *unused, struct dma_buf *dmabuf, bool *result)
{
	struct ubwcp_buffer *buffer;

	if (dmabuf->ops != &ubwcp_ops.dma_ops) {
		*result = false;
		return;
	}

	buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
	if (buffer->is_linear)
		*result = false;
	else
		*result = true;
}
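
/*
 * Register a UBWC-P dma-buf heap.  Allocations are backed by the
 * "qcom,system" heap, which must already be registered.
 */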
int qcom_ubwcp_heap_create(char *name, bool movable)
{
	struct dma_heap_export_info exp_info;
	struct dma_heap *heap;
	struct qcom_ubwcp_heap *ubwcp_heap;
	static bool vmap_registered;
	int ret;

	/* The vendor hook must only be registered once, even if several heaps are created */
	if (!vmap_registered) {
		ret = register_trace_android_vh_ignore_dmabuf_vmap_bounds(ignore_vmap_bounds_check,
									   NULL);
		if (ret) {
			pr_err("%s: Unable to register vmap bounds tracehook\n", __func__);
			goto out;
		}
		vmap_registered = true;
	}

	sys_heap = dma_heap_find("qcom,system");
	if (!sys_heap) {
		pr_err("%s: Unable to find 'qcom,system'\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	ubwcp_heap = kzalloc(sizeof(*ubwcp_heap), GFP_KERNEL);
	if (!ubwcp_heap) {
		ret = -ENOMEM;
		goto ubwcp_alloc_free;
	}

	ubwcp_heap->movable = movable;

	exp_info.name = name;
	exp_info.ops = &ubwcp_heap_ops;
	exp_info.priv = ubwcp_heap;

	heap = dma_heap_add(&exp_info);
	if (IS_ERR(heap)) {
		ret = PTR_ERR(heap);
		goto ubwcp_alloc_free;
	}

	pr_info("%s: DMA-BUF Heap: Created '%s'\n", __func__, name);
	return 0;

ubwcp_alloc_free:
	kfree(ubwcp_heap);
out:
	pr_err("%s: Failed to create '%s', error is %d\n", __func__, name, ret);
	return ret;
}
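
/*
 * Entry point for the UBWC-P driver: record its callbacks and then create
 * the UBWC-P heap(s).  The movable variant is only created when
 * CONFIG_QCOM_DMABUF_HEAPS_UBWCP_MOVABLE is enabled.
 */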
int msm_ubwcp_set_ops(init_buffer init_buf_fn_ptr,
		      free_buffer free_buf_fn_ptr,
		      lock_buffer lock_buf_fn_ptr,
		      unlock_buffer unlock_buf_fn_ptr)
{
	int ret = 0;

	if (!init_buf_fn_ptr || !free_buf_fn_ptr || !lock_buf_fn_ptr ||
	    !unlock_buf_fn_ptr) {
		pr_err("%s: Missing function pointer\n", __func__);
		return -EINVAL;
	}

	ubwcp_driver_ops.init_buffer = init_buf_fn_ptr;
	ubwcp_driver_ops.free_buffer = free_buf_fn_ptr;
	ubwcp_driver_ops.lock_buffer = lock_buf_fn_ptr;
	ubwcp_driver_ops.unlock_buffer = unlock_buf_fn_ptr;

	ret = qcom_ubwcp_heap_create("qcom,ubwcp", false);
	if (ret)
		return ret;

#ifdef CONFIG_QCOM_DMABUF_HEAPS_UBWCP_MOVABLE
	ret = qcom_ubwcp_heap_create("qcom,ubwcp-movable", true);
#endif

	return ret;
}
EXPORT_SYMBOL(msm_ubwcp_set_ops);

MODULE_IMPORT_NS(DMA_BUF);