qcom_sg_ops.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * mmap() algorithm taken from drivers/staging/android/ion/ion_heap.c as
 * of commit a3ec289e74b4 ("arm-smmu: Fix missing qsmmuv500 callback")
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 * Andrew F. Davis <[email protected]>
 *
 * These ops were based on the ops in drivers/dma-buf/heaps/system-heap.c from
 * https://lore.kernel.org/lkml/[email protected]/
 *
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/of.h>
#include <linux/dma-map-ops.h>
#include <linux/qcom_dma_heap.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/qti-smmu-proxy-callbacks.h>

#include "qcom_sg_ops.h"

int proxy_invalid_map(struct device *dev, struct sg_table *table,
		      struct dma_buf *dmabuf)
{
	WARN(1, "Trying to map with SMMU proxy driver when it has not fully probed!\n");
	return -EINVAL;
}

void proxy_invalid_unmap(struct device *dev, struct sg_table *table,
			 struct dma_buf *dmabuf)
{
	WARN(1, "Trying to unmap with SMMU proxy driver when it has not fully probed!\n");
}

static struct smmu_proxy_callbacks smmu_proxy_callback_ops = {
	.map_sgtable = proxy_invalid_map,
	.unmap_sgtable = proxy_invalid_unmap,
};

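/*
 * Duplicate the buffer's scatter-gather table so that each attachment gets
 * its own copy: per-attachment DMA addresses live in the duplicate, while
 * the underlying pages are shared with the original table.
 */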
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

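/*
 * dma_buf attach: allocate a dma_heap_attachment, give it a private copy of
 * the sg_table, and add it to the buffer's attachment list so the CPU-access
 * ops can sync every attachment that is currently DMA-mapped.
 */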
int qcom_sg_attach(struct dma_buf *dmabuf,
		   struct dma_buf_attachment *attachment)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

void qcom_sg_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attachment)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

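/*
 * Map the attachment's sg_table for DMA. Mappings requested with
 * DMA_ATTR_QTI_SMMU_PROXY_MAP are handed to the SMMU proxy driver instead.
 * The buffer's VM permissions are pinned for the lifetime of the mapping,
 * and CPU cache maintenance is skipped (DMA_ATTR_SKIP_CPU_SYNC) for uncached
 * buffers or when this VM is not allowed to perform cache maintenance.
 */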
struct sg_table *qcom_sg_map_dma_buf(struct dma_buf_attachment *attachment,
				     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	struct qcom_sg_buffer *buffer;
	struct mem_buf_vmperm *vmperm;
	unsigned long attrs = attachment->dma_map_attrs;
	int ret;

	buffer = attachment->dmabuf->priv;
	vmperm = buffer->vmperm;

	if (smmu_proxy_callback_ops.map_sgtable &&
	    (attrs & DMA_ATTR_QTI_SMMU_PROXY_MAP)) {
		ret = smmu_proxy_callback_ops.map_sgtable(attachment->dev, table,
							  attachment->dmabuf);
		return ret ? ERR_PTR(ret) : table;
	}

	/* Prevent map/unmap during begin/end_cpu_access */
	mutex_lock(&buffer->lock);

	/* Ensure VM permissions are constant while the buffer is mapped */
	mem_buf_vmperm_pin(vmperm);
	if (buffer->uncached || !mem_buf_vmperm_can_cmo(vmperm))
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (attrs & DMA_ATTR_DELAYED_UNMAP) {
		ret = msm_dma_map_sgtable(attachment->dev, table, direction,
					  attachment->dmabuf, attrs);
	} else if (!a->mapped) {
		ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	} else {
		dev_err(attachment->dev, "Error: Dma-buf is already mapped!\n");
		ret = -EBUSY;
	}

	if (ret) {
		table = ERR_PTR(ret);
		goto err_map_sgtable;
	}

	a->mapped = true;
	mutex_unlock(&buffer->lock);
	return table;

err_map_sgtable:
	mem_buf_vmperm_unpin(vmperm);
	mutex_unlock(&buffer->lock);
	return table;
}

void qcom_sg_unmap_dma_buf(struct dma_buf_attachment *attachment,
			   struct sg_table *table,
			   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct qcom_sg_buffer *buffer;
	struct mem_buf_vmperm *vmperm;
	unsigned long attrs = attachment->dma_map_attrs;

	buffer = attachment->dmabuf->priv;
	vmperm = buffer->vmperm;

	if (smmu_proxy_callback_ops.unmap_sgtable &&
	    (attrs & DMA_ATTR_QTI_SMMU_PROXY_MAP)) {
		smmu_proxy_callback_ops.unmap_sgtable(attachment->dev, table,
						      attachment->dmabuf);
		return;
	}

	/* Prevent map/unmap during begin/end_cpu_access */
	mutex_lock(&buffer->lock);
	if (buffer->uncached || !mem_buf_vmperm_can_cmo(vmperm))
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	a->mapped = false;

	if (attrs & DMA_ATTR_DELAYED_UNMAP) {
		msm_dma_unmap_sgtable(attachment->dev, table, direction,
				      attachment->dmabuf, attrs);
	} else {
		dma_unmap_sgtable(attachment->dev, table, direction, attrs);
	}

	mem_buf_vmperm_unpin(vmperm);
	mutex_unlock(&buffer->lock);
}

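/*
 * begin/end_cpu_access: sync every currently mapped attachment for the CPU
 * (begin) or the device (end). Uncached buffers are skipped, and buffers on
 * which this VM may not perform cache maintenance return 0 rather than
 * -EPERM to keep ION-compatible behavior.
 */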
int qcom_sg_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				     enum dma_data_direction direction)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->uncached)
		return 0;

	mutex_lock(&buffer->lock);

	/* Keep the same behavior as ion by returning 0 instead of -EPERM */
	if (!mem_buf_vmperm_can_cmo(buffer->vmperm)) {
		mutex_unlock(&buffer->lock);
		return 0;
	}

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

int qcom_sg_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				   enum dma_data_direction direction)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->uncached)
		return 0;

	mutex_lock(&buffer->lock);

	/* Keep the same behavior as ion by returning 0 instead of -EPERM */
	if (!mem_buf_vmperm_can_cmo(buffer->vmperm)) {
		mutex_unlock(&buffer->lock);
		return 0;
	}

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

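/*
 * Sync a byte range [offset, offset + length) of a DMA-mapped scatterlist
 * for the CPU or the device. Partial cache maintenance is only supported
 * when the table was mapped as a single DMA segment: tables with more than
 * one mapped segment return -EINVAL, except on devices without DMA ops,
 * where the sync is silently skipped.
 */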
static int sgl_sync_range(struct device *dev, struct scatterlist *sgl,
			  unsigned int nents, unsigned long offset,
			  unsigned long length,
			  enum dma_data_direction dir, bool for_cpu)
{
	int i;
	struct scatterlist *sg;
	unsigned int len = 0;
	dma_addr_t sg_dma_addr;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_len(sg) == 0)
			break;

		if (i > 0) {
			if (!get_dma_ops(dev))
				return 0;

			pr_warn_ratelimited("Partial cmo only supported with 1 segment\n"
					    "is dma_set_max_seg_size being set on dev:%s\n",
					    dev_name(dev));
			return -EINVAL;
		}
	}

	for_each_sg(sgl, sg, nents, i) {
		unsigned int sg_offset, sg_left, size = 0;

		if (i == 0)
			sg_dma_addr = sg_dma_address(sg);

		len += sg->length;
		if (len <= offset) {
			sg_dma_addr += sg->length;
			continue;
		}

		sg_left = len - offset;
		sg_offset = sg->length - sg_left;

		size = (length < sg_left) ? length : sg_left;
		if (for_cpu)
			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
						      sg_offset, size, dir);
		else
			dma_sync_single_range_for_device(dev, sg_dma_addr,
							 sg_offset, size, dir);

		offset += size;
		length -= size;
		sg_dma_addr += sg->length;

		if (length == 0)
			break;
	}

	return 0;
}

int qcom_sg_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
					     enum dma_data_direction dir,
					     unsigned int offset,
					     unsigned int len)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret = 0;

	if (buffer->uncached)
		return 0;

	mutex_lock(&buffer->lock);

	/* Keep the same behavior as ion by returning 0 instead of -EPERM */
	if (!mem_buf_vmperm_can_cmo(buffer->vmperm)) {
		mutex_unlock(&buffer->lock);
		return 0;
	}

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr + offset, len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		ret = sgl_sync_range(a->dev, a->table->sgl, a->table->orig_nents,
				     offset, len, dir, true);
	}
	mutex_unlock(&buffer->lock);

	return ret;
}

int qcom_sg_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					   enum dma_data_direction direction,
					   unsigned int offset,
					   unsigned int len)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret = 0;

	if (buffer->uncached)
		return 0;

	mutex_lock(&buffer->lock);

	/* Keep the same behavior as ion by returning 0 instead of -EPERM */
	if (!mem_buf_vmperm_can_cmo(buffer->vmperm)) {
		mutex_unlock(&buffer->lock);
		return 0;
	}

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr + offset, len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		ret = sgl_sync_range(a->dev, a->table->sgl, a->table->orig_nents,
				     offset, len, direction, false);
	}
	mutex_unlock(&buffer->lock);

	return ret;
}

static void qcom_sg_vm_ops_open(struct vm_area_struct *vma)
{
	struct mem_buf_vmperm *vmperm = vma->vm_private_data;

	mem_buf_vmperm_pin(vmperm);
}

static void qcom_sg_vm_ops_close(struct vm_area_struct *vma)
{
	struct mem_buf_vmperm *vmperm = vma->vm_private_data;

	mem_buf_vmperm_unpin(vmperm);
}

static const struct vm_operations_struct qcom_sg_vm_ops = {
	.open = qcom_sg_vm_ops_open,
	.close = qcom_sg_vm_ops_close,
};

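/*
 * mmap() the buffer into user space by walking the scatterlist and calling
 * remap_pfn_range() for each chunk that falls inside the requested VMA,
 * honoring the vm_pgoff offset. The VM permission pin taken here is dropped
 * by qcom_sg_vm_ops_close() when the VMA is torn down.
 */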
int qcom_sg_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	struct scatterlist *sg;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	int ret;
	int i;

	mem_buf_vmperm_pin(buffer->vmperm);
	if (!mem_buf_vmperm_can_mmap(buffer->vmperm, vma)) {
		mem_buf_vmperm_unpin(buffer->vmperm);
		return -EPERM;
	}

	vma->vm_ops = &qcom_sg_vm_ops;
	vma->vm_private_data = buffer->vmperm;

	if (buffer->uncached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret) {
			mem_buf_vmperm_unpin(buffer->vmperm);
			return ret;
		}
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}

	return 0;
}

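/*
 * Build a contiguous kernel mapping of the buffer with vmap(). Uncached
 * buffers are mapped write-combined so the kernel view matches the
 * attributes used for user-space mappings.
 */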
void *qcom_sg_do_vmap(struct qcom_sg_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	pgprot_t pgprot = PAGE_KERNEL;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->uncached)
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

int qcom_sg_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mem_buf_vmperm_pin(buffer->vmperm);
	if (!mem_buf_vmperm_can_vmap(buffer->vmperm)) {
		mem_buf_vmperm_unpin(buffer->vmperm);
		return -EPERM;
	}

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = qcom_sg_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		mem_buf_vmperm_unpin(buffer->vmperm);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

void qcom_sg_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mem_buf_vmperm_unpin(buffer->vmperm);
	mutex_unlock(&buffer->lock);

	iosys_map_clear(map);
}

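/*
 * Final release of the dma_buf. If mem_buf_vmperm_release() fails, the
 * buffer is left untouched; otherwise msm_dma_buf_freed() releases any
 * mappings retained via DMA_ATTR_DELAYED_UNMAP and the heap-specific free()
 * callback returns the backing memory.
 */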
void qcom_sg_release(struct dma_buf *dmabuf)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;

	if (mem_buf_vmperm_release(buffer->vmperm))
		return;

	msm_dma_buf_freed(buffer);
	buffer->free(buffer);
}

struct mem_buf_vmperm *qcom_sg_lookup_vmperm(struct dma_buf *dmabuf)
{
	struct qcom_sg_buffer *buffer = dmabuf->priv;

	return buffer->vmperm;
}

struct mem_buf_dma_buf_ops qcom_sg_buf_ops = {
	.attach = qcom_sg_attach,
	.lookup = qcom_sg_lookup_vmperm,
	.dma_ops = {
		.attach = NULL, /* Will be set by mem_buf_dma_buf_export */
		.detach = qcom_sg_detach,
		.map_dma_buf = qcom_sg_map_dma_buf,
		.unmap_dma_buf = qcom_sg_unmap_dma_buf,
		.begin_cpu_access = qcom_sg_dma_buf_begin_cpu_access,
		.end_cpu_access = qcom_sg_dma_buf_end_cpu_access,
		.begin_cpu_access_partial = qcom_sg_dma_buf_begin_cpu_access_partial,
		.end_cpu_access_partial = qcom_sg_dma_buf_end_cpu_access_partial,
		.mmap = qcom_sg_mmap,
		.vmap = qcom_sg_vmap,
		.vunmap = qcom_sg_vunmap,
		.release = qcom_sg_release,
	}
};
EXPORT_SYMBOL(qcom_sg_buf_ops);

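/*
 * Called by the SMMU proxy driver once it has probed, replacing the
 * proxy_invalid_* placeholders so that DMA_ATTR_QTI_SMMU_PROXY_MAP mappings
 * are routed to the real proxy implementation.
 */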
int qti_smmu_proxy_register_callbacks(smmu_proxy_map_sgtable map_sgtable_fn_ptr,
				      smmu_proxy_unmap_sgtable unmap_sgtable_fn_ptr)
{
	smmu_proxy_callback_ops.map_sgtable = map_sgtable_fn_ptr;
	smmu_proxy_callback_ops.unmap_sgtable = unmap_sgtable_fn_ptr;

	return 0;
}
EXPORT_SYMBOL(qti_smmu_proxy_register_callbacks);