msm_smem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/iommu.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/ion.h>
#include <linux/msm_ion.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "msm_cvp_core.h"
#include "msm_cvp_debug.h"
#include "msm_cvp_resources.h"
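
/*
 * msm_dma_get_device_address() - attach @dbuf to the context bank device
 * selected by the SMEM/ION flags and map it for DMA. The device virtual
 * address of the first scatterlist entry is returned through @iova, and
 * the attachment, sg table, and context bank are recorded in @mapping_info
 * so msm_dma_put_device_address() can undo the mapping later. Without an
 * IOMMU this only logs a debug message; callers fall back to physical
 * addresses.
 */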
static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
	dma_addr_t *iova, u32 flags, unsigned long ion_flags,
	struct msm_cvp_platform_resources *res,
	struct cvp_dma_mapping_info *mapping_info)
{
	int rc = 0;
	struct dma_buf_attachment *attach;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	if (!dbuf || !iova || !mapping_info) {
		dprintk(CVP_ERR, "Invalid params: %pK, %pK, %pK\n",
			dbuf, iova, mapping_info);
		return -EINVAL;
	}

	if (is_iommu_present(res)) {
		cb = msm_cvp_smem_get_context_bank((flags & SMEM_SECURE),
				res, ion_flags);
		if (!cb) {
			dprintk(CVP_ERR,
				"%s: Failed to get context bank device\n",
				__func__);
			rc = -EIO;
			goto mem_map_failed;
		}

		/* Prepare a dma buf for dma on the given device */
		attach = dma_buf_attach(dbuf, cb->dev);
		if (IS_ERR_OR_NULL(attach)) {
			rc = PTR_ERR(attach) ?: -ENOMEM;
			dprintk(CVP_ERR, "Failed to attach dmabuf\n");
			goto mem_buf_attach_failed;
		}

		/*
		 * Get the scatterlist for the given attachment.
		 * Mapping of the sg is taken care of by map attachment.
		 */
		attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP;
		/*
		 * We do not need the dma_map function to perform cache
		 * operations on the whole buffer, hence pass the skip-sync
		 * flag. The required cache operations are done separately,
		 * and only on the required buffer size.
		 */
		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
		if (res->sys_cache_present)
			attach->dma_map_attrs |=
				DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(table)) {
			rc = PTR_ERR(table) ?: -ENOMEM;
			dprintk(CVP_ERR, "Failed to map table\n");
			goto mem_map_table_failed;
		}

		if (table->sgl) {
			*iova = table->sgl->dma_address;
		} else {
			dprintk(CVP_ERR, "sgl is NULL\n");
			rc = -ENOMEM;
			goto mem_map_sg_failed;
		}

		mapping_info->dev = cb->dev;
		mapping_info->domain = cb->domain;
		mapping_info->table = table;
		mapping_info->attach = attach;
		mapping_info->buf = dbuf;
		mapping_info->cb_info = (void *)cb;
	} else {
		dprintk(CVP_MEM, "iommu not present, use phys mem addr\n");
	}

	return 0;

mem_map_sg_failed:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
mem_map_table_failed:
	dma_buf_detach(dbuf, attach);
mem_buf_attach_failed:
mem_map_failed:
	return rc;
}
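
/*
 * msm_dma_put_device_address() - reverse of msm_dma_get_device_address():
 * unmap and detach the attachment recorded in @mapping_info, then clear
 * every field so a stale mapping cannot be released twice.
 */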
static int msm_dma_put_device_address(u32 flags,
	struct cvp_dma_mapping_info *mapping_info)
{
	int rc = 0;

	if (!mapping_info) {
		dprintk(CVP_WARN, "Invalid mapping_info\n");
		return -EINVAL;
	}

	if (!mapping_info->dev || !mapping_info->table ||
		!mapping_info->buf || !mapping_info->attach ||
		!mapping_info->cb_info) {
		dprintk(CVP_WARN, "Invalid params\n");
		return -EINVAL;
	}

	dma_buf_unmap_attachment(mapping_info->attach,
		mapping_info->table, DMA_BIDIRECTIONAL);
	dma_buf_detach(mapping_info->buf, mapping_info->attach);

	mapping_info->dev = NULL;
	mapping_info->domain = NULL;
	mapping_info->table = NULL;
	mapping_info->attach = NULL;
	mapping_info->buf = NULL;
	mapping_info->cb_info = NULL;

	return rc;
}
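
/*
 * msm_cvp_smem_get_dma_buf() - resolve a userspace fd to a dma_buf,
 * taking a reference that must be dropped via msm_cvp_smem_put_dma_buf().
 * Returns NULL rather than an ERR_PTR on failure.
 */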
struct dma_buf *msm_cvp_smem_get_dma_buf(int fd)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dma_buf)) {
		dprintk(CVP_ERR, "Failed to get dma_buf for %d, error %ld\n",
			fd, PTR_ERR(dma_buf));
		dma_buf = NULL;
	}

	return dma_buf;
}
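
/* Drop the dma_buf reference taken by msm_cvp_smem_get_dma_buf(). */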
void msm_cvp_smem_put_dma_buf(void *dma_buf)
{
	if (!dma_buf) {
		dprintk(CVP_ERR, "%s: NULL dma_buf\n", __func__);
		return;
	}

	dma_buf_put((struct dma_buf *)dma_buf);
}
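
/*
 * msm_cvp_map_smem() - map an already-imported dma_buf into the device
 * address space. ION cached/secure flags are propagated into smem->flags,
 * and the mapping is rejected if the iova does not fit in the 32-bit
 * smem->device_addr field.
 */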
int msm_cvp_map_smem(struct msm_cvp_inst *inst,
	struct msm_cvp_smem *smem,
	const char *str)
{
	int rc = 0;
	dma_addr_t iova = 0;
	u32 temp = 0;
	u32 align = SZ_4K;
	struct dma_buf *dma_buf;
	unsigned long ion_flags = 0;

	if (!inst || !smem) {
		dprintk(CVP_ERR, "%s: Invalid params: %pK %pK\n",
			__func__, inst, smem);
		return -EINVAL;
	}

	dma_buf = smem->dma_buf;
	rc = dma_buf_get_flags(dma_buf, &ion_flags);
	if (rc) {
		dprintk(CVP_ERR, "Failed to get dma buf flags: %d\n", rc);
		goto exit;
	}
	if (ion_flags & ION_FLAG_CACHED)
		smem->flags |= SMEM_CACHED;
	if (ion_flags & ION_FLAG_SECURE)
		smem->flags |= SMEM_SECURE;

	rc = msm_dma_get_device_address(dma_buf, align, &iova, smem->flags,
			ion_flags, &(inst->core->resources),
			&smem->mapping_info);
	if (rc) {
		dprintk(CVP_ERR, "Failed to get device address: %d\n", rc);
		goto exit;
	}

	temp = (u32)iova;
	if ((dma_addr_t)temp != iova) {
		dprintk(CVP_ERR, "iova(%pa) truncated to %#x\n", &iova, temp);
		rc = -EINVAL;
		goto exit;
	}

	smem->size = dma_buf->size;
	smem->device_addr = (u32)iova;

	print_smem(CVP_MEM, str, inst, smem);
	return rc;

exit:
	smem->device_addr = 0x0;
	return rc;
}
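
/*
 * msm_cvp_unmap_smem() - tear down the device mapping created by
 * msm_cvp_map_smem() and clear smem->device_addr.
 */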
int msm_cvp_unmap_smem(struct msm_cvp_inst *inst,
	struct msm_cvp_smem *smem,
	const char *str)
{
	int rc = 0;

	if (!smem) {
		dprintk(CVP_ERR, "%s: Invalid params: %pK\n", __func__, smem);
		rc = -EINVAL;
		goto exit;
	}

	print_smem(CVP_MEM, str, inst, smem);
	rc = msm_dma_put_device_address(smem->flags, &smem->mapping_info);
	if (rc) {
		dprintk(CVP_ERR, "Failed to put device address: %d\n", rc);
		goto exit;
	}

	smem->device_addr = 0x0;

exit:
	return rc;
}
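
/*
 * alloc_dma_mem() - allocate @size bytes from the ION heap selected by
 * @flags (ADSP, system, or secure), map the buffer to a device address,
 * and optionally vmap it into the kernel when @map_kernel is set. Size
 * and alignment are rounded up to 4K.
 */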
static int alloc_dma_mem(size_t size, u32 align, u32 flags, int map_kernel,
	struct msm_cvp_platform_resources *res, struct msm_cvp_smem *mem)
{
	dma_addr_t iova = 0;
	unsigned long heap_mask = 0;
	int rc = 0;
	int ion_flags = 0;
	struct dma_buf *dbuf = NULL;

	if (!res) {
		dprintk(CVP_ERR, "%s: NULL res\n", __func__);
		return -EINVAL;
	}

	align = ALIGN(align, SZ_4K);
	size = ALIGN(size, SZ_4K);

	if (is_iommu_present(res)) {
		if (flags & SMEM_ADSP) {
			dprintk(CVP_MEM, "Allocating from ADSP heap\n");
			heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
		} else {
			heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID);
		}
	} else {
		dprintk(CVP_MEM,
			"allocate shared memory from adsp heap size %zx align %d\n",
			size, align);
		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
	}

	if (flags & SMEM_CACHED)
		ion_flags |= ION_FLAG_CACHED;
	if (flags & SMEM_NON_PIXEL)
		ion_flags |= ION_FLAG_CP_NON_PIXEL;
	if (flags & SMEM_SECURE) {
		ion_flags |= ION_FLAG_SECURE;
		heap_mask = ION_HEAP(ION_SECURE_HEAP_ID);
	}

	dbuf = ion_alloc(size, heap_mask, ion_flags);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(CVP_ERR,
			"Failed to allocate shared memory = %zx bytes, %lx, %x\n",
			size, heap_mask, ion_flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}

	mem->flags = flags;
	mem->ion_flags = ion_flags;
	mem->size = size;
	mem->dma_buf = dbuf;
	mem->kvaddr = NULL;

	rc = msm_dma_get_device_address(dbuf, align, &iova, flags,
			ion_flags, res, &mem->mapping_info);
	if (rc) {
		dprintk(CVP_ERR, "Failed to get device address: %d\n", rc);
		goto fail_device_address;
	}

	mem->device_addr = (u32)iova;
	if ((dma_addr_t)mem->device_addr != iova) {
		dprintk(CVP_ERR, "iova(%pa) truncated to %#x\n",
			&iova, mem->device_addr);
		rc = -EINVAL;
		goto fail_device_address;
	}

	if (map_kernel) {
		dma_buf_begin_cpu_access(dbuf, DMA_BIDIRECTIONAL);
		mem->kvaddr = dma_buf_vmap(dbuf);
		if (!mem->kvaddr) {
			dprintk(CVP_ERR,
				"Failed to map shared mem in kernel\n");
			rc = -EIO;
			goto fail_map;
		}
	}

	dprintk(CVP_MEM,
		"%s: dma_buf = %pK, device_addr = %x, size = %d, kvaddr = %pK, ion_flags = %#x, flags = %#x\n",
		__func__, mem->dma_buf, mem->device_addr, mem->size,
		mem->kvaddr, mem->ion_flags, mem->flags);
	return rc;

fail_map:
	if (map_kernel)
		dma_buf_end_cpu_access(dbuf, DMA_BIDIRECTIONAL);
fail_device_address:
	dma_buf_put(dbuf);
fail_shared_mem_alloc:
	return rc;
}
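
/*
 * free_dma_mem() - release everything alloc_dma_mem() set up: the device
 * mapping, the kernel vmap (with its matching end_cpu_access), and
 * finally the dma_buf reference itself.
 */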
static int free_dma_mem(struct msm_cvp_smem *mem)
{
	dprintk(CVP_MEM,
		"%s: dma_buf = %pK, device_addr = %x, size = %d, kvaddr = %pK, ion_flags = %#x\n",
		__func__, mem->dma_buf, mem->device_addr, mem->size,
		mem->kvaddr, mem->ion_flags);

	if (mem->device_addr) {
		msm_dma_put_device_address(mem->flags, &mem->mapping_info);
		mem->device_addr = 0x0;
	}

	if (mem->kvaddr) {
		dma_buf_vunmap(mem->dma_buf, mem->kvaddr);
		mem->kvaddr = NULL;
		dma_buf_end_cpu_access(mem->dma_buf, DMA_BIDIRECTIONAL);
	}

	if (mem->dma_buf) {
		dma_buf_put(mem->dma_buf);
		mem->dma_buf = NULL;
	}

	return 0;
}
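
/* Argument-checking entry point; the real work happens in alloc_dma_mem(). */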
int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags, int map_kernel,
	void *res, struct msm_cvp_smem *smem)
{
	int rc = 0;

	if (!smem || !size) {
		dprintk(CVP_ERR, "%s: NULL smem or %d size\n",
			__func__, (u32)size);
		return -EINVAL;
	}

	rc = alloc_dma_mem(size, align, flags, map_kernel,
		(struct msm_cvp_platform_resources *)res, smem);

	return rc;
}
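
/* Argument-checking entry point; the real work happens in free_dma_mem(). */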
int msm_cvp_smem_free(struct msm_cvp_smem *smem)
{
	int rc = 0;

	if (!smem) {
		dprintk(CVP_ERR, "NULL smem passed\n");
		return -EINVAL;
	}
	rc = free_dma_mem(smem);

	return rc;
}
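
/*
 * msm_cvp_smem_cache_operations() - perform a partial cache clean,
 * invalidate, or clean+invalidate on [offset, offset + size) of @dbuf.
 * Buffers allocated without ION_FLAG_CACHED are skipped. The begin/end
 * cpu-access pair issues the actual maintenance; the DMA direction passed
 * to each call determines whether lines are cleaned or invalidated.
 */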
int msm_cvp_smem_cache_operations(struct dma_buf *dbuf,
	enum smem_cache_ops cache_op, unsigned long offset, unsigned long size)
{
	int rc = 0;
	unsigned long flags = 0;

	if (!dbuf) {
		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	/* Return if buffer doesn't support caching */
	rc = dma_buf_get_flags(dbuf, &flags);
	if (rc) {
		dprintk(CVP_ERR, "%s: dma_buf_get_flags failed, err %d\n",
			__func__, rc);
		return rc;
	} else if (!(flags & ION_FLAG_CACHED)) {
		return rc;
	}

	switch (cache_op) {
	case SMEM_CACHE_CLEAN:
	case SMEM_CACHE_CLEAN_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_BIDIRECTIONAL,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_BIDIRECTIONAL,
				offset, size);
		break;
	case SMEM_CACHE_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
				offset, size);
		break;
	default:
		dprintk(CVP_ERR, "%s: cache (%d) operation not supported\n",
			__func__, cache_op);
		rc = -EINVAL;
		break;
	}

	return rc;
}
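
/*
 * msm_cvp_smem_get_context_bank() - find the IOMMU context bank whose name
 * matches the buffer's protection domain: "cvp_sec_pixel" for secure pixel
 * buffers, "cvp_sec_nonpixel" for secure non-pixel buffers, "cvp_hlos"
 * otherwise. The bank's is_secure flag must also agree with @is_secure.
 */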
struct context_bank_info *msm_cvp_smem_get_context_bank(bool is_secure,
	struct msm_cvp_platform_resources *res, unsigned long ion_flags)
{
	struct context_bank_info *cb = NULL, *match = NULL;
	char *search_str;
	char *non_secure_cb = "cvp_hlos";
	char *secure_nonpixel_cb = "cvp_sec_nonpixel";
	char *secure_pixel_cb = "cvp_sec_pixel";

	if (ion_flags & ION_FLAG_CP_PIXEL)
		search_str = secure_pixel_cb;
	else if (ion_flags & ION_FLAG_CP_NON_PIXEL)
		search_str = secure_nonpixel_cb;
	else
		search_str = non_secure_cb;

	list_for_each_entry(cb, &res->context_banks, list) {
		if (cb->is_secure == is_secure &&
			!strcmp(search_str, cb->name)) {
			match = cb;
			break;
		}
	}

	if (!match)
		dprintk(CVP_ERR,
			"%s: cb not found for ion_flags %lx, is_secure %d\n",
			__func__, ion_flags, is_secure);

	return match;
}