msm_vidc_memory.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/msm_ion.h>
#include <linux/ion.h>

#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_dt.h"
#include "msm_vidc_core.h"

struct msm_vidc_buf_region_name {
	enum msm_vidc_buffer_region region;
	char *name;
};

struct context_bank_info *get_context_bank(struct msm_vidc_core *core,
	enum msm_vidc_buffer_region region)
{
	const char *name;
	struct context_bank_info *cb = NULL, *match = NULL;
	static const struct msm_vidc_buf_region_name buf_region_name[] = {
		{MSM_VIDC_REGION_NONE,      "none"                },
		{MSM_VIDC_NON_SECURE,       "venus_ns"            },
		{MSM_VIDC_NON_SECURE_PIXEL, "venus_ns_pixel"      },
		{MSM_VIDC_SECURE_PIXEL,     "venus_sec_pixel"     },
		{MSM_VIDC_SECURE_NONPIXEL,  "venus_sec_non_pixel" },
		{MSM_VIDC_SECURE_BITSTREAM, "venus_sec_bitstream" },
	};

	/* region is used as an index into buf_region_name, so bound it */
	if (!region || region >= ARRAY_SIZE(buf_region_name))
		goto exit;

	if (buf_region_name[region].region != region)
		goto exit;

	name = buf_region_name[region].name;
	list_for_each_entry(cb, &core->dt->context_banks, list) {
		if (!strcmp(cb->name, name)) {
			match = cb;
			break;
		}
	}
	if (!match)
		d_vpr_e("cb not found for region %#x\n", region);

	return match;

exit:
	d_vpr_e("Invalid region %#x\n", region);
	return NULL;
}
struct dma_buf *msm_vidc_memory_get_dmabuf(int fd)
{
	struct dma_buf *dmabuf;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		d_vpr_e("Failed to get dmabuf for %d, error %ld\n",
			fd, PTR_ERR(dmabuf));
		dmabuf = NULL;
	}

	return dmabuf;
}

void msm_vidc_memory_put_dmabuf(void *dmabuf)
{
	if (!dmabuf) {
		d_vpr_e("%s: NULL dmabuf\n", __func__);
		return;
	}

	dma_buf_put((struct dma_buf *)dmabuf);
}
int msm_vidc_memory_map(struct msm_vidc_core *core, struct msm_vidc_map *map)
{
	int rc = 0;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* buffer is already mapped, only bump the refcount */
	if (map->refcount) {
		map->refcount++;
		return 0;
	}

	cb = get_context_bank(core, map->region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n", __func__);
		rc = -EIO;
		goto error_cb;
	}

	/* Prepare the dma-buf for DMA on the context bank device */
	attach = dma_buf_attach(map->dmabuf, cb->dev);
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -ENOMEM;
		d_vpr_e("Failed to attach dmabuf\n");
		goto error_attach;
	}

	/*
	 * Get the scatterlist for the attachment; mapping of the sg table
	 * is taken care of by dma_buf_map_attachment().
	 */
	attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP;
	/*
	 * The map call does not need to perform cache maintenance on the
	 * whole buffer, so pass the skip-sync flag. The required cache
	 * operations are done separately, and only on the portion of the
	 * buffer that actually needs them.
	 */
	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	if (core->dt->sys_cache_present)
		attach->dma_map_attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		rc = PTR_ERR(table) ? PTR_ERR(table) : -ENOMEM;
		d_vpr_e("Failed to map table\n");
		goto error_table;
	}
	if (!table->sgl) {
		d_vpr_e("sgl is NULL\n");
		rc = -ENOMEM;
		goto error_sg;
	}

	map->device_addr = table->sgl->dma_address;
	map->table = table;
	map->attach = attach;
	map->refcount++;

	return 0;

error_sg:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
error_table:
	dma_buf_detach(map->dmabuf, attach);
error_attach:
error_cb:
	return rc;
}
int msm_vidc_memory_unmap(struct msm_vidc_core *core, struct msm_vidc_map *map)
{
	int rc = 0;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount--;
	} else {
		d_vpr_e("unmap called while refcount is already zero\n");
		return -EINVAL;
	}

	/* keep the mapping alive until the last reference is dropped */
	if (map->refcount)
		goto exit;

	dma_buf_unmap_attachment(map->attach, map->table, DMA_BIDIRECTIONAL);
	dma_buf_detach(map->dmabuf, map->attach);

	map->device_addr = 0x0;
	map->dmabuf = NULL;
	map->attach = NULL;
	map->table = NULL;

exit:
	return rc;
}
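
/*
 * Illustrative sketch, not part of the driver: how a caller might pair the
 * refcounted msm_vidc_memory_map()/msm_vidc_memory_unmap() helpers above.
 * "example_map_once" is a hypothetical helper added only for illustration;
 * it touches only the struct msm_vidc_map fields already used in this file.
 */
static int __maybe_unused example_map_once(struct msm_vidc_core *core,
	struct dma_buf *dmabuf, enum msm_vidc_buffer_region region)
{
	int rc;
	struct msm_vidc_map map = {0};

	map.dmabuf = dmabuf;
	map.region = region;

	/* first map attaches and maps the dma-buf, refcount becomes 1 */
	rc = msm_vidc_memory_map(core, &map);
	if (rc)
		return rc;

	/* map.device_addr now holds the device address of the buffer */

	/* the last matching unmap detaches and clears the mapping */
	return msm_vidc_memory_unmap(core, &map);
}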
int msm_vidc_memory_alloc(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;
	int size = 0;
	struct dma_heap *heap;
	char *heap_name = NULL;

	if (!mem) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	size = ALIGN(mem->size, SZ_4K);

	/* pick the dma-buf heap matching the requested region */
	if (mem->secure) {
		switch (mem->region) {
		case MSM_VIDC_SECURE_PIXEL:
			heap_name = "qcom,secure-pixel";
			break;
		case MSM_VIDC_SECURE_NONPIXEL:
			heap_name = "qcom,secure-non-pixel";
			break;
		case MSM_VIDC_SECURE_BITSTREAM:
		default:
			d_vpr_e("invalid secure region : %#x\n", mem->region);
			return -EINVAL;
		}
	} else {
		heap_name = "qcom,system";
	}

	/* bail out early if the requested heap does not exist */
	heap = dma_heap_find(heap_name);
	if (IS_ERR_OR_NULL(heap)) {
		d_vpr_e("%s: dma heap %s not found\n", __func__, heap_name);
		return -ENOMEM;
	}

	mem->dmabuf = dma_heap_buffer_alloc(heap, size, 0, 0);
	if (IS_ERR_OR_NULL(mem->dmabuf)) {
		d_vpr_e("%s: dma heap %s alloc failed\n", __func__, heap_name);
		mem->dmabuf = NULL;
		rc = -ENOMEM;
		goto error;
	}

	if (mem->map_kernel) {
		dma_buf_begin_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
		mem->kvaddr = dma_buf_vmap(mem->dmabuf);
		if (!mem->kvaddr) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
	}

	d_vpr_h("%s: dmabuf = %pK, size = %d, kvaddr = %pK, buffer_type = %#x, secure = %d, region = %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, mem->type,
		mem->secure, mem->region);

	return 0;

error:
	msm_vidc_memory_free(core, mem);
	return rc;
}
int msm_vidc_memory_free(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;

	if (!mem || !mem->dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	d_vpr_h("%s: dmabuf = %pK, size = %d, kvaddr = %pK, buffer_type = %#x\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, mem->type);

	if (mem->kvaddr) {
		dma_buf_vunmap(mem->dmabuf, mem->kvaddr);
		mem->kvaddr = NULL;
		dma_buf_end_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
	}

	if (mem->dmabuf) {
		dma_heap_buffer_free(mem->dmabuf);
		mem->dmabuf = NULL;
	}

	return rc;
}
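
/*
 * Illustrative sketch, not part of the driver: allocating a small non-secure,
 * kernel-mapped buffer via msm_vidc_memory_alloc() and releasing it with
 * msm_vidc_memory_free(). "example_alloc_free" is a hypothetical helper added
 * only for illustration; it uses only fields already referenced in this file.
 */
static int __maybe_unused example_alloc_free(struct msm_vidc_core *core)
{
	int rc;
	struct msm_vidc_alloc alloc = {0};

	alloc.size = SZ_4K;
	alloc.secure = false;
	alloc.region = MSM_VIDC_NON_SECURE;
	alloc.map_kernel = true;

	/* allocates from the "qcom,system" heap and vmaps it into the kernel */
	rc = msm_vidc_memory_alloc(core, &alloc);
	if (rc)
		return rc;

	/* alloc.kvaddr is CPU-addressable; alloc.dmabuf holds the buffer */
	memset(alloc.kvaddr, 0, alloc.size);

	/* vunmaps the buffer (if mapped) and returns it to the heap */
	return msm_vidc_memory_free(core, &alloc);
}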
/*
int msm_memory_cache_operations(struct msm_vidc_inst *inst,
	struct dma_buf *dbuf, enum smem_cache_ops cache_op,
	unsigned long offset, unsigned long size, u32 sid)
{
	int rc = 0;
	unsigned long flags = 0;

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}
	if (!dbuf) {
		i_vpr_e(inst, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = dma_buf_get_flags(dbuf, &flags);
	if (rc) {
		i_vpr_e(inst, "%s: dma_buf_get_flags failed, err %d\n",
			__func__, rc);
		return rc;
	} else if (!(flags & ION_FLAG_CACHED)) {
		return rc;
	}

	switch (cache_op) {
	case SMEM_CACHE_CLEAN:
	case SMEM_CACHE_CLEAN_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		break;
	case SMEM_CACHE_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
				offset, size);
		break;
	default:
		i_vpr_e(inst, "%s: cache (%d) operation not supported\n",
			__func__, cache_op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int msm_smem_memory_prefetch(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_prefetch(ION_SECURE_HEAP_ID, ion_region,
			vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: prefetch failed, ret: %d\n",
			__func__, rc);
	else
		i_vpr_l(inst, "%s: prefetch succeeded\n", __func__);

	return rc;
}

int msm_smem_memory_drain(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_drain(ION_SECURE_HEAP_ID, ion_region,
			vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: drain failed, ret: %d\n", __func__, rc);
	else
		i_vpr_l(inst, "%s: drain succeeded\n", __func__);

	return rc;
}
*/