msm_vidc_memory.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/msm_ion.h>
#include <linux/ion.h>

#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_dt.h"
#include "msm_vidc_core.h"

struct context_bank_info *get_context_bank(struct msm_vidc_core *core,
	enum msm_vidc_buffer_region region)
{
	char *name;
	struct context_bank_info *cb = NULL, *match = NULL;

	switch (region) {
	case MSM_VIDC_NON_SECURE:
		name = "venus_ns";
		break;
	case MSM_VIDC_SECURE_PIXEL:
		name = "venus_sec_pixel";
		break;
	case MSM_VIDC_SECURE_NONPIXEL:
		name = "venus_sec_non_pixel";
		break;
	case MSM_VIDC_SECURE_BITSTREAM:
		name = "venus_sec_bitstream";
		break;
	default:
		d_vpr_e("invalid region: %#x\n", region);
		return NULL;
	}

	list_for_each_entry(cb, &core->dt->context_banks, list) {
		if (!strcmp(cb->name, name)) {
			match = cb;
			break;
		}
	}
	if (!match)
		d_vpr_e("cb not found for region %#x\n", region);

	return match;
}
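
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * resolve the context bank for a buffer region before attaching a dma-buf
 * to that bank's device (msm_vidc_memory_map() below does exactly this).
 * The 'core' pointer and the chosen region are assumed for the example.
 *
 *	struct context_bank_info *cb;
 *
 *	cb = get_context_bank(core, MSM_VIDC_NON_SECURE);
 *	if (!cb)
 *		return -EIO;
 *	// cb->dev is the device backing the "venus_ns" SMMU context bank
 */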

struct dma_buf *msm_vidc_memory_get_dmabuf(int fd)
{
	struct dma_buf *dmabuf;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		d_vpr_e("Failed to get dmabuf for %d, error %ld\n",
			fd, PTR_ERR(dmabuf));
		dmabuf = NULL;
	}

	return dmabuf;
}

void msm_vidc_memory_put_dmabuf(void *dmabuf)
{
	if (!dmabuf) {
		d_vpr_e("%s: NULL dmabuf\n", __func__);
		return;
	}

	dma_buf_put((struct dma_buf *)dmabuf);
}
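
/*
 * Illustrative sketch, not part of the original file: a dma-buf imported
 * from a userspace fd with msm_vidc_memory_get_dmabuf() holds a reference
 * that must be released with msm_vidc_memory_put_dmabuf() once the driver
 * is done with the buffer. The 'fd' value is assumed for the example.
 *
 *	struct dma_buf *dbuf;
 *
 *	dbuf = msm_vidc_memory_get_dmabuf(fd);
 *	if (!dbuf)
 *		return -EINVAL;
 *	// ... attach/map the buffer and use it ...
 *	msm_vidc_memory_put_dmabuf(dbuf);
 */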

int msm_vidc_memory_map(struct msm_vidc_core *core, struct msm_vidc_map *map)
{
	int rc = 0;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount++;
		return 0;
	}

	cb = get_context_bank(core, map->region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n", __func__);
		rc = -EIO;
		goto error_cb;
	}

	/* Prepare a dma buf for dma on the given device */
	attach = dma_buf_attach(map->dmabuf, cb->dev);
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -ENOMEM;
		d_vpr_e("Failed to attach dmabuf\n");
		goto error_attach;
	}

	/*
	 * Get the scatterlist for the given attachment.
	 * Mapping of the sg table is taken care of by map attachment.
	 */
	attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP;
	/*
	 * We do not need the dma_map call to perform cache operations on
	 * the whole buffer, hence pass the skip-sync flag. The required
	 * cache operations are done separately, only on the required size.
	 */
	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	if (core->dt->sys_cache_present)
		attach->dma_map_attrs |=
			DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		rc = PTR_ERR(table) ? PTR_ERR(table) : -ENOMEM;
		d_vpr_e("Failed to map table\n");
		goto error_table;
	}
	if (!table->sgl) {
		d_vpr_e("sgl is NULL\n");
		rc = -ENOMEM;
		goto error_sg;
	}

	map->device_addr = table->sgl->dma_address;
	map->table = table;
	map->attach = attach;
	map->refcount++;

	return 0;

error_sg:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
error_table:
	dma_buf_detach(map->dmabuf, attach);
error_attach:
error_cb:
	return rc;
}
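
/*
 * Illustrative sketch, not part of the original file: mappings are
 * refcounted, so callers pair msm_vidc_memory_map() with
 * msm_vidc_memory_unmap(); only the first map attaches and maps the
 * dma-buf, and only the last unmap detaches it. The 'fd' and 'rc'
 * variables are assumed for the example.
 *
 *	struct msm_vidc_map map = {0};
 *
 *	map.dmabuf = msm_vidc_memory_get_dmabuf(fd);
 *	map.region = MSM_VIDC_NON_SECURE;
 *	rc = msm_vidc_memory_map(core, &map);
 *	if (!rc) {
 *		// map.device_addr now holds the device-visible (IOVA) address
 *		msm_vidc_memory_unmap(core, &map);
 *	}
 */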

int msm_vidc_memory_unmap(struct msm_vidc_core *core, struct msm_vidc_map *map)
{
	int rc = 0;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount--;
	} else {
		d_vpr_e("unmap called while refcount is zero already\n");
		return -EINVAL;
	}

	if (map->refcount)
		goto exit;

	dma_buf_unmap_attachment(map->attach, map->table, DMA_BIDIRECTIONAL);
	dma_buf_detach(map->dmabuf, map->attach);

	map->device_addr = 0x0;
	map->dmabuf = NULL;
	map->attach = NULL;
	map->table = NULL;

exit:
	return rc;
}

int msm_vidc_memory_alloc(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;
	int size = 0;
	struct dma_heap *heap;
	char *heap_name = NULL;

	if (!mem) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	size = ALIGN(mem->size, SZ_4K);

	/* All dma-heap allocations are cached by default. */
	if (mem->secure) {
		switch (mem->region) {
		case MSM_VIDC_SECURE_PIXEL:
			heap_name = "qcom,secure-pixel";
			break;
		case MSM_VIDC_SECURE_NONPIXEL:
			heap_name = "qcom,secure-non-pixel";
			break;
		case MSM_VIDC_SECURE_BITSTREAM:
		default:
			d_vpr_e("invalid secure region: %#x\n", mem->region);
			return -EINVAL;
		}
	} else {
		heap_name = "qcom,system";
	}

	heap = dma_heap_find(heap_name);
	/* bail out early if the requested heap is not registered */
	if (!heap) {
		d_vpr_e("%s: dma heap %s not found\n", __func__, heap_name);
		return -ENOMEM;
	}

	mem->dmabuf = dma_heap_buffer_alloc(heap, size, 0, 0);
	if (IS_ERR_OR_NULL(mem->dmabuf)) {
		d_vpr_e("%s: dma heap %s alloc failed\n", __func__, heap_name);
		mem->dmabuf = NULL;
		rc = -ENOMEM;
		goto error;
	}

	if (mem->map_kernel) {
		dma_buf_begin_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
		mem->kvaddr = dma_buf_vmap(mem->dmabuf);
		if (!mem->kvaddr) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
	}

	d_vpr_h("%s: dmabuf = %pK, size = %d, kvaddr = %pK, buffer_type = %#x\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, mem->type);

	return 0;

error:
	msm_vidc_memory_free(core, mem);
	return rc;
}
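
/*
 * Illustrative sketch, not part of the original file: driver-internal
 * buffers come from a dma-heap via msm_vidc_memory_alloc() and are
 * released with msm_vidc_memory_free(). Field usage and the 'rc'
 * variable are assumed for the example.
 *
 *	struct msm_vidc_alloc alloc = {0};
 *
 *	alloc.size = SZ_1M;
 *	alloc.secure = false;
 *	alloc.map_kernel = true;	// also request a kernel vmap
 *	rc = msm_vidc_memory_alloc(core, &alloc);
 *	if (!rc) {
 *		// alloc.dmabuf and alloc.kvaddr are now valid
 *		msm_vidc_memory_free(core, &alloc);
 *	}
 */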

int msm_vidc_memory_free(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;

	if (!mem || !mem->dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	d_vpr_h("%s: dmabuf = %pK, size = %d, kvaddr = %pK, buffer_type = %#x\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, mem->type);

	if (mem->kvaddr) {
		dma_buf_vunmap(mem->dmabuf, mem->kvaddr);
		mem->kvaddr = NULL;
		dma_buf_end_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
	}

	if (mem->dmabuf) {
		dma_heap_buffer_free(mem->dmabuf);
		mem->dmabuf = NULL;
	}

	return rc;
}

/*
int msm_memory_cache_operations(struct msm_vidc_inst *inst,
	struct dma_buf *dbuf, enum smem_cache_ops cache_op,
	unsigned long offset, unsigned long size, u32 sid)
{
	int rc = 0;
	unsigned long flags = 0;

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (!dbuf) {
		i_vpr_e(inst, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = dma_buf_get_flags(dbuf, &flags);
	if (rc) {
		i_vpr_e(inst, "%s: dma_buf_get_flags failed, err %d\n",
			__func__, rc);
		return rc;
	} else if (!(flags & ION_FLAG_CACHED)) {
		return rc;
	}

	switch (cache_op) {
	case SMEM_CACHE_CLEAN:
	case SMEM_CACHE_CLEAN_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		break;
	case SMEM_CACHE_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
				offset, size);
		break;
	default:
		i_vpr_e(inst, "%s: cache (%d) operation not supported\n",
			__func__, cache_op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int msm_smem_memory_prefetch(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_prefetch(ION_SECURE_HEAP_ID, ion_region,
			vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: prefetch failed, ret: %d\n",
			__func__, rc);
	else
		i_vpr_l(inst, "%s: prefetch succeeded\n", __func__);

	return rc;
}

int msm_smem_memory_drain(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_drain(ION_SECURE_HEAP_ID, ion_region,
			vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: drain failed, ret: %d\n", __func__, rc);
	else
		i_vpr_l(inst, "%s: drain succeeded\n", __func__);

	return rc;
}
*/