msm_vidc_memory.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>

#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_driver.h"
#include "msm_vidc_dt.h"
#include "msm_vidc_core.h"

struct msm_vidc_buf_region_name {
	enum msm_vidc_buffer_region region;
	char *name;
};
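
/* Look up the device-tree context bank that backs a buffer region. */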
struct context_bank_info *get_context_bank(struct msm_vidc_core *core,
	enum msm_vidc_buffer_region region)
{
	const char *name;
	struct context_bank_info *cb = NULL, *match = NULL;
	static const struct msm_vidc_buf_region_name buf_region_name[] = {
		{MSM_VIDC_REGION_NONE, "none" },
		{MSM_VIDC_NON_SECURE, "venus_ns" },
		{MSM_VIDC_NON_SECURE_PIXEL, "venus_ns_pixel" },
		{MSM_VIDC_SECURE_PIXEL, "venus_sec_pixel" },
		{MSM_VIDC_SECURE_NONPIXEL, "venus_sec_non_pixel" },
		{MSM_VIDC_SECURE_BITSTREAM, "venus_sec_bitstream" },
	};

	if (!region || region >= ARRAY_SIZE(buf_region_name))
		goto exit;

	if (buf_region_name[region].region != region)
		goto exit;

	name = buf_region_name[region].name;

	list_for_each_entry(cb, &core->dt->context_banks, list) {
		if (!strcmp(cb->name, name)) {
			match = cb;
			break;
		}
	}
	if (!match)
		d_vpr_e("cb not found for region %#x\n", region);

	return match;

exit:
	d_vpr_e("Invalid region %#x\n", region);
	return NULL;
}
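
/*
 * Take a per-instance reference on the dma-buf behind @fd. Buffers are
 * tracked in inst->dmabuf_tracker, so repeated gets on the same buffer
 * only bump the tracker refcount instead of holding extra dma_buf refs.
 */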
struct dma_buf *msm_vidc_memory_get_dmabuf(struct msm_vidc_inst *inst, int fd)
{
	struct msm_memory_dmabuf *buf = NULL;
	struct dma_buf *dmabuf = NULL;
	bool found = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	/* get local dmabuf ref for tracking */
	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		d_vpr_e("Failed to get dmabuf for %d, error %ld\n",
			fd, PTR_ERR(dmabuf));
		return NULL;
	}

	/* track dmabuf - inc refcount if already present */
	list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
		if (buf->dmabuf == dmabuf) {
			buf->refcount++;
			found = true;
			break;
		}
	}
	if (found) {
		/* put local dmabuf ref */
		dma_buf_put(dmabuf);
		return dmabuf;
	}

	/* get tracker instance from pool */
	buf = msm_memory_alloc(inst, MSM_MEM_POOL_DMABUF);
	if (!buf) {
		i_vpr_e(inst, "%s: dmabuf alloc failed\n", __func__);
		dma_buf_put(dmabuf);
		return NULL;
	}

	/* hold dmabuf strong ref in tracker */
	buf->dmabuf = dmabuf;
	buf->refcount = 1;
	INIT_LIST_HEAD(&buf->list);

	/* add new dmabuf entry to tracker */
	list_add_tail(&buf->list, &inst->dmabuf_tracker);

	return dmabuf;
}
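
/*
 * Drop one tracker reference on @dmabuf. When the tracker refcount
 * reaches zero, the entry is removed and the strong dma_buf reference
 * is released.
 */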
void msm_vidc_memory_put_dmabuf(struct msm_vidc_inst *inst, struct dma_buf *dmabuf)
{
	struct msm_memory_dmabuf *buf = NULL;
	bool found = false;

	if (!inst || !dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	/* track dmabuf - dec refcount if already present */
	list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
		if (buf->dmabuf == dmabuf) {
			buf->refcount--;
			found = true;
			break;
		}
	}
	if (!found) {
		i_vpr_e(inst, "%s: invalid dmabuf %pK\n", __func__, dmabuf);
		return;
	}

	/* non-zero refcount - do nothing */
	if (buf->refcount)
		return;

	/* remove dmabuf entry from tracker */
	list_del(&buf->list);

	/* release dmabuf strong ref from tracker */
	dma_buf_put(buf->dmabuf);

	/* put tracker instance back to pool */
	msm_memory_free(inst, buf);
}
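
/* Drop all outstanding tracker references on @buf in one shot. */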
void msm_vidc_memory_put_dmabuf_completely(struct msm_vidc_inst *inst,
	struct msm_memory_dmabuf *buf)
{
	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	while (buf->refcount) {
		buf->refcount--;
		if (!buf->refcount) {
			/* remove dmabuf entry from tracker */
			list_del(&buf->list);

			/* release dmabuf strong ref from tracker */
			dma_buf_put(buf->dmabuf);

			/* put tracker instance back to pool */
			msm_memory_free(inst, buf);
			break;
		}
	}
}
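
/*
 * Attach and map @map->dmabuf into the IOMMU context bank selected by
 * @map->region. Mappings are refcounted: only the first call performs
 * the attach/map, later calls just increment map->refcount.
 */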
int msm_vidc_memory_map(struct msm_vidc_core *core, struct msm_vidc_map *map)
{
	int rc = 0;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount++;
		goto exit;
	}

	cb = get_context_bank(core, map->region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n",
			__func__);
		rc = -EIO;
		goto error_cb;
	}

	/* Prepare a dma buf for dma on the given device */
	attach = dma_buf_attach(map->dmabuf, cb->dev);
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -ENOMEM;
		d_vpr_e("Failed to attach dmabuf\n");
		goto error_attach;
	}

	if (!map->skip_delayed_unmap) {
		/*
		 * Get the scatterlist for the given attachment
		 * Mapping of sg is taken care by map attachment
		 */
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
	}

	/*
	 * We do not need dma_map function to perform cache operations
	 * on the whole buffer size and hence pass skip sync flag.
	 * We do the required cache operations separately for the
	 * required buffer size
	 */
	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	if (core->dt->sys_cache_present)
		attach->dma_map_attrs |=
			DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		rc = PTR_ERR(table) ? PTR_ERR(table) : -ENOMEM;
		d_vpr_e("Failed to map table\n");
		goto error_table;
	}
	if (!table->sgl) {
		d_vpr_e("sgl is NULL\n");
		rc = -ENOMEM;
		goto error_sg;
	}

	map->device_addr = table->sgl->dma_address;
	map->table = table;
	map->attach = attach;
	map->refcount++;

exit:
	d_vpr_l(
		"%s: type %11s, device_addr %#x, refcount %d, region %d\n",
		__func__, buf_name(map->type), map->device_addr, map->refcount, map->region);
	return 0;

error_sg:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
error_table:
	dma_buf_detach(map->dmabuf, attach);
error_attach:
error_cb:
	return rc;
}
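
/*
 * Drop one mapping reference; the buffer is unmapped and detached only
 * when map->refcount falls to zero.
 */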
int msm_vidc_memory_unmap(struct msm_vidc_core *core,
	struct msm_vidc_map *map)
{
	int rc = 0;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount--;
	} else {
		d_vpr_e("unmap called while refcount is zero already\n");
		return -EINVAL;
	}

	d_vpr_l(
		"%s: type %11s, device_addr %#x, refcount %d, region %d\n",
		__func__, buf_name(map->type), map->device_addr, map->refcount, map->region);

	if (map->refcount)
		goto exit;

	dma_buf_unmap_attachment(map->attach, map->table, DMA_BIDIRECTIONAL);
	dma_buf_detach(map->dmabuf, map->attach);

	map->device_addr = 0x0;
	map->attach = NULL;
	map->table = NULL;

exit:
	return rc;
}
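
/*
 * Allocate a driver-internal buffer from a DMA heap chosen by the
 * secure flag and region. Secure BIN buffers are additionally lent to
 * the CP_BITSTREAM VM, and the buffer is vmapped for kernel access when
 * map_kernel is set.
 */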
int msm_vidc_memory_alloc(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;
	int size = 0;
	struct dma_heap *heap;
	char *heap_name = NULL;
	struct mem_buf_lend_kernel_arg lend_arg;
	int vmids[1];
	int perms[1];

	if (!mem) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	size = ALIGN(mem->size, SZ_4K);

	if (mem->secure) {
		switch (mem->region) {
		case MSM_VIDC_SECURE_PIXEL:
			heap_name = "qcom,secure-pixel";
			break;
		case MSM_VIDC_SECURE_NONPIXEL:
			heap_name = "qcom,secure-non-pixel";
			break;
		case MSM_VIDC_SECURE_BITSTREAM:
			heap_name = "qcom,system";
			break;
		default:
			d_vpr_e("invalid secure region : %#x\n", mem->region);
			return -EINVAL;
		}
	} else {
		heap_name = "qcom,system";
	}

	heap = dma_heap_find(heap_name);
	mem->dmabuf = dma_heap_buffer_alloc(heap, size, 0, 0);
	if (IS_ERR_OR_NULL(mem->dmabuf)) {
		d_vpr_e("%s: dma heap %s alloc failed\n", __func__, heap_name);
		mem->dmabuf = NULL;
		rc = -ENOMEM;
		goto error;
	}

	if (mem->secure && mem->type == MSM_VIDC_BUF_BIN) {
		vmids[0] = VMID_CP_BITSTREAM;
		perms[0] = PERM_READ | PERM_WRITE;
		lend_arg.nr_acl_entries = ARRAY_SIZE(vmids);
		lend_arg.vmids = vmids;
		lend_arg.perms = perms;

		rc = mem_buf_lend(mem->dmabuf, &lend_arg);
		if (rc) {
			d_vpr_e("%s: BIN dmabuf %pK LEND failed, rc %d heap %s\n",
				__func__, mem->dmabuf, rc, heap_name);
			goto error;
		}
	}

	if (mem->map_kernel) {
		dma_buf_begin_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
		mem->kvaddr = dma_buf_vmap(mem->dmabuf);
		if (!mem->kvaddr) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
	}

	d_vpr_h(
		"%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, buf_name(mem->type),
		mem->secure, mem->region);
	return 0;

error:
	msm_vidc_memory_free(core, mem);
	return rc;
}
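
/* Undo msm_vidc_memory_alloc(): vunmap if mapped and free the dma-buf. */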
int msm_vidc_memory_free(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;

	if (!mem || !mem->dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	d_vpr_h(
		"%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, buf_name(mem->type),
		mem->secure, mem->region);

	if (mem->kvaddr) {
		dma_buf_vunmap(mem->dmabuf, mem->kvaddr);
		mem->kvaddr = NULL;
		dma_buf_end_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
	}

	if (mem->dmabuf) {
		dma_heap_buffer_free(mem->dmabuf);
		mem->dmabuf = NULL;
	}

	return rc;
}
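
/*
 * Pool allocator for small, frequently recycled objects. Each object is
 * prefixed with a msm_memory_alloc_header; freed objects are parked on
 * free_pool and reused before falling back to kzalloc().
 */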
void *msm_memory_alloc(struct msm_vidc_inst *inst, enum msm_memory_pool_type type)
{
	struct msm_memory_alloc_header *hdr;
	struct msm_memory_pool *pool;

	if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return NULL;
	}
	pool = &inst->pool[type];

	if (!list_empty(&pool->free_pool)) {
		/* get 1st node from free pool */
		hdr = list_first_entry(&pool->free_pool,
			struct msm_memory_alloc_header, list);
		list_del_init(&hdr->list);

		/* reset existing data */
		memset((char *)hdr->buf, 0, pool->size);

		/* add to busy pool */
		list_add_tail(&hdr->list, &pool->busy_pool);

		/* set busy flag to true. This is to catch double free request */
		hdr->busy = true;

		return hdr->buf;
	}

	hdr = kzalloc(pool->size + sizeof(struct msm_memory_alloc_header), GFP_KERNEL);
	if (!hdr) {
		i_vpr_e(inst, "%s: buffer allocation failed\n", __func__);
		return NULL;
	}

	INIT_LIST_HEAD(&hdr->list);
	hdr->type = type;
	hdr->busy = true;
	hdr->buf = (void *)(hdr + 1);
	list_add_tail(&hdr->list, &pool->busy_pool);

	return hdr->buf;
}
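
/*
 * Return a pool object to its free list. The header in front of the
 * buffer is validated to catch bad pointers and double frees.
 */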
void msm_memory_free(struct msm_vidc_inst *inst, void *vidc_buf)
{
	struct msm_memory_alloc_header *hdr;
	struct msm_memory_pool *pool;

	if (!inst || !vidc_buf) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}
	hdr = (struct msm_memory_alloc_header *)vidc_buf - 1;

	/* sanitize buffer addr */
	if (hdr->buf != vidc_buf) {
		i_vpr_e(inst, "%s: invalid buf addr %pK\n", __func__, vidc_buf);
		return;
	}

	/* sanitize pool type */
	if (hdr->type < 0 || hdr->type >= MSM_MEM_POOL_MAX) {
		i_vpr_e(inst, "%s: invalid pool type %#x\n", __func__, hdr->type);
		return;
	}
	pool = &inst->pool[hdr->type];

	/* catch double-free request */
	if (!hdr->busy) {
		i_vpr_e(inst, "%s: double free request. type %s, addr %pK\n", __func__,
			pool->name, vidc_buf);
		return;
	}
	hdr->busy = false;

	/* remove from busy pool */
	list_del_init(&hdr->list);

	/* add to free pool */
	list_add_tail(&hdr->list, &pool->free_pool);
}
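
/* Free every object of one pool type; leftover busy entries indicate a leak. */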
static void msm_vidc_destroy_pool_buffers(struct msm_vidc_inst *inst,
	enum msm_memory_pool_type type)
{
	struct msm_memory_alloc_header *hdr, *dummy;
	struct msm_memory_pool *pool;
	u32 fcount = 0, bcount = 0;

	if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}
	pool = &inst->pool[type];

	/* detect memleak: busy pool is expected to be empty here */
	if (!list_empty(&pool->busy_pool))
		i_vpr_e(inst, "%s: destroy request on active buffer. type %s\n",
			__func__, pool->name);

	/* destroy all free buffers */
	list_for_each_entry_safe(hdr, dummy, &pool->free_pool, list) {
		list_del(&hdr->list);
		kfree(hdr);
		fcount++;
	}

	/* destroy all busy buffers */
	list_for_each_entry_safe(hdr, dummy, &pool->busy_pool, list) {
		list_del(&hdr->list);
		kfree(hdr);
		bcount++;
	}

	i_vpr_h(inst, "%s: type: %23s, count: free %2u, busy %2u\n",
		__func__, pool->name, fcount, bcount);
}
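
/* Tear down all memory pools of an instance. */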
void msm_memory_pools_deinit(struct msm_vidc_inst *inst)
{
	u32 i = 0;

	if (!inst) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}

	/* destroy all buffers from all pool types */
	for (i = 0; i < MSM_MEM_POOL_MAX; i++)
		msm_vidc_destroy_pool_buffers(inst, i);
}

struct msm_vidc_type_size_name {
	enum msm_memory_pool_type type;
	u32 size;
	char *name;
};

static struct msm_vidc_type_size_name buftype_size_name_arr[] = {
	{MSM_MEM_POOL_BUFFER, sizeof(struct msm_vidc_buffer), "MSM_MEM_POOL_BUFFER" },
	{MSM_MEM_POOL_MAP, sizeof(struct msm_vidc_map), "MSM_MEM_POOL_MAP" },
	{MSM_MEM_POOL_ALLOC, sizeof(struct msm_vidc_alloc), "MSM_MEM_POOL_ALLOC" },
	{MSM_MEM_POOL_TIMESTAMP, sizeof(struct msm_vidc_timestamp), "MSM_MEM_POOL_TIMESTAMP" },
	{MSM_MEM_POOL_DMABUF, sizeof(struct msm_memory_dmabuf), "MSM_MEM_POOL_DMABUF" },
};
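
/*
 * Set up one pool per entry of buftype_size_name_arr[]; the table order
 * must match enum msm_memory_pool_type, which is verified below.
 */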
int msm_memory_pools_init(struct msm_vidc_inst *inst)
{
	u32 i;

	if (!inst) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	if (ARRAY_SIZE(buftype_size_name_arr) != MSM_MEM_POOL_MAX) {
		i_vpr_e(inst, "%s: num elements mismatch %u %u\n", __func__,
			ARRAY_SIZE(buftype_size_name_arr), MSM_MEM_POOL_MAX);
		return -EINVAL;
	}

	for (i = 0; i < MSM_MEM_POOL_MAX; i++) {
		if (i != buftype_size_name_arr[i].type) {
			i_vpr_e(inst, "%s: type mismatch %u %u\n", __func__,
				i, buftype_size_name_arr[i].type);
			return -EINVAL;
		}
		inst->pool[i].size = buftype_size_name_arr[i].size;
		inst->pool[i].name = buftype_size_name_arr[i].name;
		INIT_LIST_HEAD(&inst->pool[i].free_pool);
		INIT_LIST_HEAD(&inst->pool[i].busy_pool);
	}

	return 0;
}
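
/*
 * The legacy ION/SMEM cache, prefetch and drain helpers below are
 * commented out and not compiled.
 */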
/*
int msm_memory_cache_operations(struct msm_vidc_inst *inst,
	struct dma_buf *dbuf, enum smem_cache_ops cache_op,
	unsigned long offset, unsigned long size, u32 sid)
{
	int rc = 0;
	unsigned long flags = 0;

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (!dbuf) {
		i_vpr_e(inst, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = dma_buf_get_flags(dbuf, &flags);
	if (rc) {
		i_vpr_e(inst, "%s: dma_buf_get_flags failed, err %d\n",
			__func__, rc);
		return rc;
	} else if (!(flags & ION_FLAG_CACHED)) {
		return rc;
	}

	switch (cache_op) {
	case SMEM_CACHE_CLEAN:
	case SMEM_CACHE_CLEAN_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		break;
	case SMEM_CACHE_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
				offset, size);
		break;
	default:
		i_vpr_e(inst, "%s: cache (%d) operation not supported\n",
			__func__, cache_op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int msm_smem_memory_prefetch(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_prefetch(ION_SECURE_HEAP_ID, ion_region,
		vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: prefetch failed, ret: %d\n",
			__func__, rc);
	else
		i_vpr_l(inst, "%s: prefetch succeeded\n", __func__);

	return rc;
}

int msm_smem_memory_drain(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_drain(ION_SECURE_HEAP_ID, ion_region,
		vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: drain failed, ret: %d\n", __func__, rc);
	else
		i_vpr_l(inst, "%s: drain succeeded\n", __func__);

	return rc;
}
*/