msm_vidc_memory.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>

#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_driver.h"
#include "msm_vidc_dt.h"
#include "msm_vidc_core.h"
#include "msm_vidc_events.h"
#include "venus_hfi.h"

struct msm_vidc_buf_region_name {
	enum msm_vidc_buffer_region region;
	char *name;
};
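
/*
 * get_context_bank() - look up the IOMMU context bank for a buffer region.
 *
 * Translates @region into the context bank name registered from devicetree
 * (e.g. "venus_ns", "venus_sec_pixel") and returns the matching entry from
 * core->dt->context_banks, or NULL if the region is invalid or no context
 * bank with that name exists.
 */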
struct context_bank_info *get_context_bank(struct msm_vidc_core *core,
	enum msm_vidc_buffer_region region)
{
	const char *name;
	struct context_bank_info *cb = NULL, *match = NULL;
	static const struct msm_vidc_buf_region_name buf_region_name[] = {
		{MSM_VIDC_REGION_NONE,        "none"                },
		{MSM_VIDC_NON_SECURE,         "venus_ns"            },
		{MSM_VIDC_NON_SECURE_PIXEL,   "venus_ns_pixel"      },
		{MSM_VIDC_SECURE_PIXEL,       "venus_sec_pixel"     },
		{MSM_VIDC_SECURE_NONPIXEL,    "venus_sec_non_pixel" },
		{MSM_VIDC_SECURE_BITSTREAM,   "venus_sec_bitstream" },
	};

	if (!region || region >= ARRAY_SIZE(buf_region_name))
		goto exit;

	if (buf_region_name[region].region != region)
		goto exit;

	name = buf_region_name[region].name;

	list_for_each_entry(cb, &core->dt->context_banks, list) {
		if (!strcmp(cb->name, name)) {
			match = cb;
			break;
		}
	}
	if (!match)
		d_vpr_e("cb not found for region %#x\n", region);

	return match;

exit:
	d_vpr_e("Invalid region %#x\n", region);
	return NULL;
}
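
/*
 * msm_vidc_memory_get_dmabuf() - take a tracked reference on the dmabuf
 * backing @fd.
 *
 * The dma_buf obtained via dma_buf_get() is cached in the per-instance
 * dmabuf_tracker list, so repeated lookups of the same buffer only bump a
 * local refcount instead of holding additional kernel references. The
 * reference is released with msm_vidc_memory_put_dmabuf().
 */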
struct dma_buf *msm_vidc_memory_get_dmabuf(struct msm_vidc_inst *inst, int fd)
{
	struct msm_memory_dmabuf *buf = NULL;
	struct dma_buf *dmabuf = NULL;
	bool found = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	/* get local dmabuf ref for tracking */
	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		d_vpr_e("Failed to get dmabuf for %d, error %ld\n",
			fd, PTR_ERR(dmabuf));
		return NULL;
	}

	/* track dmabuf - inc refcount if already present */
	list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
		if (buf->dmabuf == dmabuf) {
			buf->refcount++;
			found = true;
			break;
		}
	}
	if (found) {
		/* put local dmabuf ref */
		dma_buf_put(dmabuf);
		return dmabuf;
	}

	/* get tracker instance from pool */
	buf = msm_memory_pool_alloc(inst, MSM_MEM_POOL_DMABUF);
	if (!buf) {
		i_vpr_e(inst, "%s: dmabuf alloc failed\n", __func__);
		dma_buf_put(dmabuf);
		return NULL;
	}

	/* hold dmabuf strong ref in tracker */
	buf->dmabuf = dmabuf;
	buf->refcount = 1;
	INIT_LIST_HEAD(&buf->list);

	/* add new dmabuf entry to tracker */
	list_add_tail(&buf->list, &inst->dmabuf_tracker);

	return dmabuf;
}
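
/*
 * msm_vidc_memory_put_dmabuf() - drop one tracked reference on @dmabuf.
 *
 * When the tracker refcount hits zero, the entry is removed from the
 * dmabuf_tracker list, the underlying dma_buf reference is released and
 * the tracker node is returned to the MSM_MEM_POOL_DMABUF pool.
 */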
void msm_vidc_memory_put_dmabuf(struct msm_vidc_inst *inst, struct dma_buf *dmabuf)
{
	struct msm_memory_dmabuf *buf = NULL;
	bool found = false;

	if (!inst || !dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	/* track dmabuf - dec refcount if already present */
	list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
		if (buf->dmabuf == dmabuf) {
			buf->refcount--;
			found = true;
			break;
		}
	}
	if (!found) {
		i_vpr_e(inst, "%s: invalid dmabuf %pK\n", __func__, dmabuf);
		return;
	}

	/* non-zero refcount - do nothing */
	if (buf->refcount)
		return;

	/* remove dmabuf entry from tracker */
	list_del(&buf->list);

	/* release dmabuf strong ref from tracker */
	dma_buf_put(buf->dmabuf);

	/* put tracker instance back to pool */
	msm_memory_pool_free(inst, buf);
}
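
/*
 * msm_vidc_memory_put_dmabuf_completely() - drop all outstanding tracked
 * references on @buf at once, typically during instance cleanup.
 */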
void msm_vidc_memory_put_dmabuf_completely(struct msm_vidc_inst *inst,
	struct msm_memory_dmabuf *buf)
{
	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	while (buf->refcount) {
		buf->refcount--;
		if (!buf->refcount) {
			/* remove dmabuf entry from tracker */
			list_del(&buf->list);

			/* release dmabuf strong ref from tracker */
			dma_buf_put(buf->dmabuf);

			/* put tracker instance back to pool */
			msm_memory_pool_free(inst, buf);
			break;
		}
	}
}
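
/*
 * A buffer is treated as non-secure while its memory is still exclusively
 * owned by the local VM, i.e. it has not been lent or assigned to a secure VM.
 */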
static bool is_non_secure_buffer(struct dma_buf *dmabuf)
{
	return mem_buf_dma_buf_exclusive_owner(dmabuf);
}
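
/*
 * msm_vidc_memory_map() - map a dmabuf into the IOMMU context bank of
 * map->region and record the resulting device address.
 *
 * Mappings are refcounted: an already mapped buffer only gets its refcount
 * incremented. Secure buffers may only be mapped into secure regions (and
 * vice versa). The first mapping attaches the dmabuf to the context bank
 * device and maps the attachment with CPU cache sync skipped, since the
 * driver performs the required cache operations separately.
 */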
int msm_vidc_memory_map(struct msm_vidc_core *core, struct msm_vidc_map *map)
{
	int rc = 0;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount++;
		goto exit;
	}

	/* reject non-secure mapping request for a secure buffer (or vice versa) */
	if (map->region == MSM_VIDC_NON_SECURE || map->region == MSM_VIDC_NON_SECURE_PIXEL) {
		if (!is_non_secure_buffer(map->dmabuf)) {
			d_vpr_e("%s: secure buffer mapping to non-secure region %d not allowed\n",
				__func__, map->region);
			return -EINVAL;
		}
	} else {
		if (is_non_secure_buffer(map->dmabuf)) {
			d_vpr_e("%s: non-secure buffer mapping to secure region %d not allowed\n",
				__func__, map->region);
			return -EINVAL;
		}
	}

	cb = get_context_bank(core, map->region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n", __func__);
		rc = -EIO;
		goto error_cb;
	}

	/* Prepare a dma buf for dma on the given device */
	attach = dma_buf_attach(map->dmabuf, cb->dev);
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -ENOMEM;
		d_vpr_e("Failed to attach dmabuf\n");
		goto error_attach;
	}

	if (!map->skip_delayed_unmap) {
		/*
		 * Get the scatterlist for the given attachment
		 * Mapping of sg is taken care by map attachment
		 */
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
	}

	/*
	 * We do not need dma_map function to perform cache operations
	 * on the whole buffer size and hence pass skip sync flag.
	 * We do the required cache operations separately for the
	 * required buffer size
	 */
	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	if (core->dt->sys_cache_present)
		attach->dma_map_attrs |=
			DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		rc = PTR_ERR(table) ? PTR_ERR(table) : -ENOMEM;
		d_vpr_e("Failed to map table\n");
		goto error_table;
	}
	if (!table->sgl) {
		d_vpr_e("sgl is NULL\n");
		rc = -ENOMEM;
		goto error_sg;
	}

	map->device_addr = table->sgl->dma_address;
	map->table = table;
	map->attach = attach;
	map->refcount++;

exit:
	d_vpr_l(
		"%s: type %11s, device_addr %#x, refcount %d, region %d\n",
		__func__, buf_name(map->type), map->device_addr, map->refcount, map->region);
	return 0;

error_sg:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
error_table:
	dma_buf_detach(map->dmabuf, attach);
error_attach:
error_cb:
	return rc;
}
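
/*
 * msm_vidc_memory_unmap() - drop one mapping reference; on the last put the
 * attachment is unmapped and detached and the device address is cleared.
 */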
int msm_vidc_memory_unmap(struct msm_vidc_core *core,
	struct msm_vidc_map *map)
{
	int rc = 0;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount--;
	} else {
		d_vpr_e("unmap called while refcount is zero already\n");
		return -EINVAL;
	}

	d_vpr_l(
		"%s: type %11s, device_addr %#x, refcount %d, region %d\n",
		__func__, buf_name(map->type), map->device_addr, map->refcount, map->region);

	if (map->refcount)
		goto exit;

	dma_buf_unmap_attachment(map->attach, map->table, DMA_BIDIRECTIONAL);
	dma_buf_detach(map->dmabuf, map->attach);

	map->device_addr = 0x0;
	map->attach = NULL;
	map->table = NULL;

exit:
	return rc;
}
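
/*
 * msm_vidc_memory_alloc() - allocate driver-internal memory from a DMA heap.
 *
 * The heap is selected from the requested region ("qcom,system" for
 * non-secure buffers, dedicated secure heaps for pixel/non-pixel regions).
 * Secure BIN buffers are additionally lent to the CP_BITSTREAM VM, and
 * buffers with map_kernel set are also mapped into the kernel for CPU
 * access.
 */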
int msm_vidc_memory_alloc(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;
	int size = 0;
	struct dma_heap *heap;
	char *heap_name = NULL;
	struct mem_buf_lend_kernel_arg lend_arg;
	int vmids[1];
	int perms[1];

	if (!mem) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	size = ALIGN(mem->size, SZ_4K);

	if (mem->secure) {
		switch (mem->region) {
		case MSM_VIDC_SECURE_PIXEL:
			heap_name = "qcom,secure-pixel";
			break;
		case MSM_VIDC_SECURE_NONPIXEL:
			heap_name = "qcom,secure-non-pixel";
			break;
		case MSM_VIDC_SECURE_BITSTREAM:
			heap_name = "qcom,system";
			break;
		default:
			d_vpr_e("invalid secure region : %#x\n", mem->region);
			return -EINVAL;
		}
	} else {
		heap_name = "qcom,system";
	}

	heap = dma_heap_find(heap_name);
	if (IS_ERR_OR_NULL(heap)) {
		/* bail out early if the requested heap is unavailable */
		d_vpr_e("%s: dma heap %s not found\n", __func__, heap_name);
		return -ENOMEM;
	}
	mem->dmabuf = dma_heap_buffer_alloc(heap, size, 0, 0);
	if (IS_ERR_OR_NULL(mem->dmabuf)) {
		d_vpr_e("%s: dma heap %s alloc failed\n", __func__, heap_name);
		mem->dmabuf = NULL;
		rc = -ENOMEM;
		goto error;
	}

	if (mem->secure && mem->type == MSM_VIDC_BUF_BIN) {
		vmids[0] = VMID_CP_BITSTREAM;
		perms[0] = PERM_READ | PERM_WRITE;

		lend_arg.nr_acl_entries = ARRAY_SIZE(vmids);
		lend_arg.vmids = vmids;
		lend_arg.perms = perms;

		rc = mem_buf_lend(mem->dmabuf, &lend_arg);
		if (rc) {
			d_vpr_e("%s: BIN dmabuf %pK LEND failed, rc %d heap %s\n",
				__func__, mem->dmabuf, rc, heap_name);
			goto error;
		}
	}

	if (mem->map_kernel) {
		dma_buf_begin_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
		mem->kvaddr = dma_buf_vmap(mem->dmabuf);
		if (!mem->kvaddr) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
#else
		rc = dma_buf_vmap(mem->dmabuf, &mem->dmabuf_map);
		if (rc) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
		mem->kvaddr = mem->dmabuf_map.vaddr;
#endif
	}

	d_vpr_h(
		"%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, buf_name(mem->type),
		mem->secure, mem->region);
	trace_msm_vidc_dma_buffer("ALLOC", mem->dmabuf, mem->size, mem->kvaddr,
		buf_name(mem->type), mem->secure, mem->region);

	return 0;

error:
	msm_vidc_memory_free(core, mem);
	return rc;
}
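
/*
 * msm_vidc_memory_free() - undo msm_vidc_memory_alloc(): remove any kernel
 * mapping, end CPU access and return the buffer to its DMA heap.
 */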
int msm_vidc_memory_free(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;

	if (!mem || !mem->dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	d_vpr_h(
		"%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, buf_name(mem->type),
		mem->secure, mem->region);
	trace_msm_vidc_dma_buffer("FREE", mem->dmabuf, mem->size, mem->kvaddr,
		buf_name(mem->type), mem->secure, mem->region);

	if (mem->kvaddr) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
		dma_buf_vunmap(mem->dmabuf, mem->kvaddr);
#else
		dma_buf_vunmap(mem->dmabuf, &mem->dmabuf_map);
#endif
		mem->kvaddr = NULL;
		dma_buf_end_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
	}

	if (mem->dmabuf) {
		dma_heap_buffer_free(mem->dmabuf);
		mem->dmabuf = NULL;
	}

	return rc;
}
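
/*
 * msm_memory_pool_alloc() - get a zeroed object of the given pool type.
 *
 * Objects are recycled through per-type free/busy lists to avoid frequent
 * small kzalloc()/kfree() cycles; a new allocation is made only when the
 * free list is empty. The returned pointer addresses the payload that
 * follows the msm_memory_alloc_header bookkeeping header.
 */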
void *msm_memory_pool_alloc(struct msm_vidc_inst *inst, enum msm_memory_pool_type type)
{
	struct msm_memory_alloc_header *hdr;
	struct msm_memory_pool *pool;

	if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return NULL;
	}
	pool = &inst->pool[type];

	if (!list_empty(&pool->free_pool)) {
		/* get 1st node from free pool */
		hdr = list_first_entry(&pool->free_pool,
			struct msm_memory_alloc_header, list);
		list_del_init(&hdr->list);

		/* reset existing data */
		memset((char *)hdr->buf, 0, pool->size);

		/* add to busy pool */
		list_add_tail(&hdr->list, &pool->busy_pool);

		/* set busy flag to true. This is to catch double free request */
		hdr->busy = true;

		return hdr->buf;
	}

	hdr = kzalloc(pool->size + sizeof(struct msm_memory_alloc_header), GFP_KERNEL);
	if (!hdr) {
		i_vpr_e(inst, "%s: buffer allocation failed\n", __func__);
		return NULL;
	}

	INIT_LIST_HEAD(&hdr->list);
	hdr->type = type;
	hdr->busy = true;
	hdr->buf = (void *)(hdr + 1);
	list_add_tail(&hdr->list, &pool->busy_pool);

	return hdr->buf;
}
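
/*
 * msm_memory_pool_free() - return a pool object to its free list.
 *
 * The header stored immediately before @vidc_buf is validated (buffer
 * address, pool type and busy flag) so that stray pointers and double-free
 * requests are logged instead of corrupting the pool lists.
 */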
void msm_memory_pool_free(struct msm_vidc_inst *inst, void *vidc_buf)
{
	struct msm_memory_alloc_header *hdr;
	struct msm_memory_pool *pool;

	if (!inst || !vidc_buf) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}
	hdr = (struct msm_memory_alloc_header *)vidc_buf - 1;

	/* sanitize buffer addr */
	if (hdr->buf != vidc_buf) {
		i_vpr_e(inst, "%s: invalid buf addr %pK\n", __func__, vidc_buf);
		return;
	}

	/* sanitize pool type */
	if (hdr->type < 0 || hdr->type >= MSM_MEM_POOL_MAX) {
		i_vpr_e(inst, "%s: invalid pool type %#x\n", __func__, hdr->type);
		return;
	}
	pool = &inst->pool[hdr->type];

	/* catch double-free request */
	if (!hdr->busy) {
		i_vpr_e(inst, "%s: double free request. type %s, addr %pK\n", __func__,
			pool->name, vidc_buf);
		return;
	}
	hdr->busy = false;

	/* remove from busy pool */
	list_del_init(&hdr->list);

	/* add to free pool */
	list_add_tail(&hdr->list, &pool->free_pool);
}
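
/*
 * msm_vidc_destroy_pool_buffers() - free every object of one pool type.
 * A non-empty busy list at this point indicates a leaked (still in use)
 * object and is reported before the memory is released.
 */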
static void msm_vidc_destroy_pool_buffers(struct msm_vidc_inst *inst,
	enum msm_memory_pool_type type)
{
	struct msm_memory_alloc_header *hdr, *dummy;
	struct msm_memory_pool *pool;
	u32 fcount = 0, bcount = 0;

	if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}
	pool = &inst->pool[type];

	/* detect memleak: busy pool is expected to be empty here */
	if (!list_empty(&pool->busy_pool))
		i_vpr_e(inst, "%s: destroy request on active buffer. type %s\n",
			__func__, pool->name);

	/* destroy all free buffers */
	list_for_each_entry_safe(hdr, dummy, &pool->free_pool, list) {
		list_del(&hdr->list);
		kfree(hdr);
		fcount++;
	}

	/* destroy all busy buffers */
	list_for_each_entry_safe(hdr, dummy, &pool->busy_pool, list) {
		list_del(&hdr->list);
		kfree(hdr);
		bcount++;
	}

	i_vpr_h(inst, "%s: type: %23s, count: free %2u, busy %2u\n",
		__func__, pool->name, fcount, bcount);
}
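
/*
 * msm_memory_pools_deinit() - destroy the objects of every pool type owned
 * by the instance.
 */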
void msm_memory_pools_deinit(struct msm_vidc_inst *inst)
{
	u32 i = 0;

	if (!inst) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}

	/* destroy all buffers from all pool types */
	for (i = 0; i < MSM_MEM_POOL_MAX; i++)
		msm_vidc_destroy_pool_buffers(inst, i);
}
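
/*
 * Per-pool object size and display name. Entries must stay index-aligned
 * with enum msm_memory_pool_type; msm_memory_pools_init() verifies both
 * the element count and the per-entry ordering.
 */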
struct msm_vidc_type_size_name {
	enum msm_memory_pool_type type;
	u32 size;
	char *name;
};

static struct msm_vidc_type_size_name buftype_size_name_arr[] = {
	{MSM_MEM_POOL_BUFFER,    sizeof(struct msm_vidc_buffer),    "MSM_MEM_POOL_BUFFER"    },
	{MSM_MEM_POOL_MAP,       sizeof(struct msm_vidc_map),       "MSM_MEM_POOL_MAP"       },
	{MSM_MEM_POOL_ALLOC,     sizeof(struct msm_vidc_alloc),     "MSM_MEM_POOL_ALLOC"     },
	{MSM_MEM_POOL_TIMESTAMP, sizeof(struct msm_vidc_timestamp), "MSM_MEM_POOL_TIMESTAMP" },
	{MSM_MEM_POOL_DMABUF,    sizeof(struct msm_memory_dmabuf),  "MSM_MEM_POOL_DMABUF"    },
	{MSM_MEM_POOL_PACKET,    sizeof(struct hfi_pending_packet) + MSM_MEM_POOL_PACKET_SIZE,
		"MSM_MEM_POOL_PACKET"},
};
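
/*
 * msm_memory_pools_init() - set per-type object size and name and initialize
 * empty free/busy lists for every memory pool of the instance.
 */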
int msm_memory_pools_init(struct msm_vidc_inst *inst)
{
	u32 i;

	if (!inst) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	if (ARRAY_SIZE(buftype_size_name_arr) != MSM_MEM_POOL_MAX) {
		i_vpr_e(inst, "%s: num elements mismatch %zu %u\n", __func__,
			ARRAY_SIZE(buftype_size_name_arr), MSM_MEM_POOL_MAX);
		return -EINVAL;
	}

	for (i = 0; i < MSM_MEM_POOL_MAX; i++) {
		if (i != buftype_size_name_arr[i].type) {
			i_vpr_e(inst, "%s: type mismatch %u %u\n", __func__,
				i, buftype_size_name_arr[i].type);
			return -EINVAL;
		}
		inst->pool[i].size = buftype_size_name_arr[i].size;
		inst->pool[i].name = buftype_size_name_arr[i].name;
		INIT_LIST_HEAD(&inst->pool[i].free_pool);
		INIT_LIST_HEAD(&inst->pool[i].busy_pool);
	}

	return 0;
}
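
/*
 * The ION/SMEM based cache-maintenance and secure-heap prefetch/drain
 * helpers below are retained only as commented-out reference code.
 */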
/*
int msm_memory_cache_operations(struct msm_vidc_inst *inst,
	struct dma_buf *dbuf, enum smem_cache_ops cache_op,
	unsigned long offset, unsigned long size, u32 sid)
{
	int rc = 0;
	unsigned long flags = 0;

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (!dbuf) {
		i_vpr_e(inst, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = dma_buf_get_flags(dbuf, &flags);
	if (rc) {
		i_vpr_e(inst, "%s: dma_buf_get_flags failed, err %d\n",
			__func__, rc);
		return rc;
	} else if (!(flags & ION_FLAG_CACHED)) {
		return rc;
	}

	switch (cache_op) {
	case SMEM_CACHE_CLEAN:
	case SMEM_CACHE_CLEAN_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		break;
	case SMEM_CACHE_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
				offset, size);
		break;
	default:
		i_vpr_e(inst, "%s: cache (%d) operation not supported\n",
			__func__, cache_op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int msm_smem_memory_prefetch(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_prefetch(ION_SECURE_HEAP_ID, ion_region,
		vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: prefetch failed, ret: %d\n",
			__func__, rc);
	else
		i_vpr_l(inst, "%s: prefetch succeeded\n", __func__);

	return rc;
}

int msm_smem_memory_drain(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_drain(ION_SECURE_HEAP_ID, ion_region,
		vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: drain failed, ret: %d\n", __func__, rc);
	else
		i_vpr_l(inst, "%s: drain succeeded\n", __func__);

	return rc;
}
*/