msm_vidc_memory.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>

#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_driver.h"
#include "msm_vidc_core.h"
#include "msm_vidc_events.h"
#include "msm_vidc_platform.h"
#include "venus_hfi.h"

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0))
MODULE_IMPORT_NS(DMA_BUF);
#endif
struct msm_vidc_buf_region_name {
	enum msm_vidc_buffer_region region;
	char *name;
};
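/*
 * msm_vidc_get_context_bank() - look up the context bank registered for a
 * buffer region. Returns NULL (after logging an error) when the region is
 * invalid or no matching context bank is found.
 */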
struct context_bank_info *msm_vidc_get_context_bank(struct msm_vidc_core *core,
	enum msm_vidc_buffer_region region)
{
	struct context_bank_info *cb = NULL, *match = NULL;

	if (!region || region >= MSM_VIDC_REGION_MAX) {
		d_vpr_e("Invalid region %#x\n", region);
		return NULL;
	}

	venus_hfi_for_each_context_bank(core, cb) {
		if (cb->region == region) {
			match = cb;
			break;
		}
	}
	if (!match)
		d_vpr_e("cb not found for region %#x\n", region);

	return match;
}
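/*
 * msm_vidc_memory_get_dmabuf() - take a dma-buf reference for @fd and track it
 * in the instance's dmabuf_tracker. If the dma-buf is already tracked, only
 * the tracker refcount is bumped and the temporary reference is dropped.
 */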
struct dma_buf *msm_vidc_memory_get_dmabuf(struct msm_vidc_inst *inst, int fd)
{
	struct msm_memory_dmabuf *buf = NULL;
	struct dma_buf *dmabuf = NULL;
	bool found = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	/* get local dmabuf ref for tracking */
	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		d_vpr_e("Failed to get dmabuf for %d, error %ld\n",
			fd, PTR_ERR(dmabuf));
		return NULL;
	}

	/* track dmabuf - inc refcount if already present */
	list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
		if (buf->dmabuf == dmabuf) {
			buf->refcount++;
			found = true;
			break;
		}
	}
	if (found) {
		/* put local dmabuf ref */
		dma_buf_put(dmabuf);
		return dmabuf;
	}

	/* get tracker instance from pool */
	buf = msm_memory_pool_alloc(inst, MSM_MEM_POOL_DMABUF);
	if (!buf) {
		i_vpr_e(inst, "%s: dmabuf alloc failed\n", __func__);
		dma_buf_put(dmabuf);
		return NULL;
	}
	/* hold dmabuf strong ref in tracker */
	buf->dmabuf = dmabuf;
	buf->refcount = 1;
	INIT_LIST_HEAD(&buf->list);

	/* add new dmabuf entry to tracker */
	list_add_tail(&buf->list, &inst->dmabuf_tracker);

	return dmabuf;
}
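/*
 * msm_vidc_memory_put_dmabuf() - drop one tracker reference on @dmabuf. When
 * the tracker refcount reaches zero, the entry is removed, the strong dma-buf
 * reference is released and the tracker node is returned to the pool.
 */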
void msm_vidc_memory_put_dmabuf(struct msm_vidc_inst *inst, struct dma_buf *dmabuf)
{
	struct msm_memory_dmabuf *buf = NULL;
	bool found = false;

	if (!inst || !dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	/* track dmabuf - dec refcount if already present */
	list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
		if (buf->dmabuf == dmabuf) {
			buf->refcount--;
			found = true;
			break;
		}
	}
	if (!found) {
		i_vpr_e(inst, "%s: invalid dmabuf %pK\n", __func__, dmabuf);
		return;
	}

	/* non-zero refcount - do nothing */
	if (buf->refcount)
		return;

	/* remove dmabuf entry from tracker */
	list_del(&buf->list);

	/* release dmabuf strong ref from tracker */
	dma_buf_put(buf->dmabuf);

	/* put tracker instance back to pool */
	msm_memory_pool_free(inst, buf);
}
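/*
 * msm_vidc_memory_put_dmabuf_completely() - drop all outstanding tracker
 * references on @buf at once, releasing the dma-buf and returning the tracker
 * node to the pool when the count hits zero.
 */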
void msm_vidc_memory_put_dmabuf_completely(struct msm_vidc_inst *inst,
	struct msm_memory_dmabuf *buf)
{
	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	while (buf->refcount) {
		buf->refcount--;
		if (!buf->refcount) {
			/* remove dmabuf entry from tracker */
			list_del(&buf->list);

			/* release dmabuf strong ref from tracker */
			dma_buf_put(buf->dmabuf);

			/* put tracker instance back to pool */
			msm_memory_pool_free(inst, buf);
			break;
		}
	}
}
static bool is_non_secure_buffer(struct dma_buf *dmabuf)
{
	return mem_buf_dma_buf_exclusive_owner(dmabuf);
}
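/*
 * msm_vidc_memory_map() - attach and map @map->dmabuf into the IOMMU context
 * bank selected by @map->region and record the resulting device address.
 * Repeated calls on an already-mapped entry only increment map->refcount.
 */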
int msm_vidc_memory_map(struct msm_vidc_core *core, struct msm_vidc_map *map)
{
	int rc = 0;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount++;
		goto exit;
	}

	/* reject a non-secure mapping request for a secure buffer (or vice versa) */
	if (map->region == MSM_VIDC_NON_SECURE || map->region == MSM_VIDC_NON_SECURE_PIXEL) {
		if (!is_non_secure_buffer(map->dmabuf)) {
			d_vpr_e("%s: secure buffer mapping to non-secure region %d not allowed\n",
				__func__, map->region);
			return -EINVAL;
		}
	} else {
		if (is_non_secure_buffer(map->dmabuf)) {
			d_vpr_e("%s: non-secure buffer mapping to secure region %d not allowed\n",
				__func__, map->region);
			return -EINVAL;
		}
	}

	cb = msm_vidc_get_context_bank(core, map->region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n",
			__func__);
		rc = -EIO;
		goto error_cb;
	}

	/* Prepare a dma buf for dma on the given device */
	attach = dma_buf_attach(map->dmabuf, cb->dev);
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -ENOMEM;
		d_vpr_e("Failed to attach dmabuf\n");
		goto error_attach;
	}

	if (!map->skip_delayed_unmap) {
		/*
		 * Get the scatterlist for the given attachment.
		 * Mapping of the sg table is taken care of by map attachment.
		 */
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
	}

	/*
	 * The dma_map path does not need to perform cache operations on the
	 * whole buffer, so pass the skip-sync flag. The required cache
	 * operations are done separately, on just the required buffer size.
	 */
	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	if (is_sys_cache_present(core))
		attach->dma_map_attrs |=
			DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		rc = PTR_ERR(table) ? PTR_ERR(table) : -ENOMEM;
		d_vpr_e("Failed to map table\n");
		goto error_table;
	}
	if (!table->sgl) {
		d_vpr_e("sgl is NULL\n");
		rc = -ENOMEM;
		goto error_sg;
	}

	map->device_addr = table->sgl->dma_address;
	map->table = table;
	map->attach = attach;
	map->refcount++;

exit:
	d_vpr_l(
		"%s: type %11s, device_addr %#llx, refcount %d, region %d\n",
		__func__, buf_name(map->type), map->device_addr, map->refcount, map->region);
	return 0;

error_sg:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
error_table:
	dma_buf_detach(map->dmabuf, attach);
error_attach:
error_cb:
	return rc;
}
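/*
 * msm_vidc_memory_unmap() - drop one mapping reference; when the refcount
 * reaches zero, unmap and detach the dma-buf and clear the device address.
 */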
int msm_vidc_memory_unmap(struct msm_vidc_core *core,
	struct msm_vidc_map *map)
{
	int rc = 0;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount--;
	} else {
		d_vpr_e("unmap called while refcount is already zero\n");
		return -EINVAL;
	}

	d_vpr_l(
		"%s: type %11s, device_addr %#llx, refcount %d, region %d\n",
		__func__, buf_name(map->type), map->device_addr, map->refcount, map->region);

	if (map->refcount)
		goto exit;

	dma_buf_unmap_attachment(map->attach, map->table, DMA_BIDIRECTIONAL);
	dma_buf_detach(map->dmabuf, map->attach);

	map->device_addr = 0x0;
	map->attach = NULL;
	map->table = NULL;

exit:
	return rc;
}
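/*
 * Thin wrappers around the dma-buf attach/detach and map/unmap APIs: they add
 * parameter validation and error logging; the map/unmap wrappers always use
 * DMA_BIDIRECTIONAL.
 */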
struct dma_buf_attachment *msm_vidc_dma_buf_attach(struct dma_buf *dbuf,
	struct device *dev)
{
	int rc = 0;
	struct dma_buf_attachment *attach = NULL;

	if (!dbuf || !dev) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	attach = dma_buf_attach(dbuf, dev);
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -1;
		d_vpr_e("Failed to attach dmabuf, error %d\n", rc);
		return NULL;
	}

	return attach;
}
int msm_vidc_dma_buf_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *attach)
{
	int rc = 0;

	if (!dbuf || !attach) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	dma_buf_detach(dbuf, attach);

	return rc;
}
struct sg_table *msm_vidc_dma_buf_map_attachment(
	struct dma_buf_attachment *attach)
{
	int rc = 0;
	struct sg_table *table = NULL;

	if (!attach) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		rc = PTR_ERR(table) ? PTR_ERR(table) : -1;
		d_vpr_e("Failed to map table, error %d\n", rc);
		return NULL;
	}

	return table;
}
int msm_vidc_dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	int rc = 0;

	if (!attach || !table) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);

	return rc;
}
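/*
 * msm_vidc_vmem_alloc()/msm_vidc_vmem_free() - zeroed vmalloc helpers used for
 * the driver's internal allocations; @msg tags the caller in error logs.
 */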
int msm_vidc_vmem_alloc(unsigned long size, void **mem, const char *msg)
{
	int rc = 0;

	if (*mem) {
		/* bail out instead of overwriting (and leaking) the existing allocation */
		d_vpr_e("%s: error: double alloc\n", msg);
		return -EINVAL;
	}

	*mem = vzalloc(size);
	if (!*mem) {
		d_vpr_e("allocation failed for %s\n", msg);
		rc = -ENOMEM;
	}

	return rc;
}
void msm_vidc_vmem_free(void **addr)
{
	if (addr && *addr) {
		vfree(*addr);
		*addr = NULL;
	}
}
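/*
 * msm_vidc_memory_alloc() - allocate a driver-internal buffer from the DMA
 * heap matching @mem->region, lend secure bitstream (BIN) buffers to the
 * VMID_CP_BITSTREAM VM via mem_buf_lend(), and optionally map the buffer into
 * the kernel when @mem->map_kernel is set.
 */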
int msm_vidc_memory_alloc(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;
	int size = 0;
	struct dma_heap *heap;
	char *heap_name = NULL;
	struct mem_buf_lend_kernel_arg lend_arg;
	int vmids[1];
	int perms[1];

	if (!mem) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	size = ALIGN(mem->size, SZ_4K);

	if (mem->secure) {
		switch (mem->region) {
		case MSM_VIDC_SECURE_PIXEL:
			heap_name = "qcom,secure-pixel";
			break;
		case MSM_VIDC_SECURE_NONPIXEL:
			heap_name = "qcom,secure-non-pixel";
			break;
		case MSM_VIDC_SECURE_BITSTREAM:
			heap_name = "qcom,system";
			break;
		default:
			d_vpr_e("invalid secure region : %#x\n", mem->region);
			return -EINVAL;
		}
	} else {
		heap_name = "qcom,system";
	}

	heap = dma_heap_find(heap_name);
	if (IS_ERR_OR_NULL(heap)) {
		/* guard against the named heap not being present */
		d_vpr_e("%s: dma heap %s not found\n", __func__, heap_name);
		rc = -ENOMEM;
		goto error;
	}
	mem->dmabuf = dma_heap_buffer_alloc(heap, size, 0, 0);
	if (IS_ERR_OR_NULL(mem->dmabuf)) {
		d_vpr_e("%s: dma heap %s alloc failed\n", __func__, heap_name);
		mem->dmabuf = NULL;
		rc = -ENOMEM;
		goto error;
	}

	if (mem->secure && mem->type == MSM_VIDC_BUF_BIN) {
		vmids[0] = VMID_CP_BITSTREAM;
		perms[0] = PERM_READ | PERM_WRITE;

		lend_arg.nr_acl_entries = ARRAY_SIZE(vmids);
		lend_arg.vmids = vmids;
		lend_arg.perms = perms;

		rc = mem_buf_lend(mem->dmabuf, &lend_arg);
		if (rc) {
			d_vpr_e("%s: BIN dmabuf %pK LEND failed, rc %d heap %s\n",
				__func__, mem->dmabuf, rc, heap_name);
			goto error;
		}
	}

	if (mem->map_kernel) {
		dma_buf_begin_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);

		/*
		 * Waipio uses kernel version 5.10.x,
		 * Kalama uses kernel version 5.15.x,
		 * Pineapple uses kernel version 5.18.x
		 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0))
		mem->kvaddr = dma_buf_vmap(mem->dmabuf);
		if (!mem->kvaddr) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0))
		rc = dma_buf_vmap(mem->dmabuf, &mem->dmabuf_map);
		if (rc) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
		mem->kvaddr = mem->dmabuf_map.vaddr;
#else
		rc = dma_buf_vmap(mem->dmabuf, &mem->dmabuf_map);
		if (rc) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
		mem->kvaddr = mem->dmabuf_map.vaddr;
#endif
	}

	d_vpr_h(
		"%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, buf_name(mem->type),
		mem->secure, mem->region);
	trace_msm_vidc_dma_buffer("ALLOC", mem->dmabuf, mem->size, mem->kvaddr,
		buf_name(mem->type), mem->secure, mem->region);

	return 0;

error:
	msm_vidc_memory_free(core, mem);
	return rc;
}
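/*
 * msm_vidc_memory_free() - release a buffer allocated by
 * msm_vidc_memory_alloc(): drop any kernel mapping, end CPU access and return
 * the dma-buf to its heap.
 */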
int msm_vidc_memory_free(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;

	if (!mem || !mem->dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	d_vpr_h(
		"%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, buf_name(mem->type),
		mem->secure, mem->region);
	trace_msm_vidc_dma_buffer("FREE", mem->dmabuf, mem->size, mem->kvaddr,
		buf_name(mem->type), mem->secure, mem->region);

	if (mem->kvaddr) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0))
		dma_buf_vunmap(mem->dmabuf, mem->kvaddr);
#else
		dma_buf_vunmap(mem->dmabuf, &mem->dmabuf_map);
#endif
		mem->kvaddr = NULL;
		dma_buf_end_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
	}

	if (mem->dmabuf) {
		dma_heap_buffer_free(mem->dmabuf);
		mem->dmabuf = NULL;
	}

	return rc;
}
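/*
 * msm_memory_pool_alloc() - hand out a zeroed object of the given pool type,
 * reusing a node from the free list when possible and falling back to
 * msm_vidc_vmem_alloc() otherwise. The header placed in front of the returned
 * buffer records the pool type and a busy flag used to catch double frees.
 */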
void *msm_memory_pool_alloc(struct msm_vidc_inst *inst, enum msm_memory_pool_type type)
{
	struct msm_memory_alloc_header *hdr = NULL;
	struct msm_memory_pool *pool;

	if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return NULL;
	}
	pool = &inst->pool[type];

	if (!list_empty(&pool->free_pool)) {
		/* get 1st node from free pool */
		hdr = list_first_entry(&pool->free_pool,
			struct msm_memory_alloc_header, list);
		list_del_init(&hdr->list);

		/* reset existing data */
		memset((char *)hdr->buf, 0, pool->size);

		/* add to busy pool */
		list_add_tail(&hdr->list, &pool->busy_pool);

		/* set busy flag to true. This is to catch double free request */
		hdr->busy = true;

		return hdr->buf;
	}

	if (msm_vidc_vmem_alloc(pool->size + sizeof(struct msm_memory_alloc_header),
			(void **)&hdr, __func__))
		return NULL;

	INIT_LIST_HEAD(&hdr->list);
	hdr->type = type;
	hdr->busy = true;
	hdr->buf = (void *)(hdr + 1);
	list_add_tail(&hdr->list, &pool->busy_pool);

	return hdr->buf;
}
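/*
 * msm_memory_pool_free() - return a pool object to its free list. The header
 * in front of @vidc_buf is validated to reject bogus addresses, unknown pool
 * types and double-free requests.
 */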
void msm_memory_pool_free(struct msm_vidc_inst *inst, void *vidc_buf)
{
	struct msm_memory_alloc_header *hdr;
	struct msm_memory_pool *pool;

	if (!inst || !vidc_buf) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}
	hdr = (struct msm_memory_alloc_header *)vidc_buf - 1;

	/* sanitize buffer addr */
	if (hdr->buf != vidc_buf) {
		i_vpr_e(inst, "%s: invalid buf addr %pK\n", __func__, vidc_buf);
		return;
	}

	/* sanitize pool type */
	if (hdr->type < 0 || hdr->type >= MSM_MEM_POOL_MAX) {
		i_vpr_e(inst, "%s: invalid pool type %#x\n", __func__, hdr->type);
		return;
	}
	pool = &inst->pool[hdr->type];

	/* catch double-free request */
	if (!hdr->busy) {
		i_vpr_e(inst, "%s: double free request. type %s, addr %pK\n", __func__,
			pool->name, vidc_buf);
		return;
	}
	hdr->busy = false;

	/* remove from busy pool */
	list_del_init(&hdr->list);

	/* add to free pool */
	list_add_tail(&hdr->list, &pool->free_pool);
}
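/*
 * msm_vidc_destroy_pool_buffers() - free every node of one pool type. Nodes
 * still on the busy list indicate a leak and are reported before being freed.
 */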
static void msm_vidc_destroy_pool_buffers(struct msm_vidc_inst *inst,
	enum msm_memory_pool_type type)
{
	struct msm_memory_alloc_header *hdr, *dummy;
	struct msm_memory_pool *pool;
	u32 fcount = 0, bcount = 0;

	if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}
	pool = &inst->pool[type];

	/* detect memleak: busy pool is expected to be empty here */
	if (!list_empty(&pool->busy_pool))
		i_vpr_e(inst, "%s: destroy request on active buffer. type %s\n",
			__func__, pool->name);

	/* destroy all free buffers */
	list_for_each_entry_safe(hdr, dummy, &pool->free_pool, list) {
		list_del(&hdr->list);
		msm_vidc_vmem_free((void **)&hdr);
		fcount++;
	}

	/* destroy all busy buffers */
	list_for_each_entry_safe(hdr, dummy, &pool->busy_pool, list) {
		list_del(&hdr->list);
		msm_vidc_vmem_free((void **)&hdr);
		bcount++;
	}

	i_vpr_h(inst, "%s: type: %23s, count: free %2u, busy %2u\n",
		__func__, pool->name, fcount, bcount);
}
void msm_memory_pools_deinit(struct msm_vidc_inst *inst)
{
	u32 i = 0;

	if (!inst) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}

	/* destroy all buffers from all pool types */
	for (i = 0; i < MSM_MEM_POOL_MAX; i++)
		msm_vidc_destroy_pool_buffers(inst, i);
}
struct msm_vidc_type_size_name {
	enum msm_memory_pool_type type;
	u32 size;
	char *name;
};

static const struct msm_vidc_type_size_name buftype_size_name_arr[] = {
	{MSM_MEM_POOL_BUFFER, sizeof(struct msm_vidc_buffer), "MSM_MEM_POOL_BUFFER" },
	{MSM_MEM_POOL_MAP, sizeof(struct msm_vidc_map), "MSM_MEM_POOL_MAP" },
	{MSM_MEM_POOL_ALLOC, sizeof(struct msm_vidc_alloc), "MSM_MEM_POOL_ALLOC" },
	{MSM_MEM_POOL_TIMESTAMP, sizeof(struct msm_vidc_timestamp), "MSM_MEM_POOL_TIMESTAMP" },
	{MSM_MEM_POOL_DMABUF, sizeof(struct msm_memory_dmabuf), "MSM_MEM_POOL_DMABUF" },
	{MSM_MEM_POOL_PACKET, sizeof(struct hfi_pending_packet) + MSM_MEM_POOL_PACKET_SIZE,
		"MSM_MEM_POOL_PACKET"},
	{MSM_MEM_POOL_BUF_TIMER, sizeof(struct msm_vidc_input_timer), "MSM_MEM_POOL_BUF_TIMER" },
	{MSM_MEM_POOL_BUF_STATS, sizeof(struct msm_vidc_buffer_stats), "MSM_MEM_POOL_BUF_STATS"},
};
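/*
 * msm_memory_pools_init() - size and name each per-instance pool from
 * buftype_size_name_arr and initialize its free/busy lists. The table is
 * cross-checked against the msm_memory_pool_type enum values at runtime.
 */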
int msm_memory_pools_init(struct msm_vidc_inst *inst)
{
	u32 i;

	if (!inst) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	if (ARRAY_SIZE(buftype_size_name_arr) != MSM_MEM_POOL_MAX) {
		i_vpr_e(inst, "%s: num elements mismatch %lu %u\n", __func__,
			ARRAY_SIZE(buftype_size_name_arr), MSM_MEM_POOL_MAX);
		return -EINVAL;
	}

	for (i = 0; i < MSM_MEM_POOL_MAX; i++) {
		if (i != buftype_size_name_arr[i].type) {
			i_vpr_e(inst, "%s: type mismatch %u %u\n", __func__,
				i, buftype_size_name_arr[i].type);
			return -EINVAL;
		}
		inst->pool[i].size = buftype_size_name_arr[i].size;
		inst->pool[i].name = buftype_size_name_arr[i].name;
		INIT_LIST_HEAD(&inst->pool[i].free_pool);
		INIT_LIST_HEAD(&inst->pool[i].busy_pool);
	}

	return 0;
}
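/*
 * The ION/SMEM based cache-operation, prefetch and drain helpers below are
 * enclosed in a block comment and are not compiled.
 */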
/*
int msm_memory_cache_operations(struct msm_vidc_inst *inst,
	struct dma_buf *dbuf, enum smem_cache_ops cache_op,
	unsigned long offset, unsigned long size, u32 sid)
{
	int rc = 0;
	unsigned long flags = 0;

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (!dbuf) {
		i_vpr_e(inst, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = dma_buf_get_flags(dbuf, &flags);
	if (rc) {
		i_vpr_e(inst, "%s: dma_buf_get_flags failed, err %d\n",
			__func__, rc);
		return rc;
	} else if (!(flags & ION_FLAG_CACHED)) {
		return rc;
	}

	switch (cache_op) {
	case SMEM_CACHE_CLEAN:
	case SMEM_CACHE_CLEAN_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		break;
	case SMEM_CACHE_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
				offset, size);
		break;
	default:
		i_vpr_e(inst, "%s: cache (%d) operation not supported\n",
			__func__, cache_op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int msm_smem_memory_prefetch(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_prefetch(ION_SECURE_HEAP_ID, ion_region,
		vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: prefetch failed, ret: %d\n",
			__func__, rc);
	else
		i_vpr_l(inst, "%s: prefetch succeeded\n", __func__);

	return rc;
}

int msm_smem_memory_drain(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_drain(ION_SECURE_HEAP_ID, ion_region,
		vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: drain failed, ret: %d\n", __func__, rc);
	else
		i_vpr_l(inst, "%s: drain succeeded\n", __func__);

	return rc;
}
*/