msm_vidc_memory.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>

#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_driver.h"
#include "msm_vidc_core.h"
#include "msm_vidc_events.h"
#include "venus_hfi.h"

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0))
MODULE_IMPORT_NS(DMA_BUF);
#endif

struct msm_vidc_buf_region_name {
	enum msm_vidc_buffer_region region;
	char *name;
};
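
/*
 * msm_vidc_get_context_bank() - look up the IOMMU context bank for a region.
 *
 * Maps a buffer region enum to its device-tree context bank name and walks
 * core->dt->context_banks for a matching entry. Returns the matching
 * context_bank_info, or NULL if the region is invalid or no bank is found.
 */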
struct context_bank_info *msm_vidc_get_context_bank(struct msm_vidc_core *core,
	enum msm_vidc_buffer_region region)
{
	const char *name;
	struct context_bank_info *cb = NULL, *match = NULL;
	static const struct msm_vidc_buf_region_name buf_region_name[] = {
		{MSM_VIDC_REGION_NONE,       "none"                },
		{MSM_VIDC_NON_SECURE,        "venus_ns"            },
		{MSM_VIDC_NON_SECURE_PIXEL,  "venus_ns_pixel"      },
		{MSM_VIDC_SECURE_PIXEL,      "venus_sec_pixel"     },
		{MSM_VIDC_SECURE_NONPIXEL,   "venus_sec_non_pixel" },
		{MSM_VIDC_SECURE_BITSTREAM,  "venus_sec_bitstream" },
	};

	/* region is used to index buf_region_name[] */
	if (!region || region >= ARRAY_SIZE(buf_region_name))
		goto exit;
	if (buf_region_name[region].region != region)
		goto exit;

	name = buf_region_name[region].name;

	list_for_each_entry(cb, &core->dt->context_banks, list) {
		if (!strcmp(cb->name, name)) {
			match = cb;
			break;
		}
	}
	if (!match)
		d_vpr_e("cb not found for region %#x\n", region);

	return match;

exit:
	d_vpr_e("Invalid region %#x\n", region);
	return NULL;
}
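
/*
 * msm_vidc_memory_get_dmabuf() - take and track a dmabuf reference by fd.
 *
 * Resolves the fd via dma_buf_get(). If the dmabuf is already tracked on
 * inst->dmabuf_tracker, only the tracker refcount is bumped and the local
 * reference is dropped; otherwise a tracker node is taken from the DMABUF
 * memory pool and the strong reference is kept there. Returns the dmabuf,
 * or NULL on failure.
 */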
struct dma_buf *msm_vidc_memory_get_dmabuf(struct msm_vidc_inst *inst, int fd)
{
	struct msm_memory_dmabuf *buf = NULL;
	struct dma_buf *dmabuf = NULL;
	bool found = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	/* get local dmabuf ref for tracking */
	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		d_vpr_e("Failed to get dmabuf for %d, error %ld\n",
			fd, PTR_ERR(dmabuf));
		return NULL;
	}

	/* track dmabuf - inc refcount if already present */
	list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
		if (buf->dmabuf == dmabuf) {
			buf->refcount++;
			found = true;
			break;
		}
	}
	if (found) {
		/* put local dmabuf ref */
		dma_buf_put(dmabuf);
		return dmabuf;
	}

	/* get tracker instance from pool */
	buf = msm_memory_pool_alloc(inst, MSM_MEM_POOL_DMABUF);
	if (!buf) {
		i_vpr_e(inst, "%s: dmabuf alloc failed\n", __func__);
		dma_buf_put(dmabuf);
		return NULL;
	}

	/* hold dmabuf strong ref in tracker */
	buf->dmabuf = dmabuf;
	buf->refcount = 1;
	INIT_LIST_HEAD(&buf->list);

	/* add new dmabuf entry to tracker */
	list_add_tail(&buf->list, &inst->dmabuf_tracker);

	return dmabuf;
}
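
/*
 * msm_vidc_memory_put_dmabuf() - drop one tracked reference to a dmabuf.
 *
 * Decrements the tracker refcount for the dmabuf. When the refcount reaches
 * zero, the tracker node is unlinked, the strong dma_buf reference is
 * released and the node is returned to the DMABUF memory pool.
 */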
void msm_vidc_memory_put_dmabuf(struct msm_vidc_inst *inst, struct dma_buf *dmabuf)
{
	struct msm_memory_dmabuf *buf = NULL;
	bool found = false;

	if (!inst || !dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	/* track dmabuf - dec refcount if already present */
	list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
		if (buf->dmabuf == dmabuf) {
			buf->refcount--;
			found = true;
			break;
		}
	}
	if (!found) {
		i_vpr_e(inst, "%s: invalid dmabuf %pK\n", __func__, dmabuf);
		return;
	}

	/* non-zero refcount - do nothing */
	if (buf->refcount)
		return;

	/* remove dmabuf entry from tracker */
	list_del(&buf->list);

	/* release dmabuf strong ref from tracker */
	dma_buf_put(buf->dmabuf);

	/* put tracker instance back to pool */
	msm_memory_pool_free(inst, buf);
}

void msm_vidc_memory_put_dmabuf_completely(struct msm_vidc_inst *inst,
	struct msm_memory_dmabuf *buf)
{
	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	while (buf->refcount) {
		buf->refcount--;
		if (!buf->refcount) {
			/* remove dmabuf entry from tracker */
			list_del(&buf->list);

			/* release dmabuf strong ref from tracker */
			dma_buf_put(buf->dmabuf);

			/* put tracker instance back to pool */
			msm_memory_pool_free(inst, buf);
			break;
		}
	}
}

static bool is_non_secure_buffer(struct dma_buf *dmabuf)
{
	return mem_buf_dma_buf_exclusive_owner(dmabuf);
}
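
/*
 * msm_vidc_memory_map() - map a dmabuf into the IOMMU for its target region.
 *
 * Re-entrant per map: if the map already holds a reference, only the refcount
 * is bumped. Otherwise the buffer's secure/non-secure ownership is validated
 * against the requested region, the matching context bank is looked up, and
 * the dmabuf is attached and mapped with DMA_ATTR_SKIP_CPU_SYNC (cache
 * maintenance is done separately on the exact sizes needed). The resulting
 * device address is taken from the first scatterlist entry.
 */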
int msm_vidc_memory_map(struct msm_vidc_core *core, struct msm_vidc_map *map)
{
	int rc = 0;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount++;
		goto exit;
	}

	/* reject non-secure mapping request for a secure buffer (or vice versa) */
	if (map->region == MSM_VIDC_NON_SECURE || map->region == MSM_VIDC_NON_SECURE_PIXEL) {
		if (!is_non_secure_buffer(map->dmabuf)) {
			d_vpr_e("%s: secure buffer mapping to non-secure region %d not allowed\n",
				__func__, map->region);
			return -EINVAL;
		}
	} else {
		if (is_non_secure_buffer(map->dmabuf)) {
			d_vpr_e("%s: non-secure buffer mapping to secure region %d not allowed\n",
				__func__, map->region);
			return -EINVAL;
		}
	}

	cb = msm_vidc_get_context_bank(core, map->region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n", __func__);
		rc = -EIO;
		goto error_cb;
	}

	/* Prepare a dma buf for dma on the given device */
	attach = dma_buf_attach(map->dmabuf, cb->dev);
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -ENOMEM;
		d_vpr_e("Failed to attach dmabuf\n");
		goto error_attach;
	}

	if (!map->skip_delayed_unmap) {
		/*
		 * Get the scatterlist for the given attachment.
		 * Mapping of the sg is taken care of by map attachment.
		 */
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
	}

	/*
	 * We do not need the dma_map path to perform cache operations on the
	 * whole buffer size, hence pass the skip-sync flag. The required cache
	 * operations are done separately on the required buffer size.
	 */
	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	if (core->dt->sys_cache_present)
		attach->dma_map_attrs |=
			DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		rc = PTR_ERR(table) ? PTR_ERR(table) : -ENOMEM;
		d_vpr_e("Failed to map table\n");
		goto error_table;
	}
	if (!table->sgl) {
		d_vpr_e("sgl is NULL\n");
		rc = -ENOMEM;
		goto error_sg;
	}

	map->device_addr = table->sgl->dma_address;
	map->table = table;
	map->attach = attach;
	map->refcount++;

exit:
	d_vpr_l("%s: type %11s, device_addr %#llx, refcount %d, region %d\n",
		__func__, buf_name(map->type), map->device_addr, map->refcount, map->region);
	return 0;

error_sg:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
error_table:
	dma_buf_detach(map->dmabuf, attach);
error_attach:
error_cb:
	return rc;
}
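
/*
 * msm_vidc_memory_unmap() - drop one reference to an IOMMU mapping.
 *
 * Decrements the map refcount; when it reaches zero, the attachment is
 * unmapped and detached and the cached device address is cleared. Calling
 * this while the refcount is already zero is reported as an error.
 */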
int msm_vidc_memory_unmap(struct msm_vidc_core *core,
	struct msm_vidc_map *map)
{
	int rc = 0;

	if (!core || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (map->refcount) {
		map->refcount--;
	} else {
		d_vpr_e("unmap called while refcount is zero already\n");
		return -EINVAL;
	}

	d_vpr_l("%s: type %11s, device_addr %#llx, refcount %d, region %d\n",
		__func__, buf_name(map->type), map->device_addr, map->refcount, map->region);

	if (map->refcount)
		goto exit;

	dma_buf_unmap_attachment(map->attach, map->table, DMA_BIDIRECTIONAL);
	dma_buf_detach(map->dmabuf, map->attach);

	map->device_addr = 0x0;
	map->attach = NULL;
	map->table = NULL;

exit:
	return rc;
}

struct dma_buf_attachment *msm_vidc_dma_buf_attach(struct dma_buf *dbuf,
	struct device *dev)
{
	int rc = 0;
	struct dma_buf_attachment *attach = NULL;

	if (!dbuf || !dev) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	attach = dma_buf_attach(dbuf, dev);
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -1;
		d_vpr_e("Failed to attach dmabuf, error %d\n", rc);
		return NULL;
	}

	return attach;
}

int msm_vidc_dma_buf_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *attach)
{
	int rc = 0;

	if (!dbuf || !attach) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	dma_buf_detach(dbuf, attach);

	return rc;
}

struct sg_table *msm_vidc_dma_buf_map_attachment(
	struct dma_buf_attachment *attach)
{
	int rc = 0;
	struct sg_table *table = NULL;

	if (!attach) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		rc = PTR_ERR(table) ? PTR_ERR(table) : -1;
		d_vpr_e("Failed to map table, error %d\n", rc);
		return NULL;
	}

	return table;
}

int msm_vidc_dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	int rc = 0;

	if (!attach || !table) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);

	return rc;
}
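
/*
 * msm_vidc_vmem_alloc() - zeroed vmalloc wrapper with double-alloc detection.
 * msm_vidc_vmem_free() - matching vfree wrapper that also clears the pointer.
 *
 * 'msg' is used only for log identification. The caller is expected to pass
 * a pointer holding NULL; a non-NULL value is treated as a double allocation.
 */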
int msm_vidc_vmem_alloc(unsigned long size, void **mem, const char *msg)
{
	int rc = 0;

	if (*mem) {
		d_vpr_e("%s: error: double alloc\n", msg);
		return -EINVAL;
	}

	*mem = vzalloc(size);
	if (!*mem) {
		d_vpr_e("allocation failed for %s\n", msg);
		rc = -ENOMEM;
	}

	return rc;
}

void msm_vidc_vmem_free(void **addr)
{
	if (addr && *addr) {
		vfree(*addr);
		*addr = NULL;
	}
}
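
/*
 * msm_vidc_memory_alloc() - allocate a driver-internal buffer from a DMA heap.
 *
 * Picks the heap by region (secure pixel/non-pixel heaps, or the system heap
 * for non-secure and secure-bitstream buffers), allocates a 4K-aligned dmabuf,
 * lends secure BIN buffers to the CP_BITSTREAM VM, and optionally vmaps the
 * buffer into the kernel when map_kernel is set.
 */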
int msm_vidc_memory_alloc(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;
	int size = 0;
	struct dma_heap *heap;
	char *heap_name = NULL;
	struct mem_buf_lend_kernel_arg lend_arg;
	int vmids[1];
	int perms[1];

	if (!mem) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	size = ALIGN(mem->size, SZ_4K);

	if (mem->secure) {
		switch (mem->region) {
		case MSM_VIDC_SECURE_PIXEL:
			heap_name = "qcom,secure-pixel";
			break;
		case MSM_VIDC_SECURE_NONPIXEL:
			heap_name = "qcom,secure-non-pixel";
			break;
		case MSM_VIDC_SECURE_BITSTREAM:
			heap_name = "qcom,system";
			break;
		default:
			d_vpr_e("invalid secure region : %#x\n", mem->region);
			return -EINVAL;
		}
	} else {
		heap_name = "qcom,system";
	}

	heap = dma_heap_find(heap_name);
	if (IS_ERR_OR_NULL(heap)) {
		d_vpr_e("%s: dma heap %s not found\n", __func__, heap_name);
		return -ENOMEM;
	}
	mem->dmabuf = dma_heap_buffer_alloc(heap, size, 0, 0);
	if (IS_ERR_OR_NULL(mem->dmabuf)) {
		d_vpr_e("%s: dma heap %s alloc failed\n", __func__, heap_name);
		mem->dmabuf = NULL;
		rc = -ENOMEM;
		goto error;
	}

	if (mem->secure && mem->type == MSM_VIDC_BUF_BIN) {
		vmids[0] = VMID_CP_BITSTREAM;
		perms[0] = PERM_READ | PERM_WRITE;
		lend_arg.nr_acl_entries = ARRAY_SIZE(vmids);
		lend_arg.vmids = vmids;
		lend_arg.perms = perms;

		rc = mem_buf_lend(mem->dmabuf, &lend_arg);
		if (rc) {
			d_vpr_e("%s: BIN dmabuf %pK LEND failed, rc %d heap %s\n",
				__func__, mem->dmabuf, rc, heap_name);
			goto error;
		}
	}

	if (mem->map_kernel) {
		dma_buf_begin_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);

	/*
	 * Waipio uses kernel version 5.10.x,
	 * Kalama uses kernel version 5.15.x,
	 * Pineapple uses kernel version 5.18.x
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0))
		mem->kvaddr = dma_buf_vmap(mem->dmabuf);
		if (!mem->kvaddr) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0))
		rc = dma_buf_vmap(mem->dmabuf, &mem->dmabuf_map);
		if (rc) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
		mem->kvaddr = mem->dmabuf_map.vaddr;
#else
		rc = dma_buf_vmap(mem->dmabuf, &mem->dmabuf_map);
		if (rc) {
			d_vpr_e("%s: kernel map failed\n", __func__);
			rc = -EIO;
			goto error;
		}
		mem->kvaddr = mem->dmabuf_map.vaddr;
#endif
	}

	d_vpr_h("%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, buf_name(mem->type),
		mem->secure, mem->region);
	trace_msm_vidc_dma_buffer("ALLOC", mem->dmabuf, mem->size, mem->kvaddr,
		buf_name(mem->type), mem->secure, mem->region);

	return 0;

error:
	msm_vidc_memory_free(core, mem);
	return rc;
}
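
/*
 * msm_vidc_memory_free() - release a buffer allocated by msm_vidc_memory_alloc().
 *
 * Undoes the kernel mapping (if any), ends CPU access and returns the dmabuf
 * to its DMA heap.
 */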
int msm_vidc_memory_free(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
{
	int rc = 0;

	if (!mem || !mem->dmabuf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	d_vpr_h("%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
		__func__, mem->dmabuf, mem->size, mem->kvaddr, buf_name(mem->type),
		mem->secure, mem->region);
	trace_msm_vidc_dma_buffer("FREE", mem->dmabuf, mem->size, mem->kvaddr,
		buf_name(mem->type), mem->secure, mem->region);

	if (mem->kvaddr) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0))
		dma_buf_vunmap(mem->dmabuf, mem->kvaddr);
#else
		dma_buf_vunmap(mem->dmabuf, &mem->dmabuf_map);
#endif
		mem->kvaddr = NULL;
		dma_buf_end_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
	}

	if (mem->dmabuf) {
		dma_heap_buffer_free(mem->dmabuf);
		mem->dmabuf = NULL;
	}

	return rc;
}
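
/*
 * msm_memory_pool_alloc() - hand out a zeroed, fixed-size object of the given
 * pool type, recycling a node from the free list when one is available and
 * falling back to vzalloc otherwise. Every object is prefixed by a
 * msm_memory_alloc_header used for type and double-free sanity checks.
 *
 * Typical usage (sketch):
 *	buf = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUFFER);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	msm_memory_pool_free(inst, buf);
 */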
void *msm_memory_pool_alloc(struct msm_vidc_inst *inst, enum msm_memory_pool_type type)
{
	struct msm_memory_alloc_header *hdr = NULL;
	struct msm_memory_pool *pool;

	if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return NULL;
	}
	pool = &inst->pool[type];

	if (!list_empty(&pool->free_pool)) {
		/* get 1st node from free pool */
		hdr = list_first_entry(&pool->free_pool,
			struct msm_memory_alloc_header, list);
		list_del_init(&hdr->list);

		/* reset existing data */
		memset((char *)hdr->buf, 0, pool->size);

		/* add to busy pool */
		list_add_tail(&hdr->list, &pool->busy_pool);

		/* set busy flag to true. This is to catch double free request */
		hdr->busy = true;

		return hdr->buf;
	}

	if (msm_vidc_vmem_alloc(pool->size + sizeof(struct msm_memory_alloc_header),
			(void **)&hdr, __func__))
		return NULL;

	INIT_LIST_HEAD(&hdr->list);
	hdr->type = type;
	hdr->busy = true;
	hdr->buf = (void *)(hdr + 1);
	list_add_tail(&hdr->list, &pool->busy_pool);

	return hdr->buf;
}
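
/*
 * msm_memory_pool_free() - return a pool object to its free list.
 *
 * Recovers the alloc header sitting just before the payload, validates the
 * payload address and pool type, rejects double frees via the busy flag, and
 * moves the node from the busy list back to the free list.
 */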
void msm_memory_pool_free(struct msm_vidc_inst *inst, void *vidc_buf)
{
	struct msm_memory_alloc_header *hdr;
	struct msm_memory_pool *pool;

	if (!inst || !vidc_buf) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}
	hdr = (struct msm_memory_alloc_header *)vidc_buf - 1;

	/* sanitize buffer addr */
	if (hdr->buf != vidc_buf) {
		i_vpr_e(inst, "%s: invalid buf addr %pK\n", __func__, vidc_buf);
		return;
	}

	/* sanitize pool type */
	if (hdr->type < 0 || hdr->type >= MSM_MEM_POOL_MAX) {
		i_vpr_e(inst, "%s: invalid pool type %#x\n", __func__, hdr->type);
		return;
	}
	pool = &inst->pool[hdr->type];

	/* catch double-free request */
	if (!hdr->busy) {
		i_vpr_e(inst, "%s: double free request. type %s, addr %pK\n", __func__,
			pool->name, vidc_buf);
		return;
	}
	hdr->busy = false;

	/* remove from busy pool */
	list_del_init(&hdr->list);

	/* add to free pool */
	list_add_tail(&hdr->list, &pool->free_pool);
}
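
/*
 * msm_vidc_destroy_pool_buffers() - free every node of one pool type.
 *
 * A non-empty busy list at this point indicates a leak (an object that was
 * never returned); it is logged and the nodes are freed anyway.
 */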
static void msm_vidc_destroy_pool_buffers(struct msm_vidc_inst *inst,
	enum msm_memory_pool_type type)
{
	struct msm_memory_alloc_header *hdr, *dummy;
	struct msm_memory_pool *pool;
	u32 fcount = 0, bcount = 0;

	if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}
	pool = &inst->pool[type];

	/* detect memleak: busy pool is expected to be empty here */
	if (!list_empty(&pool->busy_pool))
		i_vpr_e(inst, "%s: destroy request on active buffer. type %s\n",
			__func__, pool->name);

	/* destroy all free buffers */
	list_for_each_entry_safe(hdr, dummy, &pool->free_pool, list) {
		list_del(&hdr->list);
		msm_vidc_vmem_free((void **)&hdr);
		fcount++;
	}

	/* destroy all busy buffers */
	list_for_each_entry_safe(hdr, dummy, &pool->busy_pool, list) {
		list_del(&hdr->list);
		msm_vidc_vmem_free((void **)&hdr);
		bcount++;
	}

	i_vpr_h(inst, "%s: type: %23s, count: free %2u, busy %2u\n",
		__func__, pool->name, fcount, bcount);
}
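
/*
 * msm_memory_pools_deinit() - tear down all per-instance memory pools by
 * destroying every pool type in turn.
 */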
void msm_memory_pools_deinit(struct msm_vidc_inst *inst)
{
	u32 i = 0;

	if (!inst) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return;
	}

	/* destroy all buffers from all pool types */
	for (i = 0; i < MSM_MEM_POOL_MAX; i++)
		msm_vidc_destroy_pool_buffers(inst, i);
}

struct msm_vidc_type_size_name {
	enum msm_memory_pool_type type;
	u32 size;
	char *name;
};

static const struct msm_vidc_type_size_name buftype_size_name_arr[] = {
	{MSM_MEM_POOL_BUFFER,    sizeof(struct msm_vidc_buffer),       "MSM_MEM_POOL_BUFFER"    },
	{MSM_MEM_POOL_MAP,       sizeof(struct msm_vidc_map),          "MSM_MEM_POOL_MAP"       },
	{MSM_MEM_POOL_ALLOC,     sizeof(struct msm_vidc_alloc),        "MSM_MEM_POOL_ALLOC"     },
	{MSM_MEM_POOL_TIMESTAMP, sizeof(struct msm_vidc_timestamp),    "MSM_MEM_POOL_TIMESTAMP" },
	{MSM_MEM_POOL_DMABUF,    sizeof(struct msm_memory_dmabuf),     "MSM_MEM_POOL_DMABUF"    },
	{MSM_MEM_POOL_PACKET,    sizeof(struct hfi_pending_packet) + MSM_MEM_POOL_PACKET_SIZE,
		"MSM_MEM_POOL_PACKET"},
	{MSM_MEM_POOL_BUF_TIMER, sizeof(struct msm_vidc_input_timer),  "MSM_MEM_POOL_BUF_TIMER" },
	{MSM_MEM_POOL_BUF_STATS, sizeof(struct msm_vidc_buffer_stats), "MSM_MEM_POOL_BUF_STATS" },
};
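
/*
 * msm_memory_pools_init() - set up the per-instance memory pools.
 *
 * Validates that buftype_size_name_arr covers every pool type, in enum order,
 * then records each pool's object size and name and initializes its free and
 * busy lists.
 */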
int msm_memory_pools_init(struct msm_vidc_inst *inst)
{
	u32 i;

	if (!inst) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	if (ARRAY_SIZE(buftype_size_name_arr) != MSM_MEM_POOL_MAX) {
		i_vpr_e(inst, "%s: num elements mismatch %lu %u\n", __func__,
			ARRAY_SIZE(buftype_size_name_arr), MSM_MEM_POOL_MAX);
		return -EINVAL;
	}

	for (i = 0; i < MSM_MEM_POOL_MAX; i++) {
		if (i != buftype_size_name_arr[i].type) {
			i_vpr_e(inst, "%s: type mismatch %u %u\n", __func__,
				i, buftype_size_name_arr[i].type);
			return -EINVAL;
		}
		inst->pool[i].size = buftype_size_name_arr[i].size;
		inst->pool[i].name = buftype_size_name_arr[i].name;
		INIT_LIST_HEAD(&inst->pool[i].free_pool);
		INIT_LIST_HEAD(&inst->pool[i].busy_pool);
	}

	return 0;
}

/*
int msm_memory_cache_operations(struct msm_vidc_inst *inst,
	struct dma_buf *dbuf, enum smem_cache_ops cache_op,
	unsigned long offset, unsigned long size, u32 sid)
{
	int rc = 0;
	unsigned long flags = 0;

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (!dbuf) {
		i_vpr_e(inst, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = dma_buf_get_flags(dbuf, &flags);
	if (rc) {
		i_vpr_e(inst, "%s: dma_buf_get_flags failed, err %d\n",
			__func__, rc);
		return rc;
	} else if (!(flags & ION_FLAG_CACHED)) {
		return rc;
	}

	switch (cache_op) {
	case SMEM_CACHE_CLEAN:
	case SMEM_CACHE_CLEAN_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		break;
	case SMEM_CACHE_INVALIDATE:
		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
				offset, size);
		if (rc)
			break;
		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
				offset, size);
		break;
	default:
		i_vpr_e(inst, "%s: cache (%d) operation not supported\n",
			__func__, cache_op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int msm_smem_memory_prefetch(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_prefetch(ION_SECURE_HEAP_ID, ion_region,
		vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: prefetch failed, ret: %d\n",
			__func__, rc);
	else
		i_vpr_l(inst, "%s: prefetch succeeded\n", __func__);

	return rc;
}

int msm_smem_memory_drain(struct msm_vidc_inst *inst)
{
	int i, rc = 0;
	struct memory_regions *vidc_regions = NULL;
	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];

	if (!inst) {
		d_vpr_e("%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	vidc_regions = &inst->regions;
	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
		i_vpr_e(inst, "%s: invalid num_regions %d, max %d\n",
			__func__, vidc_regions->num_regions,
			MEMORY_REGIONS_MAX);
		return -EINVAL;
	}

	memset(ion_region, 0, sizeof(ion_region));
	for (i = 0; i < vidc_regions->num_regions; i++) {
		ion_region[i].size = vidc_regions->region[i].size;
		ion_region[i].vmid = vidc_regions->region[i].vmid;
	}

	rc = msm_ion_heap_drain(ION_SECURE_HEAP_ID, ion_region,
		vidc_regions->num_regions);
	if (rc)
		i_vpr_e(inst, "%s: drain failed, ret: %d\n", __func__, rc);
	else
		i_vpr_l(inst, "%s: drain succeeded\n", __func__);

	return rc;
}
*/