msm_vidc_memory.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/dma-buf.h>
  6. #include <linux/dma-heap.h>
  7. #include <linux/dma-mapping.h>
  8. #include "msm_vidc_memory.h"
  9. #include "msm_vidc_debug.h"
  10. #include "msm_vidc_internal.h"
  11. #include "msm_vidc_driver.h"
  12. #include "msm_vidc_core.h"
  13. #include "msm_vidc_events.h"
  14. #include "msm_vidc_platform.h"
  15. #include "venus_hfi.h"
  16. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0))
  17. MODULE_IMPORT_NS(DMA_BUF);
  18. #endif
/*
 * Describes one memory pool: its type id, the fixed size of each element
 * handed out by the pool, and a printable name used in debug logs.
 */
struct msm_vidc_type_size_name {
	enum msm_memory_pool_type type;
	u32 size;
	char *name;
};

/*
 * Per-pool-type size/name table, indexed by pool type.
 * msm_vidc_pools_init() validates that the array covers exactly
 * MSM_MEM_POOL_MAX entries and that entry i has .type == i.
 */
static const struct msm_vidc_type_size_name buftype_size_name_arr[] = {
	{MSM_MEM_POOL_BUFFER, sizeof(struct msm_vidc_buffer), "MSM_MEM_POOL_BUFFER" },
	{MSM_MEM_POOL_ALLOC_MAP, sizeof(struct msm_vidc_mem), "MSM_MEM_POOL_ALLOC_MAP" },
	{MSM_MEM_POOL_TIMESTAMP, sizeof(struct msm_vidc_timestamp), "MSM_MEM_POOL_TIMESTAMP" },
	{MSM_MEM_POOL_DMABUF, sizeof(struct msm_memory_dmabuf), "MSM_MEM_POOL_DMABUF" },
	/* packet pool elements carry extra payload space after the header */
	{MSM_MEM_POOL_PACKET, sizeof(struct hfi_pending_packet) + MSM_MEM_POOL_PACKET_SIZE,
		"MSM_MEM_POOL_PACKET"},
	{MSM_MEM_POOL_BUF_TIMER, sizeof(struct msm_vidc_input_timer), "MSM_MEM_POOL_BUF_TIMER" },
	{MSM_MEM_POOL_BUF_STATS, sizeof(struct msm_vidc_buffer_stats), "MSM_MEM_POOL_BUF_STATS"},
};
  34. int msm_vidc_vmem_alloc(unsigned long size, void **mem, const char *msg)
  35. {
  36. int rc = 0;
  37. if (*mem) {
  38. d_vpr_e("%s: error: double alloc\n", msg);
  39. rc = -EINVAL;
  40. }
  41. *mem = vzalloc(size);
  42. if (!*mem) {
  43. d_vpr_e("allocation failed for %s\n", msg);
  44. rc = -ENOMEM;
  45. }
  46. return rc;
  47. }
  48. void msm_vidc_vmem_free(void **addr)
  49. {
  50. if (addr && *addr) {
  51. vfree(*addr);
  52. *addr = NULL;
  53. }
  54. }
  55. void *msm_vidc_pool_alloc(struct msm_vidc_inst *inst, enum msm_memory_pool_type type)
  56. {
  57. struct msm_memory_alloc_header *hdr = NULL;
  58. struct msm_memory_pool *pool;
  59. if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
  60. d_vpr_e("%s: Invalid params\n", __func__);
  61. return NULL;
  62. }
  63. pool = &inst->pool[type];
  64. if (!list_empty(&pool->free_pool)) {
  65. /* get 1st node from free pool */
  66. hdr = list_first_entry(&pool->free_pool,
  67. struct msm_memory_alloc_header, list);
  68. /* move node from free pool to busy pool */
  69. list_move_tail(&hdr->list, &pool->busy_pool);
  70. /* reset existing data */
  71. memset((char *)hdr->buf, 0, pool->size);
  72. /* set busy flag to true. This is to catch double free request */
  73. hdr->busy = true;
  74. return hdr->buf;
  75. }
  76. if (msm_vidc_vmem_alloc(pool->size + sizeof(struct msm_memory_alloc_header),
  77. (void **)&hdr, __func__))
  78. return NULL;
  79. INIT_LIST_HEAD(&hdr->list);
  80. hdr->type = type;
  81. hdr->busy = true;
  82. hdr->buf = (void *)(hdr + 1);
  83. list_add_tail(&hdr->list, &pool->busy_pool);
  84. return hdr->buf;
  85. }
  86. void msm_vidc_pool_free(struct msm_vidc_inst *inst, void *vidc_buf)
  87. {
  88. struct msm_memory_alloc_header *hdr;
  89. struct msm_memory_pool *pool;
  90. if (!inst || !vidc_buf) {
  91. d_vpr_e("%s: Invalid params\n", __func__);
  92. return;
  93. }
  94. hdr = (struct msm_memory_alloc_header *)vidc_buf - 1;
  95. /* sanitize buffer addr */
  96. if (hdr->buf != vidc_buf) {
  97. i_vpr_e(inst, "%s: invalid buf addr %p\n", __func__, vidc_buf);
  98. return;
  99. }
  100. /* sanitize pool type */
  101. if (hdr->type < 0 || hdr->type >= MSM_MEM_POOL_MAX) {
  102. i_vpr_e(inst, "%s: invalid pool type %#x\n", __func__, hdr->type);
  103. return;
  104. }
  105. pool = &inst->pool[hdr->type];
  106. /* catch double-free request */
  107. if (!hdr->busy) {
  108. i_vpr_e(inst, "%s: double free request. type %s, addr %p\n", __func__,
  109. pool->name, vidc_buf);
  110. return;
  111. }
  112. hdr->busy = false;
  113. /* move node from busy pool to free pool */
  114. list_move_tail(&hdr->list, &pool->free_pool);
  115. }
  116. static void msm_vidc_destroy_pool_buffers(struct msm_vidc_inst *inst,
  117. enum msm_memory_pool_type type)
  118. {
  119. struct msm_memory_alloc_header *hdr, *dummy;
  120. struct msm_memory_pool *pool;
  121. u32 fcount = 0, bcount = 0;
  122. if (!inst || type < 0 || type >= MSM_MEM_POOL_MAX) {
  123. d_vpr_e("%s: Invalid params\n", __func__);
  124. return;
  125. }
  126. pool = &inst->pool[type];
  127. /* detect memleak: busy pool is expected to be empty here */
  128. if (!list_empty(&pool->busy_pool))
  129. i_vpr_e(inst, "%s: destroy request on active buffer. type %s\n",
  130. __func__, pool->name);
  131. /* destroy all free buffers */
  132. list_for_each_entry_safe(hdr, dummy, &pool->free_pool, list) {
  133. list_del(&hdr->list);
  134. msm_vidc_vmem_free((void **)&hdr);
  135. fcount++;
  136. }
  137. /* destroy all busy buffers */
  138. list_for_each_entry_safe(hdr, dummy, &pool->busy_pool, list) {
  139. list_del(&hdr->list);
  140. msm_vidc_vmem_free((void **)&hdr);
  141. bcount++;
  142. }
  143. i_vpr_h(inst, "%s: type: %23s, count: free %2u, busy %2u\n",
  144. __func__, pool->name, fcount, bcount);
  145. }
  146. int msm_vidc_pools_init(struct msm_vidc_inst *inst)
  147. {
  148. u32 i;
  149. if (!inst) {
  150. d_vpr_e("%s: Invalid params\n", __func__);
  151. return -EINVAL;
  152. }
  153. if (ARRAY_SIZE(buftype_size_name_arr) != MSM_MEM_POOL_MAX) {
  154. i_vpr_e(inst, "%s: num elements mismatch %lu %u\n", __func__,
  155. ARRAY_SIZE(buftype_size_name_arr), MSM_MEM_POOL_MAX);
  156. return -EINVAL;
  157. }
  158. for (i = 0; i < MSM_MEM_POOL_MAX; i++) {
  159. if (i != buftype_size_name_arr[i].type) {
  160. i_vpr_e(inst, "%s: type mismatch %u %u\n", __func__,
  161. i, buftype_size_name_arr[i].type);
  162. return -EINVAL;
  163. }
  164. inst->pool[i].size = buftype_size_name_arr[i].size;
  165. inst->pool[i].name = buftype_size_name_arr[i].name;
  166. INIT_LIST_HEAD(&inst->pool[i].free_pool);
  167. INIT_LIST_HEAD(&inst->pool[i].busy_pool);
  168. }
  169. return 0;
  170. }
  171. void msm_vidc_pools_deinit(struct msm_vidc_inst *inst)
  172. {
  173. u32 i = 0;
  174. if (!inst) {
  175. d_vpr_e("%s: Invalid params\n", __func__);
  176. return;
  177. }
  178. /* destroy all buffers from all pool types */
  179. for (i = 0; i < MSM_MEM_POOL_MAX; i++)
  180. msm_vidc_destroy_pool_buffers(inst, i);
  181. }
  182. static struct dma_buf *msm_vidc_dma_buf_get(struct msm_vidc_inst *inst, int fd)
  183. {
  184. struct msm_memory_dmabuf *buf = NULL;
  185. struct dma_buf *dmabuf = NULL;
  186. bool found = false;
  187. if (!inst) {
  188. d_vpr_e("%s: invalid params\n", __func__);
  189. return NULL;
  190. }
  191. /* get local dmabuf ref for tracking */
  192. dmabuf = dma_buf_get(fd);
  193. if (IS_ERR_OR_NULL(dmabuf)) {
  194. d_vpr_e("Failed to get dmabuf for %d, error %ld\n",
  195. fd, PTR_ERR(dmabuf));
  196. return NULL;
  197. }
  198. /* track dmabuf - inc refcount if already present */
  199. list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
  200. if (buf->dmabuf == dmabuf) {
  201. buf->refcount++;
  202. found = true;
  203. break;
  204. }
  205. }
  206. if (found) {
  207. /* put local dmabuf ref */
  208. dma_buf_put(dmabuf);
  209. return dmabuf;
  210. }
  211. /* get tracker instance from pool */
  212. buf = msm_vidc_pool_alloc(inst, MSM_MEM_POOL_DMABUF);
  213. if (!buf) {
  214. i_vpr_e(inst, "%s: dmabuf alloc failed\n", __func__);
  215. dma_buf_put(dmabuf);
  216. return NULL;
  217. }
  218. /* hold dmabuf strong ref in tracker */
  219. buf->dmabuf = dmabuf;
  220. buf->refcount = 1;
  221. INIT_LIST_HEAD(&buf->list);
  222. /* add new dmabuf entry to tracker */
  223. list_add_tail(&buf->list, &inst->dmabuf_tracker);
  224. return dmabuf;
  225. }
  226. static void msm_vidc_dma_buf_put(struct msm_vidc_inst *inst, struct dma_buf *dmabuf)
  227. {
  228. struct msm_memory_dmabuf *buf = NULL;
  229. bool found = false;
  230. if (!inst || !dmabuf) {
  231. d_vpr_e("%s: invalid params\n", __func__);
  232. return;
  233. }
  234. /* track dmabuf - dec refcount if already present */
  235. list_for_each_entry(buf, &inst->dmabuf_tracker, list) {
  236. if (buf->dmabuf == dmabuf) {
  237. buf->refcount--;
  238. found = true;
  239. break;
  240. }
  241. }
  242. if (!found) {
  243. i_vpr_e(inst, "%s: invalid dmabuf %p\n", __func__, dmabuf);
  244. return;
  245. }
  246. /* non-zero refcount - do nothing */
  247. if (buf->refcount)
  248. return;
  249. /* remove dmabuf entry from tracker */
  250. list_del(&buf->list);
  251. /* release dmabuf strong ref from tracker */
  252. dma_buf_put(buf->dmabuf);
  253. /* put tracker instance back to pool */
  254. msm_vidc_pool_free(inst, buf);
  255. }
  256. static void msm_vidc_dma_buf_put_completely(struct msm_vidc_inst *inst,
  257. struct msm_memory_dmabuf *buf)
  258. {
  259. if (!inst || !buf) {
  260. d_vpr_e("%s: invalid params\n", __func__);
  261. return;
  262. }
  263. while (buf->refcount) {
  264. buf->refcount--;
  265. if (!buf->refcount) {
  266. /* remove dmabuf entry from tracker */
  267. list_del(&buf->list);
  268. /* release dmabuf strong ref from tracker */
  269. dma_buf_put(buf->dmabuf);
  270. /* put tracker instance back to pool */
  271. msm_vidc_pool_free(inst, buf);
  272. break;
  273. }
  274. }
  275. }
  276. static struct dma_buf_attachment *msm_vidc_dma_buf_attach(struct msm_vidc_core *core,
  277. struct dma_buf *dbuf, struct device *dev)
  278. {
  279. int rc = 0;
  280. struct dma_buf_attachment *attach = NULL;
  281. if (!core || !dbuf || !dev) {
  282. d_vpr_e("%s: invalid params\n", __func__);
  283. return NULL;
  284. }
  285. attach = dma_buf_attach(dbuf, dev);
  286. if (IS_ERR_OR_NULL(attach)) {
  287. rc = PTR_ERR(attach) ? PTR_ERR(attach) : -1;
  288. d_vpr_e("Failed to attach dmabuf, error %d\n", rc);
  289. return NULL;
  290. }
  291. return attach;
  292. }
  293. static int msm_vidc_dma_buf_detach(struct msm_vidc_core *core,
  294. struct dma_buf *dbuf, struct dma_buf_attachment *attach)
  295. {
  296. int rc = 0;
  297. if (!dbuf || !attach) {
  298. d_vpr_e("%s: invalid params\n", __func__);
  299. return -EINVAL;
  300. }
  301. dma_buf_detach(dbuf, attach);
  302. return rc;
  303. }
  304. static int msm_vidc_dma_buf_unmap_attachment(struct msm_vidc_core *core,
  305. struct dma_buf_attachment *attach, struct sg_table *table)
  306. {
  307. int rc = 0;
  308. if (!attach || !table) {
  309. d_vpr_e("%s: invalid params\n", __func__);
  310. return -EINVAL;
  311. }
  312. dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
  313. return rc;
  314. }
  315. static struct sg_table *msm_vidc_dma_buf_map_attachment(
  316. struct msm_vidc_core *core, struct dma_buf_attachment *attach)
  317. {
  318. int rc = 0;
  319. struct sg_table *table = NULL;
  320. if (!attach) {
  321. d_vpr_e("%s: invalid params\n", __func__);
  322. return NULL;
  323. }
  324. table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
  325. if (IS_ERR_OR_NULL(table)) {
  326. rc = PTR_ERR(table) ? PTR_ERR(table) : -1;
  327. d_vpr_e("Failed to map table, error %d\n", rc);
  328. return NULL;
  329. }
  330. if (!table->sgl) {
  331. d_vpr_e("%s: sgl is NULL\n", __func__);
  332. msm_vidc_dma_buf_unmap_attachment(core, attach, table);
  333. return NULL;
  334. }
  335. return table;
  336. }
  337. static int msm_vidc_memory_alloc_map(struct msm_vidc_core *core, struct msm_vidc_mem *mem)
  338. {
  339. int size = 0;
  340. struct context_bank_info *cb = NULL;
  341. if (!mem) {
  342. d_vpr_e("%s: invalid params\n", __func__);
  343. return -EINVAL;
  344. }
  345. size = ALIGN(mem->size, SZ_4K);
  346. mem->attrs = DMA_ATTR_WRITE_COMBINE;
  347. cb = msm_vidc_get_context_bank_for_region(core, mem->region);
  348. if (!cb) {
  349. d_vpr_e("%s: Failed to get context bank device\n",
  350. __func__);
  351. return -EIO;
  352. }
  353. mem->kvaddr = dma_alloc_attrs(cb->dev, size, &mem->device_addr, GFP_KERNEL,
  354. mem->attrs);
  355. if (!mem->kvaddr) {
  356. d_vpr_e("%s: dma_alloc_attrs returned NULL\n", __func__);
  357. return -ENOMEM;
  358. }
  359. d_vpr_h(
  360. "%s: dmabuf %pK, size %d, buffer_type %s, secure %d, region %d\n",
  361. __func__, mem->kvaddr, mem->size, buf_name(mem->type),
  362. mem->secure, mem->region);
  363. return 0;
  364. }
  365. static int msm_vidc_memory_unmap_free(struct msm_vidc_core *core, struct msm_vidc_mem *mem)
  366. {
  367. int rc = 0;
  368. struct context_bank_info *cb = NULL;
  369. if (!mem || !mem->device_addr || !mem->kvaddr) {
  370. d_vpr_e("%s: invalid params\n", __func__);
  371. return -EINVAL;
  372. }
  373. d_vpr_h(
  374. "%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
  375. __func__, mem->device_addr, mem->size, mem->kvaddr, buf_name(mem->type),
  376. mem->secure, mem->region);
  377. cb = msm_vidc_get_context_bank_for_region(core, mem->region);
  378. if (!cb) {
  379. d_vpr_e("%s: Failed to get context bank device\n",
  380. __func__);
  381. return -EIO;
  382. }
  383. dma_free_attrs(cb->dev, mem->size, mem->kvaddr, mem->device_addr,
  384. mem->attrs);
  385. mem->kvaddr = NULL;
  386. mem->device_addr = 0;
  387. return rc;
  388. }
/*
 * Region selector for buffer allocations: this implementation always
 * reports MSM_VIDC_NON_SECURE, regardless of @inst or @buffer_type.
 */
static u32 msm_vidc_buffer_region(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	return MSM_VIDC_NON_SECURE;
}
/*
 * Default memory-ops vtable bundling the dmabuf tracking, attach/map and
 * coherent alloc/free helpers above; exposed via get_mem_ops().
 */
static struct msm_vidc_memory_ops msm_mem_ops = {
	.dma_buf_get = msm_vidc_dma_buf_get,
	.dma_buf_put = msm_vidc_dma_buf_put,
	.dma_buf_put_completely = msm_vidc_dma_buf_put_completely,
	.dma_buf_attach = msm_vidc_dma_buf_attach,
	.dma_buf_detach = msm_vidc_dma_buf_detach,
	.dma_buf_map_attachment = msm_vidc_dma_buf_map_attachment,
	.dma_buf_unmap_attachment = msm_vidc_dma_buf_unmap_attachment,
	.memory_alloc_map = msm_vidc_memory_alloc_map,
	.memory_unmap_free = msm_vidc_memory_unmap_free,
	.buffer_region = msm_vidc_buffer_region,
};
/* Accessor for the default memory-ops vtable defined in this file. */
struct msm_vidc_memory_ops *get_mem_ops(void)
{
	return &msm_mem_ops;
}